diff --git a/cms/.coveragerc b/cms/.coveragerc index 42638feb8f..9b1e59d670 100644 --- a/cms/.coveragerc +++ b/cms/.coveragerc @@ -2,11 +2,13 @@ [run] data_file = reports/cms/.coverage source = cms +omit = cms/envs/*, cms/manage.py [report] ignore_errors = True [html] +title = CMS Python Test Coverage Report directory = reports/cms/cover [xml] diff --git a/common/djangoapps/student/admin.py b/common/djangoapps/student/admin.py index ec3b708ca7..64fe844801 100644 --- a/common/djangoapps/student/admin.py +++ b/common/djangoapps/student/admin.py @@ -12,6 +12,8 @@ admin.site.register(UserTestGroup) admin.site.register(CourseEnrollment) +admin.site.register(CourseEnrollmentAllowed) + admin.site.register(Registration) admin.site.register(PendingNameChange) diff --git a/common/djangoapps/student/migrations/0023_add_more_fields_to_test_center_registration.py b/common/djangoapps/student/migrations/0022_auto__add_courseenrollmentallowed__add_unique_courseenrollmentallowed_.py similarity index 65% rename from common/djangoapps/student/migrations/0023_add_more_fields_to_test_center_registration.py rename to common/djangoapps/student/migrations/0022_auto__add_courseenrollmentallowed__add_unique_courseenrollmentallowed_.py index adcf5e6d66..f7e2571685 100644 --- a/common/djangoapps/student/migrations/0023_add_more_fields_to_test_center_registration.py +++ b/common/djangoapps/student/migrations/0022_auto__add_courseenrollmentallowed__add_unique_courseenrollmentallowed_.py @@ -8,59 +8,25 @@ from django.db import models class Migration(SchemaMigration): def forwards(self, orm): - # Adding field 'TestCenterUser.processed_at' - db.add_column('student_testcenteruser', 'processed_at', - self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True), - keep_default=False) + # Adding model 'CourseEnrollmentAllowed' + db.create_table('student_courseenrollmentallowed', ( + ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), + ('email', 
self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)), + ('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)), + ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, db_index=True, blank=True)), + )) + db.send_create_signal('student', ['CourseEnrollmentAllowed']) - # Adding field 'TestCenterUser.confirmed_at' - db.add_column('student_testcenteruser', 'confirmed_at', - self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True), - keep_default=False) - - # Adding field 'TestCenterRegistration.processed_at' - db.add_column('student_testcenterregistration', 'processed_at', - self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True), - keep_default=False) - - # Adding field 'TestCenterRegistration.authorization_id' - db.add_column('student_testcenterregistration', 'authorization_id', - self.gf('django.db.models.fields.IntegerField')(null=True, db_index=True), - keep_default=False) - - # Adding field 'TestCenterRegistration.confirmed_at' - db.add_column('student_testcenterregistration', 'confirmed_at', - self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True), - keep_default=False) - - # Adding index on 'TestCenterRegistration', fields ['accommodation_request'] - db.create_index('student_testcenterregistration', ['accommodation_request']) - - # Adding index on 'TestCenterRegistration', fields ['upload_status'] - db.create_index('student_testcenterregistration', ['upload_status']) + # Adding unique constraint on 'CourseEnrollmentAllowed', fields ['email', 'course_id'] + db.create_unique('student_courseenrollmentallowed', ['email', 'course_id']) def backwards(self, orm): - # Removing index on 'TestCenterRegistration', fields ['upload_status'] - db.delete_index('student_testcenterregistration', ['upload_status']) + # Removing unique constraint on 'CourseEnrollmentAllowed', fields ['email', 'course_id'] + 
db.delete_unique('student_courseenrollmentallowed', ['email', 'course_id']) - # Removing index on 'TestCenterRegistration', fields ['accommodation_request'] - db.delete_index('student_testcenterregistration', ['accommodation_request']) - - # Deleting field 'TestCenterUser.processed_at' - db.delete_column('student_testcenteruser', 'processed_at') - - # Deleting field 'TestCenterUser.confirmed_at' - db.delete_column('student_testcenteruser', 'confirmed_at') - - # Deleting field 'TestCenterRegistration.processed_at' - db.delete_column('student_testcenterregistration', 'processed_at') - - # Deleting field 'TestCenterRegistration.authorization_id' - db.delete_column('student_testcenterregistration', 'authorization_id') - - # Deleting field 'TestCenterRegistration.confirmed_at' - db.delete_column('student_testcenterregistration', 'confirmed_at') + # Deleting model 'CourseEnrollmentAllowed' + db.delete_table('student_courseenrollmentallowed') models = { @@ -107,6 +73,13 @@ class Migration(SchemaMigration): 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, + 'student.courseenrollmentallowed': { + 'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'}, + 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), + 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), + 'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) + }, 'student.pendingemailchange': { 'Meta': {'object_name': 'PendingEmailChange'}, 'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), @@ -127,27 +100,6 @@ class Migration(SchemaMigration): 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}) }, - 'student.testcenterregistration': { - 'Meta': {'object_name': 'TestCenterRegistration'}, - 'accommodation_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), - 'accommodation_request': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '1024', 'blank': 'True'}), - 'authorization_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}), - 'client_authorization_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}), - 'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), - 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), - 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}), - 'eligibility_appointment_date_first': ('django.db.models.fields.DateField', [], {'db_index': 'True'}), - 'eligibility_appointment_date_last': ('django.db.models.fields.DateField', [], {'db_index': 'True'}), - 'exam_series_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}), - 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), - 'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), - 'testcenter_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['student.TestCenterUser']"}), - 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), - 'upload_error_message': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}), - 'upload_status': ('django.db.models.fields.CharField', [], 
{'db_index': 'True', 'max_length': '20', 'blank': 'True'}), - 'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), - 'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}) - }, 'student.testcenteruser': { 'Meta': {'object_name': 'TestCenterUser'}, 'address_1': ('django.db.models.fields.CharField', [], {'max_length': '40'}), @@ -155,9 +107,8 @@ class Migration(SchemaMigration): 'address_3': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}), 'candidate_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}), 'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), - 'client_candidate_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}), - 'company_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}), - 'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), + 'client_candidate_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}), + 'company_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'country': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}), 'extension': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'blank': 'True'}), @@ -170,14 +121,10 @@ class Migration(SchemaMigration): 'phone': ('django.db.models.fields.CharField', [], {'max_length': '35'}), 'phone_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}), 'postal_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}), - 
'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'salutation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}), 'suffix': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), - 'upload_error_message': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}), - 'upload_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}), - 'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'unique': 'True'}), 'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}) }, diff --git a/common/djangoapps/student/migrations/0022_add_more_fields_to_test_center_user.py b/common/djangoapps/student/migrations/0023_add_test_center_registration.py similarity index 86% rename from common/djangoapps/student/migrations/0022_add_more_fields_to_test_center_user.py rename to common/djangoapps/student/migrations/0023_add_test_center_registration.py index 25d20f9e0d..c5af38dd37 100644 --- a/common/djangoapps/student/migrations/0022_add_more_fields_to_test_center_user.py +++ b/common/djangoapps/student/migrations/0023_add_test_center_registration.py @@ -21,28 +21,41 @@ class Migration(SchemaMigration): ('eligibility_appointment_date_first', self.gf('django.db.models.fields.DateField')(db_index=True)), ('eligibility_appointment_date_last', self.gf('django.db.models.fields.DateField')(db_index=True)), ('accommodation_code', self.gf('django.db.models.fields.CharField')(max_length=64, 
blank=True)), - ('accommodation_request', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)), - ('upload_status', self.gf('django.db.models.fields.CharField')(max_length=20, blank=True)), + ('accommodation_request', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=1024, blank=True)), ('uploaded_at', self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True)), + ('processed_at', self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True)), + ('upload_status', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=20, blank=True)), ('upload_error_message', self.gf('django.db.models.fields.CharField')(max_length=512, blank=True)), + ('authorization_id', self.gf('django.db.models.fields.IntegerField')(null=True, db_index=True)), + ('confirmed_at', self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True)), )) db.send_create_signal('student', ['TestCenterRegistration']) - # Adding field 'TestCenterUser.upload_status' - db.add_column('student_testcenteruser', 'upload_status', - self.gf('django.db.models.fields.CharField')(db_index=True, default='', max_length=20, blank=True), - keep_default=False) - # Adding field 'TestCenterUser.uploaded_at' db.add_column('student_testcenteruser', 'uploaded_at', self.gf('django.db.models.fields.DateTimeField')(db_index=True, null=True, blank=True), keep_default=False) + # Adding field 'TestCenterUser.processed_at' + db.add_column('student_testcenteruser', 'processed_at', + self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True), + keep_default=False) + + # Adding field 'TestCenterUser.upload_status' + db.add_column('student_testcenteruser', 'upload_status', + self.gf('django.db.models.fields.CharField')(db_index=True, default='', max_length=20, blank=True), + keep_default=False) + # Adding field 'TestCenterUser.upload_error_message' db.add_column('student_testcenteruser', 'upload_error_message', 
self.gf('django.db.models.fields.CharField')(default='', max_length=512, blank=True), keep_default=False) + # Adding field 'TestCenterUser.confirmed_at' + db.add_column('student_testcenteruser', 'confirmed_at', + self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True), + keep_default=False) + # Adding index on 'TestCenterUser', fields ['company_name'] db.create_index('student_testcenteruser', ['company_name']) @@ -60,15 +73,21 @@ class Migration(SchemaMigration): # Deleting model 'TestCenterRegistration' db.delete_table('student_testcenterregistration') - # Deleting field 'TestCenterUser.upload_status' - db.delete_column('student_testcenteruser', 'upload_status') - # Deleting field 'TestCenterUser.uploaded_at' db.delete_column('student_testcenteruser', 'uploaded_at') + # Deleting field 'TestCenterUser.processed_at' + db.delete_column('student_testcenteruser', 'processed_at') + + # Deleting field 'TestCenterUser.upload_status' + db.delete_column('student_testcenteruser', 'upload_status') + # Deleting field 'TestCenterUser.upload_error_message' db.delete_column('student_testcenteruser', 'upload_error_message') + # Deleting field 'TestCenterUser.confirmed_at' + db.delete_column('student_testcenteruser', 'confirmed_at') + models = { 'auth.group': { @@ -114,6 +133,13 @@ class Migration(SchemaMigration): 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, + 'student.courseenrollmentallowed': { + 'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'}, + 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), + 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), + 'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), + 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) + }, 'student.pendingemailchange': { 'Meta': {'object_name': 'PendingEmailChange'}, 'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), @@ -137,18 +163,21 @@ class Migration(SchemaMigration): 'student.testcenterregistration': { 'Meta': {'object_name': 'TestCenterRegistration'}, 'accommodation_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), - 'accommodation_request': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), + 'accommodation_request': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '1024', 'blank': 'True'}), + 'authorization_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}), 'client_authorization_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}), + 'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}), 'eligibility_appointment_date_first': ('django.db.models.fields.DateField', [], {'db_index': 'True'}), 'eligibility_appointment_date_last': ('django.db.models.fields.DateField', [], {'db_index': 'True'}), 'exam_series_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'testcenter_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['student.TestCenterUser']"}), 'updated_at': ('django.db.models.fields.DateTimeField', 
[], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), 'upload_error_message': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}), - 'upload_status': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}), + 'upload_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}), 'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}) }, @@ -161,6 +190,7 @@ class Migration(SchemaMigration): 'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'client_candidate_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}), 'company_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}), + 'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'country': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}), 'extension': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'blank': 'True'}), @@ -173,6 +203,7 @@ class Migration(SchemaMigration): 'phone': ('django.db.models.fields.CharField', [], {'max_length': '35'}), 'phone_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}), 'postal_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}), + 'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'salutation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'state': 
('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}), 'suffix': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), diff --git a/common/djangoapps/student/models.py b/common/djangoapps/student/models.py index d15afc5d83..bfa7077176 100644 --- a/common/djangoapps/student/models.py +++ b/common/djangoapps/student/models.py @@ -52,7 +52,6 @@ from django.dispatch import receiver from django.forms import ModelForm, forms import comment_client as cc -from django_comment_client.models import Role log = logging.getLogger(__name__) @@ -615,15 +614,22 @@ class CourseEnrollment(models.Model): return "[CourseEnrollment] %s: %s (%s)" % (self.user, self.course_id, self.created) -@receiver(post_save, sender=CourseEnrollment) -def assign_default_role(sender, instance, **kwargs): - if instance.user.is_staff: - role = Role.objects.get_or_create(course_id=instance.course_id, name="Moderator")[0] - else: - role = Role.objects.get_or_create(course_id=instance.course_id, name="Student")[0] +class CourseEnrollmentAllowed(models.Model): + """ + Table of users (specified by email address strings) who are allowed to enroll in a specified course. + The user may or may not (yet) exist. Enrollment by users listed in this table is allowed + even if the enrollment time window is past. 
+ """ + email = models.CharField(max_length=255, db_index=True) + course_id = models.CharField(max_length=255, db_index=True) - logging.info("assign_default_role: adding %s as %s" % (instance.user, role)) - instance.user.roles.add(role) + created = models.DateTimeField(auto_now_add=True, null=True, db_index=True) + + class Meta: + unique_together = (('email', 'course_id'), ) + + def __unicode__(self): + return "[CourseEnrollmentAllowed] %s: %s (%s)" % (self.email, self.course_id, self.created) #cache_relation(User.profile) diff --git a/common/djangoapps/student/views.py b/common/djangoapps/student/views.py index 12c869cae6..2c427d25bb 100644 --- a/common/djangoapps/student/views.py +++ b/common/djangoapps/student/views.py @@ -42,7 +42,7 @@ from xmodule.modulestore.django import modulestore #from datetime import date from collections import namedtuple -from courseware.courses import get_courses_by_university +from courseware.courses import get_courses from courseware.access import has_access from statsd import statsd @@ -76,16 +76,21 @@ def index(request, extra_context={}, user=None): domain = settings.MITX_FEATURES.get('FORCE_UNIVERSITY_DOMAIN') # normally False if domain==False: # do explicit check, because domain=None is valid domain = request.META.get('HTTP_HOST') - universities = get_courses_by_university(None, - domain=domain) + + courses = get_courses(None, domain=domain) + + # Sort courses by how far are they from they start day + key = lambda course: course.days_until_start + courses = sorted(courses, key=key, reverse=True) # Get the 3 most recent news top_news = _get_news(top=3) - context = {'universities': universities, 'news': top_news} + context = {'courses': courses, 'news': top_news} context.update(extra_context) return render_to_response('index.html', context) + def course_from_id(course_id): """Return the CourseDescriptor corresponding to this course_id""" course_loc = CourseDescriptor.id_to_location(course_id) @@ -338,6 +343,14 @@ def 
change_enrollment(request): return {'success': False, 'error': 'We weren\'t able to unenroll you. Please try again.'} +@ensure_csrf_cookie +def accounts_login(request, error=""): + + + return render_to_response('accounts_login.html', { 'error': error }) + + + # Need different levels of logging @ensure_csrf_cookie def login_user(request, error=""): diff --git a/common/djangoapps/track/migrations/0001_initial.py b/common/djangoapps/track/migrations/0001_initial.py new file mode 100644 index 0000000000..0546203cf8 --- /dev/null +++ b/common/djangoapps/track/migrations/0001_initial.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +import datetime +from south.db import db +from south.v2 import SchemaMigration +from django.db import models + + +class Migration(SchemaMigration): + + def forwards(self, orm): + # Adding model 'TrackingLog' + db.create_table('track_trackinglog', ( + ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), + ('dtcreated', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), + ('username', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)), + ('ip', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)), + ('event_source', self.gf('django.db.models.fields.CharField')(max_length=32)), + ('event_type', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)), + ('event', self.gf('django.db.models.fields.TextField')(blank=True)), + ('agent', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)), + ('page', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)), + ('time', self.gf('django.db.models.fields.DateTimeField')()), + )) + db.send_create_signal('track', ['TrackingLog']) + + + def backwards(self, orm): + # Deleting model 'TrackingLog' + db.delete_table('track_trackinglog') + + + models = { + 'track.trackinglog': { + 'Meta': {'object_name': 'TrackingLog'}, + 'agent': 
('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}), + 'dtcreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), + 'event': ('django.db.models.fields.TextField', [], {'blank': 'True'}), + 'event_source': ('django.db.models.fields.CharField', [], {'max_length': '32'}), + 'event_type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'ip': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), + 'page': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}), + 'time': ('django.db.models.fields.DateTimeField', [], {}), + 'username': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}) + } + } + + complete_apps = ['track'] \ No newline at end of file diff --git a/common/djangoapps/track/migrations/0002_auto__add_field_trackinglog_host__chg_field_trackinglog_event_type__ch.py b/common/djangoapps/track/migrations/0002_auto__add_field_trackinglog_host__chg_field_trackinglog_event_type__ch.py new file mode 100644 index 0000000000..4c73aa3bfd --- /dev/null +++ b/common/djangoapps/track/migrations/0002_auto__add_field_trackinglog_host__chg_field_trackinglog_event_type__ch.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +import datetime +from south.db import db +from south.v2 import SchemaMigration +from django.db import models + + +class Migration(SchemaMigration): + + def forwards(self, orm): + # Adding field 'TrackingLog.host' + db.add_column('track_trackinglog', 'host', + self.gf('django.db.models.fields.CharField')(default='', max_length=64, blank=True), + keep_default=False) + + + # Changing field 'TrackingLog.event_type' + db.alter_column('track_trackinglog', 'event_type', self.gf('django.db.models.fields.CharField')(max_length=512)) + + # Changing field 'TrackingLog.page' + 
db.alter_column('track_trackinglog', 'page', self.gf('django.db.models.fields.CharField')(max_length=512, null=True)) + + def backwards(self, orm): + # Deleting field 'TrackingLog.host' + db.delete_column('track_trackinglog', 'host') + + + # Changing field 'TrackingLog.event_type' + db.alter_column('track_trackinglog', 'event_type', self.gf('django.db.models.fields.CharField')(max_length=32)) + + # Changing field 'TrackingLog.page' + db.alter_column('track_trackinglog', 'page', self.gf('django.db.models.fields.CharField')(max_length=32, null=True)) + + models = { + 'track.trackinglog': { + 'Meta': {'object_name': 'TrackingLog'}, + 'agent': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}), + 'dtcreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), + 'event': ('django.db.models.fields.TextField', [], {'blank': 'True'}), + 'event_source': ('django.db.models.fields.CharField', [], {'max_length': '32'}), + 'event_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}), + 'host': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'ip': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), + 'page': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}), + 'time': ('django.db.models.fields.DateTimeField', [], {}), + 'username': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}) + } + } + + complete_apps = ['track'] \ No newline at end of file diff --git a/common/djangoapps/track/migrations/__init__.py b/common/djangoapps/track/migrations/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/common/djangoapps/track/models.py b/common/djangoapps/track/models.py index 401fa2832f..dfdf7a0558 100644 --- a/common/djangoapps/track/models.py 
+++ b/common/djangoapps/track/models.py @@ -7,11 +7,12 @@ class TrackingLog(models.Model): username = models.CharField(max_length=32,blank=True) ip = models.CharField(max_length=32,blank=True) event_source = models.CharField(max_length=32) - event_type = models.CharField(max_length=32,blank=True) + event_type = models.CharField(max_length=512,blank=True) event = models.TextField(blank=True) agent = models.CharField(max_length=256,blank=True) - page = models.CharField(max_length=32,blank=True,null=True) + page = models.CharField(max_length=512,blank=True,null=True) time = models.DateTimeField('event time') + host = models.CharField(max_length=64,blank=True) def __unicode__(self): s = "[%s] %s@%s: %s | %s | %s | %s" % (self.time, self.username, self.ip, self.event_source, diff --git a/common/djangoapps/track/views.py b/common/djangoapps/track/views.py index 434e75a63f..54bd476799 100644 --- a/common/djangoapps/track/views.py +++ b/common/djangoapps/track/views.py @@ -17,7 +17,7 @@ from track.models import TrackingLog log = logging.getLogger("tracking") -LOGFIELDS = ['username','ip','event_source','event_type','event','agent','page','time'] +LOGFIELDS = ['username','ip','event_source','event_type','event','agent','page','time','host'] def log_event(event): event_str = json.dumps(event) @@ -58,6 +58,7 @@ def user_track(request): "agent": agent, "page": request.GET['page'], "time": datetime.datetime.utcnow().isoformat(), + "host": request.META['SERVER_NAME'], } log_event(event) return HttpResponse('success') @@ -83,6 +84,7 @@ def server_track(request, event_type, event, page=None): "agent": agent, "page": page, "time": datetime.datetime.utcnow().isoformat(), + "host": request.META['SERVER_NAME'], } if event_type.startswith("/event_logs") and request.user.is_staff: # don't log diff --git a/common/lib/capa/.coveragerc b/common/lib/capa/.coveragerc index 6af3218f75..149a4c860a 100644 --- a/common/lib/capa/.coveragerc +++ b/common/lib/capa/.coveragerc @@ -7,6 +7,7 @@ source 
= common/lib/capa ignore_errors = True [html] +title = Capa Python Test Coverage Report directory = reports/common/lib/capa/cover [xml] diff --git a/common/lib/capa/capa/inputtypes.py b/common/lib/capa/capa/inputtypes.py index 73056bc09e..1d3646fefc 100644 --- a/common/lib/capa/capa/inputtypes.py +++ b/common/lib/capa/capa/inputtypes.py @@ -735,51 +735,3 @@ class ChemicalEquationInput(InputTypeBase): registry.register(ChemicalEquationInput) #----------------------------------------------------------------------------- - -class OpenEndedInput(InputTypeBase): - """ - A text area input for code--uses codemirror, does syntax highlighting, special tab handling, - etc. - """ - - template = "openendedinput.html" - tags = ['openendedinput'] - - # pulled out for testing - submitted_msg = ("Feedback not yet available. Reload to check again. " - "Once the problem is graded, this message will be " - "replaced with the grader's feedback") - - @classmethod - def get_attributes(cls): - """ - Convert options to a convenient format. - """ - return [Attribute('rows', '30'), - Attribute('cols', '80'), - Attribute('hidden', ''), - ] - - def setup(self): - """ - Implement special logic: handle queueing state, and default input. 
- """ - # if no student input yet, then use the default input given by the problem - if not self.value: - self.value = self.xml.text - - # Check if problem has been queued - self.queue_len = 0 - # Flag indicating that the problem has been queued, 'msg' is length of queue - if self.status == 'incomplete': - self.status = 'queued' - self.queue_len = self.msg - self.msg = self.submitted_msg - - def _extra_context(self): - """Defined queue_len, add it """ - return {'queue_len': self.queue_len,} - -registry.register(OpenEndedInput) - -#----------------------------------------------------------------------------- diff --git a/common/lib/capa/capa/responsetypes.py b/common/lib/capa/capa/responsetypes.py index 8517e71d04..3d97cb0bea 100644 --- a/common/lib/capa/capa/responsetypes.py +++ b/common/lib/capa/capa/responsetypes.py @@ -629,7 +629,7 @@ class MultipleChoiceResponse(LoncapaResponse): # define correct choices (after calling secondary setup) xml = self.xml cxml = xml.xpath('//*[@id=$id]//choice[@correct="true"]', id=xml.get('id')) - self.correct_choices = [choice.get('name') for choice in cxml] + self.correct_choices = [contextualize_text(choice.get('name'), self.context) for choice in cxml] def mc_setup_response(self): ''' @@ -723,7 +723,7 @@ class OptionResponse(LoncapaResponse): return cmap def get_answers(self): - amap = dict([(af.get('id'), af.get('correct')) for af in self.answer_fields]) + amap = dict([(af.get('id'), contextualize_text(af.get('correct'), self.context)) for af in self.answer_fields]) # log.debug('%s: expected answers=%s' % (unicode(self),amap)) return amap @@ -1815,347 +1815,6 @@ class ImageResponse(LoncapaResponse): return (dict([(ie.get('id'), ie.get('rectangle')) for ie in self.ielements]), dict([(ie.get('id'), ie.get('regions')) for ie in self.ielements])) #----------------------------------------------------------------------------- - -class OpenEndedResponse(LoncapaResponse): - """ - Grade student open ended responses using an external 
grading system, - accessed through the xqueue system. - - Expects 'xqueue' dict in ModuleSystem with the following keys that are - needed by OpenEndedResponse: - - system.xqueue = { 'interface': XqueueInterface object, - 'callback_url': Per-StudentModule callback URL - where results are posted (string), - } - - External requests are only submitted for student submission grading - (i.e. and not for getting reference answers) - - By default, uses the OpenEndedResponse.DEFAULT_QUEUE queue. - """ - - DEFAULT_QUEUE = 'open-ended' - response_tag = 'openendedresponse' - allowed_inputfields = ['openendedinput'] - max_inputfields = 1 - - def setup_response(self): - ''' - Configure OpenEndedResponse from XML. - ''' - xml = self.xml - self.url = xml.get('url', None) - self.queue_name = xml.get('queuename', self.DEFAULT_QUEUE) - - # The openendedparam tag encapsulates all grader settings - oeparam = self.xml.find('openendedparam') - prompt = self.xml.find('prompt') - rubric = self.xml.find('openendedrubric') - - if oeparam is None: - raise ValueError("No oeparam found in problem xml.") - if prompt is None: - raise ValueError("No prompt found in problem xml.") - if rubric is None: - raise ValueError("No rubric found in problem xml.") - - self._parse(oeparam, prompt, rubric) - - @staticmethod - def stringify_children(node): - """ - Modify code from stringify_children in xmodule. 
Didn't import directly - in order to avoid capa depending on xmodule (seems to be avoided in - code) - """ - parts=[node.text if node.text is not None else ''] - for p in node.getchildren(): - parts.append(etree.tostring(p, with_tail=True, encoding='unicode')) - - return ' '.join(parts) - - def _parse(self, oeparam, prompt, rubric): - ''' - Parse OpenEndedResponse XML: - self.initial_display - self.payload - dict containing keys -- - 'grader' : path to grader settings file, 'problem_id' : id of the problem - - self.answer - What to display when show answer is clicked - ''' - # Note that OpenEndedResponse is agnostic to the specific contents of grader_payload - prompt_string = self.stringify_children(prompt) - rubric_string = self.stringify_children(rubric) - - grader_payload = oeparam.find('grader_payload') - grader_payload = grader_payload.text if grader_payload is not None else '' - - #Update grader payload with student id. If grader payload not json, error. - try: - parsed_grader_payload = json.loads(grader_payload) - # NOTE: self.system.location is valid because the capa_module - # __init__ adds it (easiest way to get problem location into - # response types) - except TypeError, ValueError: - log.exception("Grader payload %r is not a json object!", grader_payload) - parsed_grader_payload.update({ - 'location' : self.system.location, - 'course_id' : self.system.course_id, - 'prompt' : prompt_string, - 'rubric' : rubric_string, - }) - updated_grader_payload = json.dumps(parsed_grader_payload) - - self.payload = {'grader_payload': updated_grader_payload} - - self.initial_display = find_with_default(oeparam, 'initial_display', '') - self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.') - try: - self.max_score = int(find_with_default(oeparam, 'max_score', 1)) - except ValueError: - self.max_score = 1 - - def get_score(self, student_answers): - - try: - submission = student_answers[self.answer_id] - except KeyError: - msg = ('Cannot get 
student answer for answer_id: {0}. student_answers {1}' - .format(self.answer_id, student_answers)) - log.exception(msg) - raise LoncapaProblemError(msg) - - # Prepare xqueue request - #------------------------------------------------------------ - - qinterface = self.system.xqueue['interface'] - qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat) - - anonymous_student_id = self.system.anonymous_student_id - - # Generate header - queuekey = xqueue_interface.make_hashkey(str(self.system.seed) + qtime + - anonymous_student_id + - self.answer_id) - - xheader = xqueue_interface.make_xheader(lms_callback_url=self.system.xqueue['callback_url'], - lms_key=queuekey, - queue_name=self.queue_name) - - self.context.update({'submission': submission}) - - contents = self.payload.copy() - - # Metadata related to the student submission revealed to the external grader - student_info = {'anonymous_student_id': anonymous_student_id, - 'submission_time': qtime, - } - - #Update contents with student response and student info - contents.update({ - 'student_info': json.dumps(student_info), - 'student_response': submission, - 'max_score' : self.max_score - }) - - # Submit request. When successful, 'msg' is the prior length of the queue - (error, msg) = qinterface.send_to_queue(header=xheader, - body=json.dumps(contents)) - - # State associated with the queueing request - queuestate = {'key': queuekey, - 'time': qtime,} - - cmap = CorrectMap() - if error: - cmap.set(self.answer_id, queuestate=None, - msg='Unable to deliver your submission to grader. (Reason: {0}.)' - ' Please try again later.'.format(msg)) - else: - # Queueing mechanism flags: - # 1) Backend: Non-null CorrectMap['queuestate'] indicates that - # the problem has been queued - # 2) Frontend: correctness='incomplete' eventually trickles down - # through inputtypes.textbox and .filesubmission to inform the - # browser that the submission is queued (and it could e.g. 
poll) - cmap.set(self.answer_id, queuestate=queuestate, - correctness='incomplete', msg=msg) - - return cmap - - def update_score(self, score_msg, oldcmap, queuekey): - log.debug(score_msg) - score_msg = self._parse_score_msg(score_msg) - if not score_msg.valid: - oldcmap.set(self.answer_id, - msg = 'Invalid grader reply. Please contact the course staff.') - return oldcmap - - correctness = 'correct' if score_msg.correct else 'incorrect' - - # TODO: Find out how this is used elsewhere, if any - self.context['correct'] = correctness - - # Replace 'oldcmap' with new grading results if queuekey matches. If queuekey - # does not match, we keep waiting for the score_msg whose key actually matches - if oldcmap.is_right_queuekey(self.answer_id, queuekey): - # Sanity check on returned points - points = score_msg.points - if points < 0: - points = 0 - - # Queuestate is consumed, so reset it to None - oldcmap.set(self.answer_id, npoints=points, correctness=correctness, - msg = score_msg.msg.replace(' ', ' '), queuestate=None) - else: - log.debug('OpenEndedResponse: queuekey {0} does not match for answer_id={1}.'.format( - queuekey, self.answer_id)) - - return oldcmap - - def get_answers(self): - anshtml = '
{0}
'.format(self.answer) - return {self.answer_id: anshtml} - - def get_initial_display(self): - return {self.answer_id: self.initial_display} - - def _convert_longform_feedback_to_html(self, response_items): - """ - Take in a dictionary, and return html strings for display to student. - Input: - response_items: Dictionary with keys success, feedback. - if success is True, feedback should be a dictionary, with keys for - types of feedback, and the corresponding feedback values. - if success is False, feedback is actually an error string. - - NOTE: this will need to change when we integrate peer grading, because - that will have more complex feedback. - - Output: - String -- html that can be displayed to the student. - """ - - # We want to display available feedback in a particular order. - # This dictionary specifies which goes first--lower first. - priorities = {# These go at the start of the feedback - 'spelling': 0, - 'grammar': 1, - # needs to be after all the other feedback - 'markup_text': 3} - - default_priority = 2 - - def get_priority(elt): - """ - Args: - elt: a tuple of feedback-type, feedback - Returns: - the priority for this feedback type - """ - return priorities.get(elt[0], default_priority) - - def format_feedback(feedback_type, value): - return """ -
- {value} -
- """.format(feedback_type=feedback_type, value=value) - - # TODO (vshnayder): design and document the details of this format so - # that we can do proper escaping here (e.g. are the graders allowed to - # include HTML?) - - for tag in ['success', 'feedback']: - if tag not in response_items: - return format_feedback('errors', 'Error getting feedback') - - feedback_items = response_items['feedback'] - try: - feedback = json.loads(feedback_items) - except (TypeError, ValueError): - log.exception("feedback_items have invalid json %r", feedback_items) - return format_feedback('errors', 'Could not parse feedback') - - if response_items['success']: - if len(feedback) == 0: - return format_feedback('errors', 'No feedback available') - - feedback_lst = sorted(feedback.items(), key=get_priority) - return u"\n".join(format_feedback(k, v) for k, v in feedback_lst) - else: - return format_feedback('errors', response_items['feedback']) - - - def _format_feedback(self, response_items): - """ - Input: - Dictionary called feedback. Must contain keys seen below. 
- Output: - Return error message or feedback template - """ - - feedback = self._convert_longform_feedback_to_html(response_items) - - if not response_items['success']: - return self.system.render_template("open_ended_error.html", - {'errors' : feedback}) - - feedback_template = self.system.render_template("open_ended_feedback.html", { - 'grader_type': response_items['grader_type'], - 'score': response_items['score'], - 'feedback': feedback, - }) - - return feedback_template - - - def _parse_score_msg(self, score_msg): - """ - Grader reply is a JSON-dump of the following dict - { 'correct': True/False, - 'score': Numeric value (floating point is okay) to assign to answer - 'msg': grader_msg - 'feedback' : feedback from grader - } - - Returns (valid_score_msg, correct, score, msg): - valid_score_msg: Flag indicating valid score_msg format (Boolean) - correct: Correctness of submission (Boolean) - score: Points to be assigned (numeric, can be float) - """ - fail = ScoreMessage(valid=False, correct=False, points=0, msg='') - try: - score_result = json.loads(score_msg) - except (TypeError, ValueError): - log.error("External grader message should be a JSON-serialized dict." - " Received score_msg = {0}".format(score_msg)) - return fail - - if not isinstance(score_result, dict): - log.error("External grader message should be a JSON-serialized dict." - " Received score_result = {0}".format(score_result)) - return fail - - for tag in ['score', 'feedback', 'grader_type', 'success']: - if tag not in score_result: - log.error("External grader message is missing required tag: {0}" - .format(tag)) - return fail - - feedback = self._format_feedback(score_result) - - # HACK: for now, just assume it's correct if you got more than 2/3. - # Also assumes that score_result['score'] is an integer. 
- score_ratio = int(score_result['score']) / self.max_score - correct = (score_ratio >= 0.66) - - #Currently ignore msg and only return feedback (which takes the place of msg) - return ScoreMessage(valid=True, correct=correct, - points=score_result['score'], msg=feedback) - -#----------------------------------------------------------------------------- # TEMPORARY: List of all response subclasses # FIXME: To be replaced by auto-registration @@ -2172,5 +1831,4 @@ __all__ = [CodeResponse, ChoiceResponse, MultipleChoiceResponse, TrueFalseResponse, - JavascriptResponse, - OpenEndedResponse] + JavascriptResponse] diff --git a/common/lib/capa/capa/templates/openendedinput.html b/common/lib/capa/capa/templates/openendedinput.html deleted file mode 100644 index 65fc7fb9bb..0000000000 --- a/common/lib/capa/capa/templates/openendedinput.html +++ /dev/null @@ -1,32 +0,0 @@ -
- - -
- % if status == 'unsubmitted': - Unanswered - % elif status == 'correct': - Correct - % elif status == 'incorrect': - Incorrect - % elif status == 'queued': - Submitted for grading - % endif - - % if hidden: -
- % endif -
- - - - % if status == 'queued': - - % endif -
- ${msg|n} -
-
diff --git a/common/lib/xmodule/.coveragerc b/common/lib/xmodule/.coveragerc index 310c8e778b..baadd30829 100644 --- a/common/lib/xmodule/.coveragerc +++ b/common/lib/xmodule/.coveragerc @@ -7,6 +7,7 @@ source = common/lib/xmodule ignore_errors = True [html] +title = XModule Python Test Coverage Report directory = reports/common/lib/xmodule/cover [xml] diff --git a/common/lib/xmodule/setup.py b/common/lib/xmodule/setup.py index d3889bc388..86636ef05a 100644 --- a/common/lib/xmodule/setup.py +++ b/common/lib/xmodule/setup.py @@ -19,6 +19,7 @@ setup( "abtest = xmodule.abtest_module:ABTestDescriptor", "book = xmodule.backcompat_module:TranslateCustomTagDescriptor", "chapter = xmodule.seq_module:SequenceDescriptor", + "combinedopenended = xmodule.combined_open_ended_module:CombinedOpenEndedDescriptor", "course = xmodule.course_module:CourseDescriptor", "customtag = xmodule.template_module:CustomTagDescriptor", "discuss = xmodule.backcompat_module:TranslateCustomTagDescriptor", @@ -28,7 +29,6 @@ setup( "problem = xmodule.capa_module:CapaDescriptor", "problemset = xmodule.seq_module:SequenceDescriptor", "section = xmodule.backcompat_module:SemanticSectionDescriptor", - "selfassessment = xmodule.self_assessment_module:SelfAssessmentDescriptor", "sequential = xmodule.seq_module:SequenceDescriptor", "slides = xmodule.backcompat_module:TranslateCustomTagDescriptor", "vertical = xmodule.vertical_module:VerticalDescriptor", @@ -36,6 +36,7 @@ setup( "videodev = xmodule.backcompat_module:TranslateCustomTagDescriptor", "videosequence = xmodule.seq_module:SequenceDescriptor", "discussion = xmodule.discussion_module:DiscussionDescriptor", + "graphical_slider_tool = xmodule.gst_module:GraphicalSliderToolDescriptor", ] } ) diff --git a/common/lib/xmodule/xmodule/capa_module.py b/common/lib/xmodule/xmodule/capa_module.py index 4c10a1703a..1da271072a 100644 --- a/common/lib/xmodule/xmodule/capa_module.py +++ b/common/lib/xmodule/xmodule/capa_module.py @@ -430,6 +430,7 @@ class 
CapaModule(XModule): return False + def update_score(self, get): """ Delivers grading response (e.g. from asynchronous code checking) to diff --git a/common/lib/xmodule/xmodule/combined_open_ended_module.py b/common/lib/xmodule/xmodule/combined_open_ended_module.py new file mode 100644 index 0000000000..a88acc6ffd --- /dev/null +++ b/common/lib/xmodule/xmodule/combined_open_ended_module.py @@ -0,0 +1,598 @@ +import copy +from fs.errors import ResourceNotFoundError +import itertools +import json +import logging +from lxml import etree +from lxml.html import rewrite_links +from path import path +import os +import sys + +from pkg_resources import resource_string + +from .capa_module import only_one, ComplexEncoder +from .editing_module import EditingDescriptor +from .html_checker import check_html +from progress import Progress +from .stringify import stringify_children +from .x_module import XModule +from .xml_module import XmlDescriptor +from xmodule.modulestore import Location +import self_assessment_module +import open_ended_module + +from mitxmako.shortcuts import render_to_string + +log = logging.getLogger("mitx.courseware") + +# Set the default number of max attempts. Should be 1 for production +# Set higher for debugging/testing +# attempts specified in xml definition overrides this. +MAX_ATTEMPTS = 10000 + +# Set maximum available number of points. +# Overriden by max_score specified in xml. +MAX_SCORE = 1 + +class CombinedOpenEndedModule(XModule): + """ + This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc). + It transitions between problems, and support arbitrary ordering. + Each combined open ended module contains one or multiple "child" modules. + Child modules track their own state, and can transition between states. They also implement get_html and + handle_ajax. 
+ The combined open ended module transitions between child modules as appropriate, tracks its own state, and passess + ajax requests from the browser to the child module or handles them itself (in the cases of reset and next problem) + ajax actions implemented by all children are: + 'save_answer' -- Saves the student answer + 'save_assessment' -- Saves the student assessment (or external grader assessment) + 'save_post_assessment' -- saves a post assessment (hint, feedback on feedback, etc) + ajax actions implemented by combined open ended module are: + 'reset' -- resets the whole combined open ended module and returns to the first child module + 'next_problem' -- moves to the next child module + 'get_results' -- gets results from a given child module + + Types of children. Task is synonymous with child module, so each combined open ended module + incorporates multiple children (tasks): + openendedmodule + selfassessmentmodule + """ + STATE_VERSION = 1 + + # states + INITIAL = 'initial' + ASSESSING = 'assessing' + INTERMEDIATE_DONE = 'intermediate_done' + DONE = 'done' + + js = {'coffee': [resource_string(__name__, 'js/src/combinedopenended/display.coffee'), + resource_string(__name__, 'js/src/collapsible.coffee'), + resource_string(__name__, 'js/src/javascript_loader.coffee'), + ]} + js_module_name = "CombinedOpenEnded" + + css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]} + + def __init__(self, system, location, definition, descriptor, + instance_state=None, shared_state=None, **kwargs): + XModule.__init__(self, system, location, definition, descriptor, + instance_state, shared_state, **kwargs) + + """ + Definition file should have one or many task blocks, a rubric block, and a prompt block: + + Sample file: + + + Blah blah rubric. + + + Some prompt. + + + + + What hint about this problem would you give to someone? + + + Save Succcesful. Thanks for participating! + + + + + + + Enter essay here. + This is the answer. 
+ {"grader_settings" : "ml_grading.conf", + "problem_id" : "6.002x/Welcome/OETest"} + + + + + + """ + + # Load instance state + if instance_state is not None: + instance_state = json.loads(instance_state) + else: + instance_state = {} + + #We need to set the location here so the child modules can use it + system.set('location', location) + + #Tells the system which xml definition to load + self.current_task_number = instance_state.get('current_task_number', 0) + #This loads the states of the individual children + self.task_states = instance_state.get('task_states', []) + #Overall state of the combined open ended module + self.state = instance_state.get('state', self.INITIAL) + + self.attempts = instance_state.get('attempts', 0) + + #Allow reset is true if student has failed the criteria to move to the next child task + self.allow_reset = instance_state.get('ready_to_reset', False) + self.max_attempts = int(self.metadata.get('attempts', MAX_ATTEMPTS)) + + # Used for progress / grading. Currently get credit just for + # completion (doesn't matter if you self-assessed correct/incorrect). + self._max_score = int(self.metadata.get('max_score', MAX_SCORE)) + + #Static data is passed to the child modules to render + self.static_data = { + 'max_score': self._max_score, + 'max_attempts': self.max_attempts, + 'prompt': definition['prompt'], + 'rubric': definition['rubric'] + } + + self.task_xml = definition['task_xml'] + self.setup_next_task() + + def get_tag_name(self, xml): + """ + Gets the tag name of a given xml block. + Input: XML string + Output: The name of the root tag + """ + tag = etree.fromstring(xml).tag + return tag + + def overwrite_state(self, current_task_state): + """ + Overwrites an instance state and sets the latest response to the current response. This is used + to ensure that the student response is carried over from the first child to the rest. 
+ Input: Task state json string + Output: Task state json string + """ + last_response_data = self.get_last_response(self.current_task_number - 1) + last_response = last_response_data['response'] + + loaded_task_state = json.loads(current_task_state) + if loaded_task_state['state'] == self.INITIAL: + loaded_task_state['state'] = self.ASSESSING + loaded_task_state['created'] = True + loaded_task_state['history'].append({'answer': last_response}) + current_task_state = json.dumps(loaded_task_state) + return current_task_state + + def child_modules(self): + """ + Returns the constructors associated with the child modules in a dictionary. This makes writing functions + simpler (saves code duplication) + Input: None + Output: A dictionary of dictionaries containing the descriptor functions and module functions + """ + child_modules = { + 'openended': open_ended_module.OpenEndedModule, + 'selfassessment': self_assessment_module.SelfAssessmentModule, + } + child_descriptors = { + 'openended': open_ended_module.OpenEndedDescriptor, + 'selfassessment': self_assessment_module.SelfAssessmentDescriptor, + } + children = { + 'modules': child_modules, + 'descriptors': child_descriptors, + } + return children + + def setup_next_task(self, reset=False): + """ + Sets up the next task for the module. Creates an instance state if none exists, carries over the answer + from the last instance state to the next if needed. + Input: A boolean indicating whether or not the reset function is calling. 
+ Output: Boolean True (not useful right now) + """ + current_task_state = None + if len(self.task_states) > self.current_task_number: + current_task_state = self.task_states[self.current_task_number] + + self.current_task_xml = self.task_xml[self.current_task_number] + + if self.current_task_number > 0: + self.allow_reset = self.check_allow_reset() + if self.allow_reset: + self.current_task_number = self.current_task_number - 1 + + current_task_type = self.get_tag_name(self.current_task_xml) + + children = self.child_modules() + child_task_module = children['modules'][current_task_type] + + self.current_task_descriptor = children['descriptors'][current_task_type](self.system) + + #This is the xml object created from the xml definition of the current task + etree_xml = etree.fromstring(self.current_task_xml) + + #This sends the etree_xml object through the descriptor module of the current task, and + #returns the xml parsed by the descriptor + self.current_task_parsed_xml = self.current_task_descriptor.definition_from_xml(etree_xml, self.system) + if current_task_state is None and self.current_task_number == 0: + self.current_task = child_task_module(self.system, self.location, + self.current_task_parsed_xml, self.current_task_descriptor, self.static_data) + self.task_states.append(self.current_task.get_instance_state()) + self.state = self.ASSESSING + elif current_task_state is None and self.current_task_number > 0: + last_response_data = self.get_last_response(self.current_task_number - 1) + last_response = last_response_data['response'] + current_task_state=json.dumps({ + 'state' : self.ASSESSING, + 'version' : self.STATE_VERSION, + 'max_score' : self._max_score, + 'attempts' : 0, + 'created' : True, + 'history' : [{'answer' : str(last_response)}], + }) + self.current_task = child_task_module(self.system, self.location, + self.current_task_parsed_xml, self.current_task_descriptor, self.static_data, + instance_state=current_task_state) + 
self.task_states.append(self.current_task.get_instance_state()) + self.state = self.ASSESSING + else: + if self.current_task_number > 0 and not reset: + current_task_state = self.overwrite_state(current_task_state) + self.current_task = child_task_module(self.system, self.location, + self.current_task_parsed_xml, self.current_task_descriptor, self.static_data, + instance_state=current_task_state) + + log.debug(current_task_state) + return True + + def check_allow_reset(self): + """ + Checks to see if the student has passed the criteria to move to the next module. If not, sets + allow_reset to true and halts the student progress through the tasks. + Input: None + Output: the allow_reset attribute of the current module. + """ + if not self.allow_reset: + if self.current_task_number > 0: + last_response_data = self.get_last_response(self.current_task_number - 1) + current_response_data = self.get_current_attributes(self.current_task_number) + + if(current_response_data['min_score_to_attempt'] > last_response_data['score'] + or current_response_data['max_score_to_attempt'] < last_response_data['score']): + self.state = self.DONE + self.allow_reset = True + + return self.allow_reset + + def get_context(self): + """ + Generates a context dictionary that is used to render html. + Input: None + Output: A dictionary that can be rendered into the combined open ended template. + """ + task_html = self.get_html_base() + #set context variables and render template + + context = { + 'items': [{'content': task_html}], + 'ajax_url': self.system.ajax_url, + 'allow_reset': self.allow_reset, + 'state': self.state, + 'task_count': len(self.task_xml), + 'task_number': self.current_task_number + 1, + 'status': self.get_status(), + } + + return context + + def get_html(self): + """ + Gets HTML for rendering. 
+ Input: None + Output: rendered html + """ + context = self.get_context() + html = self.system.render_template('combined_open_ended.html', context) + return html + + def get_html_nonsystem(self): + """ + Gets HTML for rendering via AJAX. Does not use system, because system contains some additional + html, which is not appropriate for returning via ajax calls. + Input: None + Output: HTML rendered directly via Mako + """ + context = self.get_context() + html = render_to_string('combined_open_ended.html', context) + return html + + def get_html_base(self): + """ + Gets the HTML associated with the current child task + Input: None + Output: Child task HTML + """ + self.update_task_states() + html = self.current_task.get_html(self.system) + return_html = rewrite_links(html, self.rewrite_content_links) + return return_html + + def get_current_attributes(self, task_number): + """ + Gets the min and max score to attempt attributes of the specified task. + Input: The number of the task. + Output: The minimum and maximum scores needed to move on to the specified task. + """ + task_xml = self.task_xml[task_number] + etree_xml = etree.fromstring(task_xml) + min_score_to_attempt = int(etree_xml.attrib.get('min_score_to_attempt', 0)) + max_score_to_attempt = int(etree_xml.attrib.get('max_score_to_attempt', self._max_score)) + return {'min_score_to_attempt': min_score_to_attempt, 'max_score_to_attempt': max_score_to_attempt} + + def get_last_response(self, task_number): + """ + Returns data associated with the specified task number, such as the last response, score, etc. + Input: The number of the task. + Output: A dictionary that contains information about the specified task. 
+ """ + last_response = "" + task_state = self.task_states[task_number] + task_xml = self.task_xml[task_number] + task_type = self.get_tag_name(task_xml) + + children = self.child_modules() + + task_descriptor = children['descriptors'][task_type](self.system) + etree_xml = etree.fromstring(task_xml) + + min_score_to_attempt = int(etree_xml.attrib.get('min_score_to_attempt', 0)) + max_score_to_attempt = int(etree_xml.attrib.get('max_score_to_attempt', self._max_score)) + + task_parsed_xml = task_descriptor.definition_from_xml(etree_xml, self.system) + task = children['modules'][task_type](self.system, self.location, task_parsed_xml, task_descriptor, + self.static_data, instance_state=task_state) + last_response = task.latest_answer() + last_score = task.latest_score() + last_post_assessment = task.latest_post_assessment() + last_post_feedback = "" + if task_type == "openended": + last_post_assessment = task.latest_post_assessment(short_feedback=False, join_feedback=False) + if isinstance(last_post_assessment, list): + eval_list = [] + for i in xrange(0, len(last_post_assessment)): + eval_list.append(task.format_feedback_with_evaluation(last_post_assessment[i])) + last_post_evaluation = "".join(eval_list) + else: + last_post_evaluation = task.format_feedback_with_evaluation(last_post_assessment) + last_post_assessment = last_post_evaluation + last_correctness = task.is_last_response_correct() + max_score = task.max_score() + state = task.state + last_response_dict = { + 'response': last_response, + 'score': last_score, + 'post_assessment': last_post_assessment, + 'type': task_type, + 'max_score': max_score, + 'state': state, + 'human_state': task.HUMAN_NAMES[state], + 'correct': last_correctness, + 'min_score_to_attempt': min_score_to_attempt, + 'max_score_to_attempt': max_score_to_attempt, + } + + return last_response_dict + + def update_task_states(self): + """ + Updates the task state of the combined open ended module with the task state of the current child 
module. + Input: None + Output: boolean indicating whether or not the task state changed. + """ + changed = False + if not self.allow_reset: + self.task_states[self.current_task_number] = self.current_task.get_instance_state() + current_task_state = json.loads(self.task_states[self.current_task_number]) + if current_task_state['state'] == self.DONE: + self.current_task_number += 1 + if self.current_task_number >= (len(self.task_xml)): + self.state = self.DONE + self.current_task_number = len(self.task_xml) - 1 + else: + self.state = self.INITIAL + changed = True + self.setup_next_task() + return changed + + def update_task_states_ajax(self, return_html): + """ + Runs the update task states function for ajax calls. Currently the same as update_task_states + Input: The html returned by the handle_ajax function of the child + Output: New html that should be rendered + """ + changed = self.update_task_states() + if changed: + #return_html=self.get_html() + pass + return return_html + + def get_results(self, get): + """ + Gets the results of a given grader via ajax. + Input: AJAX get dictionary + Output: Dictionary to be rendered via ajax that contains the result html. + """ + task_number = int(get['task_number']) + self.update_task_states() + response_dict = self.get_last_response(task_number) + context = {'results': response_dict['post_assessment'], 'task_number': task_number + 1} + html = render_to_string('combined_open_ended_results.html', context) + return {'html': html, 'success': True} + + def handle_ajax(self, dispatch, get): + """ + This is called by courseware.module_render, to handle an AJAX call. + "get" is request.POST. 
+ + Returns a json dictionary: + { 'progress_changed' : True/False, + 'progress': 'none'/'in_progress'/'done', + } + """ + + handlers = { + 'next_problem': self.next_problem, + 'reset': self.reset, + 'get_results': self.get_results + } + + if dispatch not in handlers: + return_html = self.current_task.handle_ajax(dispatch, get, self.system) + return self.update_task_states_ajax(return_html) + + d = handlers[dispatch](get) + return json.dumps(d, cls=ComplexEncoder) + + def next_problem(self, get): + """ + Called via ajax to advance to the next problem. + Input: AJAX get request. + Output: Dictionary to be rendered + """ + self.update_task_states() + return {'success': True, 'html': self.get_html_nonsystem(), 'allow_reset': self.allow_reset} + + def reset(self, get): + """ + If resetting is allowed, reset the state of the combined open ended module. + Input: AJAX get dictionary + Output: AJAX dictionary to tbe rendered + """ + if self.state != self.DONE: + if not self.allow_reset: + return self.out_of_sync_error(get) + + if self.attempts > self.max_attempts: + return { + 'success': False, + 'error': 'Too many attempts.' + } + self.state = self.INITIAL + self.allow_reset = False + for i in xrange(0, len(self.task_xml)): + self.current_task_number = i + self.setup_next_task(reset=True) + self.current_task.reset(self.system) + self.task_states[self.current_task_number] = self.current_task.get_instance_state() + self.current_task_number = 0 + self.allow_reset = False + self.setup_next_task() + return {'success': True, 'html': self.get_html_nonsystem()} + + def get_instance_state(self): + """ + Returns the current instance state. The module can be recreated from the instance state. + Input: None + Output: A dictionary containing the instance state. 
+ """ + + state = { + 'version': self.STATE_VERSION, + 'current_task_number': self.current_task_number, + 'state': self.state, + 'task_states': self.task_states, + 'attempts': self.attempts, + 'ready_to_reset': self.allow_reset, + } + + return json.dumps(state) + + def get_status(self): + """ + Gets the status panel to be displayed at the top right. + Input: None + Output: The status html to be rendered + """ + status = [] + for i in xrange(0, self.current_task_number + 1): + task_data = self.get_last_response(i) + task_data.update({'task_number': i + 1}) + status.append(task_data) + context = {'status_list': status} + status_html = self.system.render_template("combined_open_ended_status.html", context) + + return status_html + + +class CombinedOpenEndedDescriptor(XmlDescriptor, EditingDescriptor): + """ + Module for adding combined open ended questions + """ + mako_template = "widgets/html-edit.html" + module_class = CombinedOpenEndedModule + filename_extension = "xml" + + stores_state = True + has_score = True + template_dir_name = "combinedopenended" + + js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]} + js_module_name = "HTMLEditingDescriptor" + + @classmethod + def definition_from_xml(cls, xml_object, system): + """ + Pull out the individual tasks, the rubric, and the prompt, and parse + + Returns: + { + 'rubric': 'some-html', + 'prompt': 'some-html', + 'task_xml': dictionary of xml strings, + } + """ + expected_children = ['task', 'rubric', 'prompt'] + for child in expected_children: + if len(xml_object.xpath(child)) == 0: + raise ValueError("Combined Open Ended definition must include at least one '{0}' tag".format(child)) + + def parse_task(k): + """Assumes that xml_object has child k""" + return [stringify_children(xml_object.xpath(k)[i]) for i in xrange(0, len(xml_object.xpath(k)))] + + def parse(k): + """Assumes that xml_object has child k""" + return xml_object.xpath(k)[0] + + return {'task_xml': parse_task('task'), 'prompt': 
parse('prompt'), 'rubric': parse('rubric')} + + + def definition_to_xml(self, resource_fs): + '''Return an xml element representing this definition.''' + elt = etree.Element('combinedopenended') + + def add_child(k): + child_str = '<{tag}>{body}'.format(tag=k, body=self.definition[k]) + child_node = etree.fromstring(child_str) + elt.append(child_node) + + for child in ['task']: + add_child(child) + + return elt \ No newline at end of file diff --git a/common/lib/xmodule/xmodule/combined_open_ended_rubric.py b/common/lib/xmodule/xmodule/combined_open_ended_rubric.py new file mode 100644 index 0000000000..0b2ca1ca2c --- /dev/null +++ b/common/lib/xmodule/xmodule/combined_open_ended_rubric.py @@ -0,0 +1,129 @@ +from mitxmako.shortcuts import render_to_string +import logging +from lxml import etree + +log=logging.getLogger(__name__) + +class CombinedOpenEndedRubric: + + @staticmethod + def render_rubric(rubric_xml): + try: + rubric_categories = CombinedOpenEndedRubric.extract_rubric_categories(rubric_xml) + html = render_to_string('open_ended_rubric.html', {'rubric_categories' : rubric_categories}) + except: + log.exception("Could not parse the rubric.") + html = rubric_xml + return html + + @staticmethod + def extract_rubric_categories(element): + ''' + Contstruct a list of categories such that the structure looks like: + [ { category: "Category 1 Name", + options: [{text: "Option 1 Name", points: 0}, {text:"Option 2 Name", points: 5}] + }, + { category: "Category 2 Name", + options: [{text: "Option 1 Name", points: 0}, + {text: "Option 2 Name", points: 1}, + {text: "Option 3 Name", points: 2]}] + + ''' + element = etree.fromstring(element) + categories = [] + for category in element: + if category.tag != 'category': + raise Exception("[capa.inputtypes.extract_categories] Expected a tag: got {0} instead".format(category.tag)) + else: + categories.append(CombinedOpenEndedRubric.extract_category(category)) + return categories + + @staticmethod + def 
extract_category(category): + ''' + construct an individual category + {category: "Category 1 Name", + options: [{text: "Option 1 text", points: 1}, + {text: "Option 2 text", points: 2}]} + + all sorting and auto-point generation occurs in this function + ''' + + has_score=False + descriptionxml = category[0] + scorexml = category[1] + if scorexml.tag == "option": + optionsxml = category[1:] + else: + optionsxml = category[2:] + has_score=True + + # parse description + if descriptionxml.tag != 'description': + raise Exception("[extract_category]: expected description tag, got {0} instead".format(descriptionxml.tag)) + + if has_score: + if scorexml.tag != 'score': + raise Exception("[extract_category]: expected score tag, got {0} instead".format(scorexml.tag)) + + for option in optionsxml: + if option.tag != "option": + raise Exception("[extract_category]: expected option tag, got {0} instead".format(option.tag)) + + description = descriptionxml.text + + if has_score: + score = int(scorexml.text) + else: + score = 0 + + cur_points = 0 + options = [] + autonumbering = True + # parse options + for option in optionsxml: + if option.tag != 'option': + raise Exception("[extract_category]: expected option tag, got {0} instead".format(option.tag)) + else: + pointstr = option.get("points") + if pointstr: + autonumbering = False + # try to parse this into an int + try: + points = int(pointstr) + except ValueError: + raise Exception("[extract_category]: expected points to have int, got {0} instead".format(pointstr)) + elif autonumbering: + # use the generated one if we're in the right mode + points = cur_points + cur_points = cur_points + 1 + else: + raise Exception("[extract_category]: missing points attribute. 
Cannot continue to auto-create points values after a points value is explicitly dfined.") + optiontext = option.text + selected = False + if has_score: + if points == score: + selected = True + options.append({'text': option.text, 'points': points, 'selected' : selected}) + + # sort and check for duplicates + options = sorted(options, key=lambda option: option['points']) + CombinedOpenEndedRubric.validate_options(options) + + return {'description': description, 'options': options, 'score' : score, 'has_score' : has_score} + + @staticmethod + def validate_options(options): + ''' + Validates a set of options. This can and should be extended to filter out other bad edge cases + ''' + if len(options) == 0: + raise Exception("[extract_category]: no options associated with this category") + if len(options) == 1: + return + prev = options[0]['points'] + for option in options[1:]: + if prev == option['points']: + raise Exception("[extract_category]: found duplicate point values between two different options") + else: + prev = option['points'] \ No newline at end of file diff --git a/common/lib/xmodule/xmodule/course_module.py b/common/lib/xmodule/xmodule/course_module.py index 1b9da9fb06..697499b65e 100644 --- a/common/lib/xmodule/xmodule/course_module.py +++ b/common/lib/xmodule/xmodule/course_module.py @@ -1,9 +1,9 @@ -from fs.errors import ResourceNotFoundError import logging from lxml import etree -from path import path # NOTE (THK): Only used for detecting presence of syllabus +from path import path # NOTE (THK): Only used for detecting presence of syllabus import requests import time +from datetime import datetime from xmodule.util.decorators import lazyproperty from xmodule.graders import load_grading_policy @@ -13,6 +13,7 @@ from xmodule.timeparse import parse_time, stringify_time log = logging.getLogger(__name__) + class CourseDescriptor(SequenceDescriptor): module_class = SequenceModule @@ -115,7 +116,8 @@ class CourseDescriptor(SequenceDescriptor): """Parse the 
policy specified in policy_str, and save it""" try: self._grading_policy = load_grading_policy(policy_str) - except: + except Exception, err: + log.exception('Failed to load grading policy:') self.system.error_tracker("Failed to load grading policy") # Setting this to an empty dictionary will lead to errors when # grading needs to happen, but should allow course staff to see @@ -179,6 +181,38 @@ class CourseDescriptor(SequenceDescriptor): def show_calculator(self): return self.metadata.get("show_calculator", None) == "Yes" + @property + def is_new(self): + # The course is "new" if either if the metadata flag is_new is + # true or if the course has not started yet + flag = self.metadata.get('is_new', None) + if flag is None: + return self.days_until_start > 1 + elif isinstance(flag, basestring): + return flag.lower() in ['true', 'yes', 'y'] + else: + return bool(flag) + + @property + def days_until_start(self): + def convert_to_datetime(timestamp): + return datetime.fromtimestamp(time.mktime(timestamp)) + + start_date = convert_to_datetime(self.start) + + # Try to use course advertised date if we can parse it + advertised_start = self.metadata.get('advertised_start', None) + if advertised_start: + try: + start_date = datetime.strptime(advertised_start, + "%Y-%m-%dT%H:%M") + except ValueError: + pass # Invalid date, keep using 'start'' + + now = convert_to_datetime(time.gmtime()) + days_until_start = (start_date - now).days + return days_until_start + @lazyproperty def grading_context(self): """ @@ -258,7 +292,6 @@ class CourseDescriptor(SequenceDescriptor): raise ValueError("{0} is not a course location".format(loc)) return "/".join([loc.org, loc.course, loc.name]) - @property def id(self): """Return the course_id for this course""" @@ -266,7 +299,20 @@ class CourseDescriptor(SequenceDescriptor): @property def start_date_text(self): - displayed_start = self._try_parse_time('advertised_start') or self.start + parsed_advertised_start = 
self._try_parse_time('advertised_start') + + # If the advertised start isn't a real date string, we assume it's free + # form text... + if parsed_advertised_start is None and \ + ('advertised_start' in self.metadata): + return self.metadata['advertised_start'] + + displayed_start = parsed_advertised_start or self.start + + # If we have neither an advertised start or a real start, just return TBD + if not displayed_start: + return "TBD" + return time.strftime("%b %d, %Y", displayed_start) @property @@ -424,4 +470,3 @@ class CourseDescriptor(SequenceDescriptor): @property def org(self): return self.location.org - diff --git a/common/lib/xmodule/xmodule/css/capa/display.scss b/common/lib/xmodule/xmodule/css/capa/display.scss index b25ab3d3a2..929b6dcb48 100644 --- a/common/lib/xmodule/xmodule/css/capa/display.scss +++ b/common/lib/xmodule/xmodule/css/capa/display.scss @@ -297,6 +297,51 @@ section.problem { float: left; } } + + } + .evaluation { + p { + margin-bottom: 4px; + } + } + + + .feedback-on-feedback { + height: 100px; + margin-right: 20px; + } + + .evaluation-response { + header { + text-align: right; + a { + font-size: .85em; + } + } + } + + .evaluation-scoring { + .scoring-list { + list-style-type: none; + margin-left: 3px; + + li { + &:first-child { + margin-left: 0px; + } + display:inline; + margin-left: 50px; + + label { + font-size: .9em; + } + + } + } + + } + .submit-message-container { + margin: 10px 0px ; } } @@ -634,6 +679,10 @@ section.problem { color: #2C2C2C; font-family: monospace; font-size: 1em; + padding-top: 10px; + header { + font-size: 1.4em; + } .shortform { font-weight: bold; diff --git a/common/lib/xmodule/xmodule/css/combinedopenended/display.scss b/common/lib/xmodule/xmodule/css/combinedopenended/display.scss new file mode 100644 index 0000000000..a58e30f1e2 --- /dev/null +++ b/common/lib/xmodule/xmodule/css/combinedopenended/display.scss @@ -0,0 +1,626 @@ +h2 { + margin-top: 0; + margin-bottom: 15px; + + &.problem-header { + 
section.staff { + margin-top: 30px; + font-size: 80%; + } + } + + @media print { + display: block; + width: auto; + border-right: 0; + } +} + +.inline-error { + color: darken($error-red, 10%); +} + +section.combined-open-ended { + @include clearfix; + .status-container + { + float:right; + width:40%; + } + .item-container + { + float:left; + width: 53%; + padding-bottom: 50px; + } + + .result-container + { + float:left; + width: 93%; + position:relative; + } +} + +section.combined-open-ended-status { + + .statusitem { + background-color: #FAFAFA; + color: #2C2C2C; + font-family: monospace; + font-size: 1em; + padding-top: 10px; + } + + .statusitem-current { + background-color: #BEBEBE; + color: #2C2C2C; + font-family: monospace; + font-size: 1em; + padding-top: 10px; + } + + span { + &.unanswered { + @include inline-block(); + background: url('../images/unanswered-icon.png') center center no-repeat; + height: 14px; + position: relative; + width: 14px; + float: right; + } + + &.correct { + @include inline-block(); + background: url('../images/correct-icon.png') center center no-repeat; + height: 20px; + position: relative; + width: 25px; + float: right; + } + + &.incorrect { + @include inline-block(); + background: url('../images/incorrect-icon.png') center center no-repeat; + height: 20px; + width: 20px; + position: relative; + float: right; + } + } +} + +div.result-container { + + .evaluation { + p { + margin-bottom: 1px; + } + } + + .feedback-on-feedback { + height: 100px; + margin-right: 0px; + } + + .evaluation-response { + header { + text-align: right; + a { + font-size: .85em; + } + } + } + .evaluation-scoring { + .scoring-list { + list-style-type: none; + margin-left: 3px; + + li { + &:first-child { + margin-left: 0px; + } + display:inline; + margin-left: 0px; + + label { + font-size: .9em; + } + } + } + } + .submit-message-container { + margin: 10px 0px ; + } + + .external-grader-message { + section { + padding-left: 20px; + background-color: #FAFAFA; + 
color: #2C2C2C; + font-family: monospace; + font-size: 1em; + padding-top: 10px; + header { + font-size: 1.4em; + } + + .shortform { + font-weight: bold; + } + + .longform { + padding: 0px; + margin: 0px; + + .result-errors { + margin: 5px; + padding: 10px 10px 10px 40px; + background: url('../images/incorrect-icon.png') center left no-repeat; + li { + color: #B00; + } + } + + .result-output { + margin: 5px; + padding: 20px 0px 15px 50px; + border-top: 1px solid #DDD; + border-left: 20px solid #FAFAFA; + + h4 { + font-family: monospace; + font-size: 1em; + } + + dl { + margin: 0px; + } + + dt { + margin-top: 20px; + } + + dd { + margin-left: 24pt; + } + } + + .result-correct { + background: url('../images/correct-icon.png') left 20px no-repeat; + .result-actual-output { + color: #090; + } + } + + .result-incorrect { + background: url('../images/incorrect-icon.png') left 20px no-repeat; + .result-actual-output { + color: #B00; + } + } + + .markup-text{ + margin: 5px; + padding: 20px 0px 15px 50px; + border-top: 1px solid #DDD; + border-left: 20px solid #FAFAFA; + + bs { + color: #BB0000; + } + + bg { + color: #BDA046; + } + } + } + } + } +} + +div.result-container, section.open-ended-child { + .rubric { + tr { + margin:10px 0px; + height: 100%; + } + td { + padding: 20px 0px; + margin: 10px 0px; + height: 100%; + } + th { + padding: 5px; + margin: 5px; + } + label, + .view-only { + margin:10px; + position: relative; + padding: 15px; + width: 200px; + height:100%; + display: inline-block; + min-height: 50px; + min-width: 50px; + background-color: #CCC; + font-size: 1em; + } + .grade { + position: absolute; + bottom:0px; + right:0px; + margin:10px; + } + .selected-grade { + background: #666; + color: white; + } + input[type=radio]:checked + label { + background: #666; + color: white; } + input[class='score-selection'] { + display: none; + } + } +} + +section.open-ended-child { + @media print { + display: block; + width: auto; + padding: 0; + + canvas, img { + 
page-break-inside: avoid; + } + } + + .inline { + display: inline; + } + + ol.enumerate { + li { + &:before { + content: " "; + display: block; + height: 0; + visibility: hidden; + } + } + } + + .solution-span { + > span { + margin: 20px 0; + display: block; + border: 1px solid #ddd; + padding: 9px 15px 20px; + background: #FFF; + position: relative; + @include box-shadow(inset 0 0 0 1px #eee); + @include border-radius(3px); + + &:empty { + display: none; + } + } + } + + p { + &.answer { + margin-top: -2px; + } + &.status { + text-indent: -9999px; + margin: 8px 0 0 10px; + } + } + + div.unanswered { + p.status { + @include inline-block(); + background: url('../images/unanswered-icon.png') center center no-repeat; + height: 14px; + width: 14px; + } + } + + div.correct, div.ui-icon-check { + p.status { + @include inline-block(); + background: url('../images/correct-icon.png') center center no-repeat; + height: 20px; + width: 25px; + } + + input { + border-color: green; + } + } + + div.processing { + p.status { + @include inline-block(); + background: url('../images/spinner.gif') center center no-repeat; + height: 20px; + width: 20px; + } + + input { + border-color: #aaa; + } + } + + div.incorrect, div.ui-icon-close { + p.status { + @include inline-block(); + background: url('../images/incorrect-icon.png') center center no-repeat; + height: 20px; + width: 20px; + text-indent: -9999px; + } + + input { + border-color: red; + } + } + + > span { + display: block; + margin-bottom: lh(.5); + } + + p.answer { + @include inline-block(); + margin-bottom: 0; + margin-left: 10px; + + &:before { + content: "Answer: "; + font-weight: bold; + display: inline; + + } + &:empty { + &:before { + display: none; + } + } + } + + span { + &.unanswered, &.ui-icon-bullet { + @include inline-block(); + background: url('../images/unanswered-icon.png') center center no-repeat; + height: 14px; + position: relative; + top: 4px; + width: 14px; + } + + &.processing, &.ui-icon-processing { + 
@include inline-block(); + background: url('../images/spinner.gif') center center no-repeat; + height: 20px; + position: relative; + top: 6px; + width: 25px; + } + + &.correct, &.ui-icon-check { + @include inline-block(); + background: url('../images/correct-icon.png') center center no-repeat; + height: 20px; + position: relative; + top: 6px; + width: 25px; + } + + &.incorrect, &.ui-icon-close { + @include inline-block(); + background: url('../images/incorrect-icon.png') center center no-repeat; + height: 20px; + width: 20px; + position: relative; + top: 6px; + } + } + + .reload + { + float:right; + margin: 10px; + } + + + .grader-status { + padding: 9px; + background: #F6F6F6; + border: 1px solid #ddd; + border-top: 0; + margin-bottom: 20px; + @include clearfix; + + span { + text-indent: -9999px; + overflow: hidden; + display: block; + float: left; + margin: -7px 7px 0 0; + } + + .grading { + background: url('../images/info-icon.png') left center no-repeat; + padding-left: 25px; + text-indent: 0px; + margin: 0px 7px 0 0; + } + + p { + line-height: 20px; + text-transform: capitalize; + margin-bottom: 0; + float: left; + } + + &.file { + background: #FFF; + margin-top: 20px; + padding: 20px 0 0 0; + + border: { + top: 1px solid #eee; + right: 0; + bottom: 0; + left: 0; + } + + p.debug { + display: none; + } + + input { + float: left; + } + } + + } + + form.option-input { + margin: -10px 0 20px; + padding-bottom: 20px; + + select { + margin-right: flex-gutter(); + } + } + + ul { + list-style: disc outside none; + margin-bottom: lh(); + margin-left: .75em; + margin-left: .75rem; + } + + ol { + list-style: decimal outside none; + margin-bottom: lh(); + margin-left: .75em; + margin-left: .75rem; + } + + dl { + line-height: 1.4em; + } + + dl dt { + font-weight: bold; + } + + dl dd { + margin-bottom: 0; + } + + dd { + margin-left: .5em; + margin-left: .5rem; + } + + li { + line-height: 1.4em; + margin-bottom: lh(.5); + + &:last-child { + margin-bottom: 0; + } + } + + p { 
+ margin-bottom: lh(); + } + + hr { + background: #ddd; + border: none; + clear: both; + color: #ddd; + float: none; + height: 1px; + margin: 0 0 .75rem; + width: 100%; + } + + .hidden { + display: none; + visibility: hidden; + } + + #{$all-text-inputs} { + display: inline; + width: auto; + } + + section.action { + margin-top: 20px; + + input.save { + @extend .blue-button; + } + + .submission_feedback { + // background: #F3F3F3; + // border: 1px solid #ddd; + // @include border-radius(3px); + // padding: 8px 12px; + // margin-top: 10px; + @include inline-block; + font-style: italic; + margin: 8px 0 0 10px; + color: #777; + -webkit-font-smoothing: antialiased; + } + } + + .detailed-solution { + > p:first-child { + font-size: 0.9em; + font-weight: bold; + font-style: normal; + text-transform: uppercase; + color: #AAA; + } + + p:last-child { + margin-bottom: 0; + } + } + + div.open-ended-alert { + padding: 8px 12px; + border: 1px solid #EBE8BF; + border-radius: 3px; + background: #FFFCDD; + font-size: 0.9em; + margin-top: 10px; + } + + div.capa_reset { + padding: 25px; + border: 1px solid $error-red; + background-color: lighten($error-red, 25%); + border-radius: 3px; + font-size: 1em; + margin-top: 10px; + margin-bottom: 10px; + } + .capa_reset>h2 { + color: #AA0000; + } + .capa_reset li { + font-size: 0.9em; + } + +} diff --git a/common/lib/xmodule/xmodule/graders.py b/common/lib/xmodule/xmodule/graders.py index 8f885dc9d2..a183cec98b 100644 --- a/common/lib/xmodule/xmodule/graders.py +++ b/common/lib/xmodule/xmodule/graders.py @@ -316,7 +316,7 @@ class AssignmentFormatGrader(CourseGrader): min_count = 2 would produce the labels "Assignment 3", "Assignment 4" """ - def __init__(self, type, min_count, drop_count, category=None, section_type=None, short_label=None, show_only_average=False, starting_index=1): + def __init__(self, type, min_count, drop_count, category=None, section_type=None, short_label=None, show_only_average=False, hide_average=False, 
starting_index=1): self.type = type self.min_count = min_count self.drop_count = drop_count @@ -325,6 +325,7 @@ class AssignmentFormatGrader(CourseGrader): self.short_label = short_label or self.type self.show_only_average = show_only_average self.starting_index = starting_index + self.hide_average = hide_average def grade(self, grade_sheet, generate_random_scores=False): def totalWithDrops(breakdown, drop_count): @@ -385,7 +386,8 @@ class AssignmentFormatGrader(CourseGrader): if self.show_only_average: breakdown = [] - breakdown.append({'percent': total_percent, 'label': total_label, 'detail': total_detail, 'category': self.category, 'prominent': True}) + if not self.hide_average: + breakdown.append({'percent': total_percent, 'label': total_label, 'detail': total_detail, 'category': self.category, 'prominent': True}) return {'percent': total_percent, 'section_breakdown': breakdown, diff --git a/common/lib/xmodule/xmodule/gst_module.py b/common/lib/xmodule/xmodule/gst_module.py new file mode 100644 index 0000000000..3b8d96ee81 --- /dev/null +++ b/common/lib/xmodule/xmodule/gst_module.py @@ -0,0 +1,195 @@ +""" +Graphical slider tool module is ungraded xmodule used by students to +understand functional dependencies. +""" + +import json +import logging +from lxml import etree +from lxml import html +import xmltodict + +from xmodule.mako_module import MakoModuleDescriptor +from xmodule.xml_module import XmlDescriptor +from xmodule.x_module import XModule +from xmodule.stringify import stringify_children +from pkg_resources import resource_string + + +log = logging.getLogger(__name__) + + +class GraphicalSliderToolModule(XModule): + ''' Graphical-Slider-Tool Module + ''' + + js = { + 'js': [ + # 3rd party libraries used by graphic slider tool. + # TODO - where to store them - outside xmodule? 
+ resource_string(__name__, 'js/src/graphical_slider_tool/jstat-1.0.0.min.js'), + + resource_string(__name__, 'js/src/graphical_slider_tool/gst_main.js'), + resource_string(__name__, 'js/src/graphical_slider_tool/state.js'), + resource_string(__name__, 'js/src/graphical_slider_tool/logme.js'), + resource_string(__name__, 'js/src/graphical_slider_tool/general_methods.js'), + resource_string(__name__, 'js/src/graphical_slider_tool/sliders.js'), + resource_string(__name__, 'js/src/graphical_slider_tool/inputs.js'), + resource_string(__name__, 'js/src/graphical_slider_tool/graph.js'), + resource_string(__name__, 'js/src/graphical_slider_tool/el_output.js'), + resource_string(__name__, 'js/src/graphical_slider_tool/g_label_el_output.js'), + + resource_string(__name__, 'js/src/graphical_slider_tool/gst.js') + ] + } + js_module_name = "GraphicalSliderTool" + + def __init__(self, system, location, definition, descriptor, instance_state=None, + shared_state=None, **kwargs): + """ + For XML file format please look at documentation. TODO - receive + information where to store XML documentation. + """ + XModule.__init__(self, system, location, definition, descriptor, + instance_state, shared_state, **kwargs) + + def get_html(self): + """ Renders parameters to template. """ + + # these 3 will be used in class methods + self.html_id = self.location.html_id() + self.html_class = self.location.category + self.configuration_json = self.build_configuration_json() + params = { + 'gst_html': self.substitute_controls(self.definition['render']), + 'element_id': self.html_id, + 'element_class': self.html_class, + 'configuration_json': self.configuration_json + } + self.content = self.system.render_template( + 'graphical_slider_tool.html', params) + return self.content + + def substitute_controls(self, html_string): + """ Substitutes control elements (slider, textbox and plot) in + html_string with their divs. Html_string is content of tag + inside tag. 
Documentation on how information in + tag is organized and processed is located in: + mitx/docs/build/html/graphical_slider_tool.html. + + Args: + html_string: content of tag, with controls as xml tags, + e.g. . + + Returns: + html_string with control tags replaced by proper divs + ( ->
) + """ + + xml = html.fromstring(html_string) + + #substitute plot, if presented + plot_div = '
' + plot_el = xml.xpath('//plot') + if plot_el: + plot_el = plot_el[0] + plot_el.getparent().replace(plot_el, html.fromstring( + plot_div.format(element_class=self.html_class, + element_id=self.html_id, + style=plot_el.get('style', "")))) + + #substitute sliders + slider_div = '
\ +
' + slider_els = xml.xpath('//slider') + for slider_el in slider_els: + slider_el.getparent().replace(slider_el, html.fromstring( + slider_div.format(element_class=self.html_class, + element_id=self.html_id, + var=slider_el.get('var', ""), + style=slider_el.get('style', "")))) + + # substitute inputs aka textboxes + input_div = '' + input_els = xml.xpath('//textbox') + for input_index, input_el in enumerate(input_els): + input_el.getparent().replace(input_el, html.fromstring( + input_div.format(element_class=self.html_class, + element_id=self.html_id, + var=input_el.get('var', ""), + style=input_el.get('style', ""), + input_index=input_index))) + + return html.tostring(xml) + + def build_configuration_json(self): + """Creates json element from xml element (with aim to transfer later + directly to javascript via hidden field in template). Steps: + + 1. Convert xml tree to python dict. + + 2. Dump dict to json. + + """ + # added for interface compatibility with xmltodict.parse + # class added for javascript's part purposes + return json.dumps(xmltodict.parse('' + self.definition['configuration'] + '')) + + +class GraphicalSliderToolDescriptor(MakoModuleDescriptor, XmlDescriptor): + module_class = GraphicalSliderToolModule + template_dir_name = 'graphical_slider_tool' + + @classmethod + def definition_from_xml(cls, xml_object, system): + """ + Pull out the data into dictionary. + + Args: + xml_object: xml from file. 
+ + Returns: + dict + """ + # check for presense of required tags in xml + expected_children_level_0 = ['render', 'configuration'] + for child in expected_children_level_0: + if len(xml_object.xpath(child)) != 1: + raise ValueError("Graphical Slider Tool definition must include \ + exactly one '{0}' tag".format(child)) + + expected_children_level_1 = ['functions'] + for child in expected_children_level_1: + if len(xml_object.xpath('configuration')[0].xpath(child)) != 1: + raise ValueError("Graphical Slider Tool definition must include \ + exactly one '{0}' tag".format(child)) + # finished + + def parse(k): + """Assumes that xml_object has child k""" + return stringify_children(xml_object.xpath(k)[0]) + return { + 'render': parse('render'), + 'configuration': parse('configuration') + } + + def definition_to_xml(self, resource_fs): + '''Return an xml element representing this definition.''' + xml_object = etree.Element('graphical_slider_tool') + + def add_child(k): + child_str = '<{tag}>{body}'.format(tag=k, body=self.definition[k]) + child_node = etree.fromstring(child_str) + xml_object.append(child_node) + + for child in ['render', 'configuration']: + add_child(child) + + return xml_object diff --git a/common/lib/xmodule/xmodule/js/src/collapsible.coffee b/common/lib/xmodule/xmodule/js/src/collapsible.coffee index 18a186e106..e414935784 100644 --- a/common/lib/xmodule/xmodule/js/src/collapsible.coffee +++ b/common/lib/xmodule/xmodule/js/src/collapsible.coffee @@ -22,7 +22,7 @@ class @Collapsible if $(event.target).text() == 'See full output' new_text = 'Hide output' else - new_text = 'See full ouput' + new_text = 'See full output' $(event.target).text(new_text) @toggleHint: (event) => diff --git a/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee b/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee new file mode 100644 index 0000000000..2cbba143a3 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee 
@@ -0,0 +1,282 @@ +class @CombinedOpenEnded + constructor: (element) -> + @element=element + @reinitialize(element) + + reinitialize: (element) -> + @wrapper=$(element).find('section.xmodule_CombinedOpenEndedModule') + @el = $(element).find('section.combined-open-ended') + @combined_open_ended=$(element).find('section.combined-open-ended') + @id = @el.data('id') + @ajax_url = @el.data('ajax-url') + @state = @el.data('state') + @task_count = @el.data('task-count') + @task_number = @el.data('task-number') + + @allow_reset = @el.data('allow_reset') + @reset_button = @$('.reset-button') + @reset_button.click @reset + @next_problem_button = @$('.next-step-button') + @next_problem_button.click @next_problem + + @show_results_button=@$('.show-results-button') + @show_results_button.click @show_results + + # valid states: 'initial', 'assessing', 'post_assessment', 'done' + Collapsible.setCollapsibles(@el) + @submit_evaluation_button = $('.submit-evaluation-button') + @submit_evaluation_button.click @message_post + + @results_container = $('.result-container') + + # Where to put the rubric once we load it + @el = $(element).find('section.open-ended-child') + @errors_area = @$('.error') + @answer_area = @$('textarea.answer') + + @rubric_wrapper = @$('.rubric-wrapper') + @hint_wrapper = @$('.hint-wrapper') + @message_wrapper = @$('.message-wrapper') + @submit_button = @$('.submit-button') + @child_state = @el.data('state') + @child_type = @el.data('child-type') + if @child_type=="openended" + @skip_button = @$('.skip-button') + @skip_button.click @skip_post_assessment + + @open_ended_child= @$('.open-ended-child') + + @find_assessment_elements() + @find_hint_elements() + + @rebind() + + # locally scoped jquery. 
+ $: (selector) -> + $(selector, @el) + + show_results: (event) => + status_item = $(event.target).parent().parent() + status_number = status_item.data('status-number') + data = {'task_number' : status_number} + $.postWithPrefix "#{@ajax_url}/get_results", data, (response) => + if response.success + @results_container.after(response.html).remove() + @results_container = $('div.result-container') + @submit_evaluation_button = $('.submit-evaluation-button') + @submit_evaluation_button.click @message_post + Collapsible.setCollapsibles(@results_container) + else + @errors_area.html(response.error) + + message_post: (event)=> + Logger.log 'message_post', @answers + external_grader_message=$(event.target).parent().parent().parent() + evaluation_scoring = $(event.target).parent() + + fd = new FormData() + feedback = evaluation_scoring.find('textarea.feedback-on-feedback')[0].value + submission_id = external_grader_message.find('input.submission_id')[0].value + grader_id = external_grader_message.find('input.grader_id')[0].value + score = evaluation_scoring.find("input:radio[name='evaluation-score']:checked").val() + + fd.append('feedback', feedback) + fd.append('submission_id', submission_id) + fd.append('grader_id', grader_id) + if(!score) + @gentle_alert "You need to pick a rating before you can submit." 
+ return + else + fd.append('score', score) + + settings = + type: "POST" + data: fd + processData: false + contentType: false + success: (response) => + @gentle_alert response.msg + $('section.evaluation').slideToggle() + @message_wrapper.html(response.message_html) + + $.ajaxWithPrefix("#{@ajax_url}/save_post_assessment", settings) + + + rebind: () => + # rebind to the appropriate function for the current state + @submit_button.unbind('click') + @submit_button.show() + @reset_button.hide() + @next_problem_button.hide() + @hint_area.attr('disabled', false) + + if @child_type=="openended" + @skip_button.hide() + if @allow_reset=="True" + @reset_button.show() + @submit_button.hide() + @answer_area.attr("disabled", true) + @hint_area.attr('disabled', true) + else if @child_state == 'initial' + @answer_area.attr("disabled", false) + @submit_button.prop('value', 'Submit') + @submit_button.click @save_answer + else if @child_state == 'assessing' + @answer_area.attr("disabled", true) + @submit_button.prop('value', 'Submit assessment') + @submit_button.click @save_assessment + if @child_type == "openended" + @submit_button.hide() + @queueing() + else if @child_state == 'post_assessment' + if @child_type=="openended" + @skip_button.show() + @skip_post_assessment() + @answer_area.attr("disabled", true) + @submit_button.prop('value', 'Submit post-assessment') + if @child_type=="selfassessment" + @submit_button.click @save_hint + else + @submit_button.click @message_post + else if @child_state == 'done' + @answer_area.attr("disabled", true) + @hint_area.attr('disabled', true) + @submit_button.hide() + if @child_type=="openended" + @skip_button.hide() + if @task_number<@task_count + @next_problem() + else + @reset_button.show() + + + find_assessment_elements: -> + @assessment = @$('select.assessment') + + find_hint_elements: -> + @hint_area = @$('textarea.post_assessment') + + save_answer: (event) => + event.preventDefault() + if @child_state == 'initial' + data = 
{'student_answer' : @answer_area.val()} + $.postWithPrefix "#{@ajax_url}/save_answer", data, (response) => + if response.success + @rubric_wrapper.html(response.rubric_html) + @child_state = 'assessing' + @find_assessment_elements() + @rebind() + else + @errors_area.html(response.error) + else + @errors_area.html('Problem state got out of sync. Try reloading the page.') + + save_assessment: (event) => + event.preventDefault() + if @child_state == 'assessing' + data = {'assessment' : @assessment.find(':selected').text()} + $.postWithPrefix "#{@ajax_url}/save_assessment", data, (response) => + if response.success + @child_state = response.state + + if @child_state == 'post_assessment' + @hint_wrapper.html(response.hint_html) + @find_hint_elements() + else if @child_state == 'done' + @message_wrapper.html(response.message_html) + + @rebind() + else + @errors_area.html(response.error) + else + @errors_area.html('Problem state got out of sync. Try reloading the page.') + + save_hint: (event) => + event.preventDefault() + if @child_state == 'post_assessment' + data = {'hint' : @hint_area.val()} + + $.postWithPrefix "#{@ajax_url}/save_post_assessment", data, (response) => + if response.success + @message_wrapper.html(response.message_html) + @child_state = 'done' + @rebind() + else + @errors_area.html(response.error) + else + @errors_area.html('Problem state got out of sync. Try reloading the page.') + + skip_post_assessment: => + if @child_state == 'post_assessment' + + $.postWithPrefix "#{@ajax_url}/skip_post_assessment", {}, (response) => + if response.success + @child_state = 'done' + @rebind() + else + @errors_area.html(response.error) + else + @errors_area.html('Problem state got out of sync. 
Try reloading the page.') + + reset: (event) => + event.preventDefault() + if @child_state == 'done' or @allow_reset=="True" + $.postWithPrefix "#{@ajax_url}/reset", {}, (response) => + if response.success + @answer_area.val('') + @rubric_wrapper.html('') + @hint_wrapper.html('') + @message_wrapper.html('') + @child_state = 'initial' + @combined_open_ended.after(response.html).remove() + @allow_reset="False" + @reinitialize(@element) + @rebind() + @reset_button.hide() + else + @errors_area.html(response.error) + else + @errors_area.html('Problem state got out of sync. Try reloading the page.') + + next_problem: => + if @child_state == 'done' + $.postWithPrefix "#{@ajax_url}/next_problem", {}, (response) => + if response.success + @answer_area.val('') + @rubric_wrapper.html('') + @hint_wrapper.html('') + @message_wrapper.html('') + @child_state = 'initial' + @combined_open_ended.after(response.html).remove() + @reinitialize(@element) + @rebind() + @next_problem_button.hide() + if !response.allow_reset + @gentle_alert "Moved to next step." + else + @gentle_alert "Your score did not meet the criteria to move to the next step." + else + @errors_area.html(response.error) + else + @errors_area.html('Problem state got out of sync. Try reloading the page.') + + gentle_alert: (msg) => + if @el.find('.open-ended-alert').length + @el.find('.open-ended-alert').remove() + alert_elem = "
" + msg + "
" + @el.find('.open-ended-action').after(alert_elem) + @el.find('.open-ended-alert').css(opacity: 0).animate(opacity: 1, 700) + + queueing: => + if @child_state=="assessing" and @child_type=="openended" + if window.queuePollerID # Only one poller 'thread' per Problem + window.clearTimeout(window.queuePollerID) + window.queuePollerID = window.setTimeout(@poll, 10000) + + poll: => + $.postWithPrefix "#{@ajax_url}/check_for_score", (response) => + if response.state == "done" or response.state=="post_assessment" + delete window.queuePollerID + location.reload() + else + window.queuePollerID = window.setTimeout(@poll, 10000) \ No newline at end of file diff --git a/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/el_output.js b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/el_output.js new file mode 100644 index 0000000000..3175aae3f0 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/el_output.js @@ -0,0 +1,139 @@ +// Wrapper for RequireJS. It will make the standard requirejs(), require(), and +// define() functions from Require JS available inside the anonymous function. +(function (requirejs, require, define) { + +define('ElOutput', ['logme'], function (logme) { + + return ElOutput; + + function ElOutput(config, state) { + + if ($.isPlainObject(config.functions.function)) { + processFuncObj(config.functions.function); + } else if ($.isArray(config.functions.function)) { + (function (c1) { + while (c1 < config.functions.function.length) { + if ($.isPlainObject(config.functions.function[c1])) { + processFuncObj(config.functions.function[c1]); + } + + c1 += 1; + } + }(0)); + } + + return; + + function processFuncObj(obj) { + var paramNames, funcString, func, el, disableAutoReturn, updateOnEvent; + + // We are only interested in functions that are meant for output to an + // element. 
+ if ( + (typeof obj['@output'] !== 'string') || + ((obj['@output'].toLowerCase() !== 'element') && (obj['@output'].toLowerCase() !== 'none')) + ) { + return; + } + + if (typeof obj['@el_id'] !== 'string') { + logme('ERROR: You specified "output" as "element", but did not spify "el_id".'); + + return; + } + + if (typeof obj['#text'] !== 'string') { + logme('ERROR: Function body is not defined.'); + + return; + } + + updateOnEvent = 'slide'; + if ( + (obj.hasOwnProperty('@update_on') === true) && + (typeof obj['@update_on'] === 'string') && + ((obj['@update_on'].toLowerCase() === 'slide') || (obj['@update_on'].toLowerCase() === 'change')) + ) { + updateOnEvent = obj['@update_on'].toLowerCase(); + } + + disableAutoReturn = obj['@disable_auto_return']; + + funcString = obj['#text']; + + if ( + (disableAutoReturn === undefined) || + ( + (typeof disableAutoReturn === 'string') && + (disableAutoReturn.toLowerCase() !== 'true') + ) + ) { + if (funcString.search(/return/i) === -1) { + funcString = 'return ' + funcString; + } + } else { + if (funcString.search(/return/i) === -1) { + logme( + 'ERROR: You have specified a JavaScript ' + + 'function without a "return" statemnt. Your ' + + 'function will return "undefined" by default.' + ); + } + } + + // Make sure that all HTML entities are converted to their proper + // ASCII text equivalents. + funcString = $('
').html(funcString).text(); + + paramNames = state.getAllParameterNames(); + paramNames.push(funcString); + + try { + func = Function.apply(null, paramNames); + } catch (err) { + logme( + 'ERROR: The function body "' + + funcString + + '" was not converted by the Function constructor.' + ); + logme('Error message: "' + err.message + '".'); + + $('#' + gstId).html('
' + 'ERROR IN XML: Could not create a function from string "' + funcString + '".' + '
'); + $('#' + gstId).append('
' + 'Error message: "' + err.message + '".' + '
'); + + paramNames.pop(); + + return; + } + + paramNames.pop(); + + if (obj['@output'].toLowerCase() !== 'none') { + el = $('#' + obj['@el_id']); + + if (el.length !== 1) { + logme( + 'ERROR: DOM element with ID "' + obj['@el_id'] + '" ' + + 'not found. Dynamic element not created.' + ); + + return; + } + + el.html(func.apply(window, state.getAllParameterValues())); + } else { + el = null; + func.apply(window, state.getAllParameterValues()); + } + + state.addDynamicEl(el, func, obj['@el_id'], updateOnEvent); + } + + } +}); + +// End of wrapper for RequireJS. As you can see, we are passing +// namespaced Require JS variables to an anonymous function. Within +// it, you can use the standard requirejs(), require(), and define() +// functions as if they were in the global namespace. +}(RequireJS.requirejs, RequireJS.require, RequireJS.define)); // End-of: (function (requirejs, require, define) diff --git a/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/g_label_el_output.js b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/g_label_el_output.js new file mode 100644 index 0000000000..13c9dd3389 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/g_label_el_output.js @@ -0,0 +1,113 @@ +// Wrapper for RequireJS. It will make the standard requirejs(), require(), and +// define() functions from Require JS available inside the anonymous function. 
+(function (requirejs, require, define) { + +define('GLabelElOutput', ['logme'], function (logme) { + return GLabelElOutput; + + function GLabelElOutput(config, state) { + if ($.isPlainObject(config.functions.function)) { + processFuncObj(config.functions.function); + } else if ($.isArray(config.functions.function)) { + (function (c1) { + while (c1 < config.functions.function.length) { + if ($.isPlainObject(config.functions.function[c1])) { + processFuncObj(config.functions.function[c1]); + } + + c1 += 1; + } + }(0)); + } + + return; + + function processFuncObj(obj) { + var paramNames, funcString, func, disableAutoReturn; + + // We are only interested in functions that are meant for output to an + // element. + if ( + (typeof obj['@output'] !== 'string') || + (obj['@output'].toLowerCase() !== 'plot_label') + ) { + return; + } + + if (typeof obj['@el_id'] !== 'string') { + logme('ERROR: You specified "output" as "plot_label", but did not spify "el_id".'); + + return; + } + + if (typeof obj['#text'] !== 'string') { + logme('ERROR: Function body is not defined.'); + + return; + } + + disableAutoReturn = obj['@disable_auto_return']; + + funcString = obj['#text']; + + if ( + (disableAutoReturn === undefined) || + ( + (typeof disableAutoReturn === 'string') && + (disableAutoReturn.toLowerCase() !== 'true') + ) + ) { + if (funcString.search(/return/i) === -1) { + funcString = 'return ' + funcString; + } + } else { + if (funcString.search(/return/i) === -1) { + logme( + 'ERROR: You have specified a JavaScript ' + + 'function without a "return" statemnt. Your ' + + 'function will return "undefined" by default.' + ); + } + } + + // Make sure that all HTML entities are converted to their proper + // ASCII text equivalents. + funcString = $('
').html(funcString).text(); + + paramNames = state.getAllParameterNames(); + paramNames.push(funcString); + + try { + func = Function.apply(null, paramNames); + } catch (err) { + logme( + 'ERROR: The function body "' + + funcString + + '" was not converted by the Function constructor.' + ); + logme('Error message: "' + err.message + '".'); + + $('#' + gstId).html('
' + 'ERROR IN XML: Could not create a function from string "' + funcString + '".' + '
'); + $('#' + gstId).append('
' + 'Error message: "' + err.message + '".' + '
'); + + paramNames.pop(); + + return; + } + + paramNames.pop(); + + state.plde.push({ + 'elId': obj['@el_id'], + 'func': func + }); + } + + } +}); + +// End of wrapper for RequireJS. As you can see, we are passing +// namespaced Require JS variables to an anonymous function. Within +// it, you can use the standard requirejs(), require(), and define() +// functions as if they were in the global namespace. +}(RequireJS.requirejs, RequireJS.require, RequireJS.define)); // End-of: (function (requirejs, require, define) diff --git a/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/general_methods.js b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/general_methods.js new file mode 100644 index 0000000000..9cdd4fff0f --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/general_methods.js @@ -0,0 +1,23 @@ +// Wrapper for RequireJS. It will make the standard requirejs(), require(), and +// define() functions from Require JS available inside the anonymous function. +(function (requirejs, require, define) { + +define('GeneralMethods', [], function () { + if (!String.prototype.trim) { + // http://blog.stevenlevithan.com/archives/faster-trim-javascript + String.prototype.trim = function trim(str) { + return str.replace(/^\s\s*/, '').replace(/\s\s*$/, ''); + }; + } + + return { + 'module_name': 'GeneralMethods', + 'module_status': 'OK' + }; +}); + +// End of wrapper for RequireJS. As you can see, we are passing +// namespaced Require JS variables to an anonymous function. Within +// it, you can use the standard requirejs(), require(), and define() +// functions as if they were in the global namespace. 
+}(RequireJS.requirejs, RequireJS.require, RequireJS.define)); // End-of: (function (requirejs, require, define) diff --git a/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/graph.js b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/graph.js new file mode 100644 index 0000000000..2520f0b12f --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/graph.js @@ -0,0 +1,1496 @@ +// Wrapper for RequireJS. It will make the standard requirejs(), require(), and +// define() functions from Require JS available inside the anonymous function. +(function (requirejs, require, define) { + +define('Graph', ['logme'], function (logme) { + + return Graph; + + function Graph(gstId, config, state) { + var plotDiv, dataSeries, functions, xaxis, yaxis, numPoints, xrange, + asymptotes, movingLabels, xTicksNames, yTicksNames, graphBarWidth, graphBarAlign; + + // We need plot configuration settings. Without them we can't continue. + if ($.isPlainObject(config.plot) === false) { + return; + } + + // We must have a graph container DIV element available in order to + // proceed. + plotDiv = $('#' + gstId + '_plot'); + if (plotDiv.length === 0) { + logme('ERROR: Could not find the plot DIV with ID "' + gstId + '_plot".'); + + return; + } + + if (plotDiv.width() === 0) { + plotDiv.width(300); + } + + // Sometimes, when height is not explicitly set via CSS (or by some + // other means), it is 0 pixels by default. When Flot will try to plot + // a graph in this DIV with 0 height, then it will raise an error. To + // prevent this, we will set it to be equal to the width. + if (plotDiv.height() === 0) { + plotDiv.height(plotDiv.width()); + } + + plotDiv.css('position', 'relative'); + + // Configure some settings for the graph. + if (setGraphXRange() === false) { + logme('ERROR: Could not configure the xrange. 
Will not continue.'); + + return; + } + + if (setGraphAxes() === false) { + logme('ERROR: Could not process configuration for the axes.'); + + return; + } + + graphBarWidth = 1; + graphBarAlign = null; + + getBarWidth(); + getBarAlign(); + + // Get the user defined functions. If there aren't any, don't do + // anything else. + createFunctions(); + + if (functions.length === 0) { + logme('ERROR: No functions were specified, or something went wrong.'); + + return; + } + + if (createMarkingsFunctions() === false) { + return; + } + if (createMovingLabelFunctions() === false) { + return; + } + + // Create the initial graph and plot it for the user to see. + if (generateData() === true) { + updatePlot(); + } + + // Bind an event. Whenever some constant changes, the graph will be + // redrawn + state.bindUpdatePlotEvent(plotDiv, onUpdatePlot); + + return; + + function getBarWidth() { + if (config.plot.hasOwnProperty('bar_width') === false) { + return; + } + + if (typeof config.plot.bar_width !== 'string') { + logme('ERROR: The parameter config.plot.bar_width must be a string.'); + + return; + } + + if (isFinite(graphBarWidth = parseFloat(config.plot.bar_width)) === false) { + logme('ERROR: The parameter config.plot.bar_width is not a valid floating number.'); + graphBarWidth = 1; + + return; + } + + return; + } + + function getBarAlign() { + if (config.plot.hasOwnProperty('bar_align') === false) { + return; + } + + if (typeof config.plot.bar_align !== 'string') { + logme('ERROR: The parameter config.plot.bar_align must be a string.'); + + return; + } + + if ( + (config.plot.bar_align.toLowerCase() !== 'left') && + (config.plot.bar_align.toLowerCase() !== 'center') + ) { + logme('ERROR: Property config.plot.bar_align can be one of "left", or "center".'); + + return; + } + + graphBarAlign = config.plot.bar_align.toLowerCase(); + + return; + } + + function createMovingLabelFunctions() { + var c1, returnStatus; + + returnStatus = true; + movingLabels = []; + + if 
(config.plot.hasOwnProperty('moving_label') !== true) { + returnStatus = true; + } else if ($.isPlainObject(config.plot.moving_label) === true) { + if (processMovingLabel(config.plot.moving_label) === false) { + returnStatus = false; + } + } else if ($.isArray(config.plot.moving_label) === true) { + for (c1 = 0; c1 < config.plot.moving_label.length; c1++) { + if (processMovingLabel(config.plot.moving_label[c1]) === false) { + returnStatus = false; + } + } + } + + return returnStatus; + } + + function processMovingLabel(obj) { + var labelText, funcString, disableAutoReturn, paramNames, func, + fontWeight, fontColor; + + if (obj.hasOwnProperty('@text') === false) { + logme('ERROR: You did not define a "text" attribute for the moving_label.'); + + return false; + } + if (typeof obj['@text'] !== 'string') { + logme('ERROR: "text" attribute is not a string.'); + + return false; + } + labelText = obj['@text']; + + if (obj.hasOwnProperty('#text') === false) { + logme('ERROR: moving_label is missing function declaration.'); + + return false; + } + if (typeof obj['#text'] !== 'string') { + logme('ERROR: Function declaration is not a string.'); + + return false; + } + funcString = obj['#text']; + + fontColor = 'black'; + if ( + (obj.hasOwnProperty('@color') === true) && + (typeof obj['@color'] === 'string') + ) { + fontColor = obj['@color']; + } + + fontWeight = 'normal'; + if ( + (obj.hasOwnProperty('@weight') === true) && + (typeof obj['@weight'] === 'string') + ) { + if ( + (obj['@weight'].toLowerCase() === 'normal') || + (obj['@weight'].toLowerCase() === 'bold') + ) { + fontWeight = obj['@weight']; + } else { + logme('ERROR: Moving label can have a weight property of "normal" or "bold".'); + } + } + + disableAutoReturn = obj['@disable_auto_return']; + + funcString = $('
').html(funcString).text(); + + if ( + (disableAutoReturn === undefined) || + ( + (typeof disableAutoReturn === 'string') && + (disableAutoReturn.toLowerCase() !== 'true') + ) + ) { + if (funcString.search(/return/i) === -1) { + funcString = 'return ' + funcString; + } + } else { + if (funcString.search(/return/i) === -1) { + logme( + 'ERROR: You have specified a JavaScript ' + + 'function without a "return" statemnt. Your ' + + 'function will return "undefined" by default.' + ); + } + } + + paramNames = state.getAllParameterNames(); + paramNames.push(funcString); + + try { + func = Function.apply(null, paramNames); + } catch (err) { + logme( + 'ERROR: The function body "' + + funcString + + '" was not converted by the Function constructor.' + ); + logme('Error message: "' + err.message + '"'); + + $('#' + gstId).html('
' + 'ERROR IN XML: Could not create a function from the string "' + funcString + '".' + '
'); + $('#' + gstId).append('
' + 'Error message: "' + err.message + '".' + '
'); + + paramNames.pop(); + + return false; + } + + paramNames.pop(); + + movingLabels.push({ + 'labelText': labelText, + 'func': func, + 'el': null, + 'fontColor': fontColor, + 'fontWeight': fontWeight + }); + + return true; + } + + function createMarkingsFunctions() { + var c1, paramNames, returnStatus; + + returnStatus = true; + + asymptotes = []; + paramNames = state.getAllParameterNames(); + + if ($.isPlainObject(config.plot.asymptote)) { + if (processAsymptote(config.plot.asymptote) === false) { + returnStatus = false; + } + } else if ($.isArray(config.plot.asymptote)) { + for (c1 = 0; c1 < config.plot.asymptote.length; c1 += 1) { + if (processAsymptote(config.plot.asymptote[c1]) === false) { + returnStatus = false; + } + } + } + + return returnStatus; + + // Read configuration options for asymptotes, and store them as + // an array of objects. Each object will have 3 properties: + // + // - color: the color of the asymptote line + // - type: 'x' (vertical), or 'y' (horizontal) + // - func: the function that will generate the value at which + // the asymptote will be plotted; i.e. x = func(), or + // y = func(); for now only horizontal and vertical + // asymptotes are supported + // + // Since each asymptote can have a variable function - function + // that relies on some parameter specified in the config - we will + // generate each asymptote just before we draw the graph. See: + // + // function updatePlot() + // function generateMarkings() + // + // Asymptotes are really thin rectangles implemented via the Flot's + // markings option. 
+ function processAsymptote(asyObj) { + var newAsyObj, funcString, func; + + newAsyObj = {}; + + if (typeof asyObj['@type'] === 'string') { + if (asyObj['@type'].toLowerCase() === 'x') { + newAsyObj.type = 'x'; + } else if (asyObj['@type'].toLowerCase() === 'y') { + newAsyObj.type = 'y'; + } else { + logme('ERROR: Attribute "type" for asymptote can be "x" or "y".'); + + return false; + } + } else { + logme('ERROR: Attribute "type" for asymptote is not specified.'); + + return false; + } + + if (typeof asyObj['#text'] === 'string') { + funcString = asyObj['#text']; + } else { + logme('ERROR: Function body for asymptote is not specified.'); + + return false; + } + + newAsyObj.color = '#000'; + if (typeof asyObj['@color'] === 'string') { + newAsyObj.color = asyObj['@color']; + } + + newAsyObj.label = false; + if ( + (asyObj.hasOwnProperty('@label') === true) && + (typeof asyObj['@label'] === 'string') + ) { + newAsyObj.label = asyObj['@label']; + } + + funcString = $('
').html(funcString).text(); + + disableAutoReturn = asyObj['@disable_auto_return']; + if ( + (disableAutoReturn === undefined) || + ( + (typeof disableAutoReturn === 'string') && + (disableAutoReturn.toLowerCase() !== 'true') + ) + ) { + if (funcString.search(/return/i) === -1) { + funcString = 'return ' + funcString; + } + } else { + if (funcString.search(/return/i) === -1) { + logme( + 'ERROR: You have specified a JavaScript ' + + 'function without a "return" statemnt. Your ' + + 'function will return "undefined" by default.' + ); + } + } + + paramNames.push(funcString); + + try { + func = Function.apply(null, paramNames); + } catch (err) { + logme('ERROR: Asymptote function body could not be converted to function object.'); + logme('Error message: "".' + err.message); + + return false; + } + + paramNames.pop(); + + newAsyObj.func = func; + asymptotes.push(newAsyObj); + + return true; + } + } + + function setGraphAxes() { + xaxis = { + 'tickFormatter': null + }; + + if (typeof config.plot['xticks'] === 'string') { + if (processTicks(config.plot['xticks'], xaxis, 'xunits') === false) { + logme('ERROR: Could not process the ticks for x-axis.'); + + return false; + } + } else { + logme('MESSAGE: "xticks" were not specified. Using defaults.'); + + return false; + } + + yaxis = { + 'tickFormatter': null + }; + if (typeof config.plot['yticks'] === 'string') { + if (processTicks(config.plot['yticks'], yaxis, 'yunits') === false) { + logme('ERROR: Could not process the ticks for y-axis.'); + + return false; + } + } else { + logme('MESSAGE: "yticks" were not specified. Using defaults.'); + + return false; + } + + xTicksNames = null; + yTicksNames = null; + + if (checkForTicksNames('x') === false) { + return false; + } + + if (checkForTicksNames('y') === false) { + return false; + } + + return true; + + // + // function checkForTicksNames(axisName) + // + // The parameter "axisName" can be either "x" or "y" (string). 
Depending on it, the function + // will set "xTicksNames" or "yTicksNames" private variable. + // + // This function does not return anything. It sets the private variable "xTicksNames" ("yTicksNames") + // to the object converted by JSON.parse from the XML parameter "plot.xticks_names" ("plot.yticks_names"). + // If the "plot.xticks_names" ("plot.yticks_names") is missing or it is not a valid JSON string, then + // "xTicksNames" ("yTicksNames") will be set to "null". + // + // Depending on the "xTicksNames" ("yTicksNames") being "null" or an object, the plot will either draw + // number ticks, or use the names specified by the opbject. + // + function checkForTicksNames(axisName) { + var tmpObj; + + if ((axisName !== 'x') && (axisName !== 'y')) { + // This is not an error. This funcion should simply stop executing. + + return true; + } + + if ( + (config.plot.hasOwnProperty(axisName + 'ticks_names') === true) || + (typeof config.plot[axisName + 'ticks_names'] === 'string') + ) { + try { + tmpObj = JSON.parse(config.plot[axisName + 'ticks_names']); + } catch (err) { + logme( + 'ERROR: plot.' + axisName + 'ticks_names is not a valid JSON string.', + 'Error message: "' + err.message + '".' + ); + + return false; + } + + if (axisName === 'x') { + xTicksNames = tmpObj; + xaxis.tickFormatter = xAxisTickFormatter; + } + // At this point, we are certain that axisName = 'y'. + else { + yTicksNames = tmpObj; + yaxis.tickFormatter = yAxisTickFormatter; + } + } + } + + function processTicks(ticksStr, ticksObj, unitsType) { + var ticksBlobs, tempFloat, tempTicks, c1, c2; + + // The 'ticks' setting is a string containing 3 floating-point + // numbers. + ticksBlobs = ticksStr.split(','); + + if (ticksBlobs.length !== 3) { + logme('ERROR: Did not get 3 blobs from ticksStr = "' + ticksStr + '".'); + + return false; + } + + tempFloat = parseFloat(ticksBlobs[0]); + if (isNaN(tempFloat) === false) { + ticksObj.min = tempFloat; + } else { + logme('ERROR: Invalid "min". 
ticksBlobs[0] = ', ticksBlobs[0]); + + return false; + } + + tempFloat = parseFloat(ticksBlobs[1]); + if (isNaN(tempFloat) === false) { + ticksObj.tickSize = tempFloat; + } else { + logme('ERROR: Invalid "tickSize". ticksBlobs[1] = ', ticksBlobs[1]); + + return false; + } + + tempFloat = parseFloat(ticksBlobs[2]); + if (isNaN(tempFloat) === false) { + ticksObj.max = tempFloat; + } else { + logme('ERROR: Invalid "max". ticksBlobs[2] = ', ticksBlobs[2]); + + return false; + } + + // Is the starting tick to the left of the ending tick (on the + // x-axis)? If not, set default starting and ending tick. + if (ticksObj.min >= ticksObj.max) { + logme('ERROR: Ticks min >= max.'); + + return false; + } + + // Make sure the range makes sense - i.e. that there are at + // least 3 ticks. If not, set a tickSize which will produce + // 11 ticks. tickSize is the spacing between the ticks. + if (ticksObj.tickSize > ticksObj.max - ticksObj.min) { + logme('ERROR: tickSize > max - min.'); + + return false; + } + + // units: change last tick to units + if (typeof config.plot[unitsType] === 'string') { + tempTicks = []; + + for (c1 = ticksObj.min; c1 <= ticksObj.max; c1 += ticksObj.tickSize) { + c2 = roundToPrec(c1, ticksObj.tickSize); + tempTicks.push([c2, c2]); + } + + tempTicks.pop(); + tempTicks.push([ + roundToPrec(ticksObj.max, ticksObj.tickSize), + config.plot[unitsType] + ]); + + ticksObj.tickSize = null; + ticksObj.ticks = tempTicks; + } + + return true; + + function roundToPrec(num, prec) { + var c1, tn1, tn2, digitsBefore, digitsAfter; + + tn1 = Math.abs(num); + tn2 = Math.abs(prec); + + // Find out number of digits BEFORE the decimal point. + c1 = 0; + tn1 = Math.abs(num); + while (tn1 >= 1) { + c1 += 1; + + tn1 /= 10; + } + digitsBefore = c1; + + // Find out number of digits AFTER the decimal point. 
+ c1 = 0; + tn1 = Math.abs(num); + while (Math.round(tn1) !== tn1) { + c1 += 1; + + tn1 *= 10; + } + digitsAfter = c1; + + // For precision, find out number of digits AFTER the + // decimal point. + c1 = 0; + while (Math.round(tn2) !== tn2) { + c1 += 1; + + tn2 *= 10; + } + + // If precision is more than 1 (no digits after decimal + // points). + if (c1 === 0) { + return num; + } + + // If the precision contains digits after the decimal + // point, we apply special rules. + else { + tn1 = Math.abs(num); + + // if (digitsAfter > c1) { + tn1 = tn1.toFixed(c1); + // } else { + // tn1 = tn1.toPrecision(digitsBefore + digitsAfter); + // } + } + + if (num < 0) { + return -tn1; + } + + return tn1; + } + } + } + + function setGraphXRange() { + var xRangeStr, xRangeBlobs, tempNum, allParamNames, funcString, + disableAutoReturn; + + xrange = {}; + + if ($.isPlainObject(config.plot.xrange) === false) { + logme( + 'ERROR: Expected config.plot.xrange to be an object. ' + + 'It is not.' + ); + logme('config.plot.xrange = ', config.plot.xrange); + + return false; + } + + if (config.plot.xrange.hasOwnProperty('min') === false) { + logme( + 'ERROR: Expected config.plot.xrange.min to be ' + + 'present. It is not.' 
+ ); + + return false; + } + + disableAutoReturn = false; + if (typeof config.plot.xrange.min === 'string') { + funcString = config.plot.xrange.min; + } else if ( + ($.isPlainObject(config.plot.xrange.min) === true) && + (config.plot.xrange.min.hasOwnProperty('#text') === true) && + (typeof config.plot.xrange.min['#text'] === 'string') + ) { + funcString = config.plot.xrange.min['#text']; + + disableAutoReturn = + config.plot.xrange.min['@disable_auto_return']; + if ( + (disableAutoReturn === undefined) || + ( + (typeof disableAutoReturn === 'string') && + (disableAutoReturn.toLowerCase() !== 'true') + ) + ) { + disableAutoReturn = false; + } else { + disableAutoReturn = true; + } + } else { + logme( + 'ERROR: Could not get a function definition for ' + + 'xrange.min property.' + ); + + return false; + } + + funcString = $('
').html(funcString).text(); + + if (disableAutoReturn === false) { + if (funcString.search(/return/i) === -1) { + funcString = 'return ' + funcString; + } + } else { + if (funcString.search(/return/i) === -1) { + logme( + 'ERROR: You have specified a JavaScript ' + + 'function without a "return" statemnt. Your ' + + 'function will return "undefined" by default.' + ); + } + } + + allParamNames = state.getAllParameterNames(); + + allParamNames.push(funcString); + try { + xrange.min = Function.apply(null, allParamNames); + } catch (err) { + logme( + 'ERROR: could not create a function from the string "' + + funcString + '" for xrange.min.' + ); + logme('Error message: "' + err.message + '"'); + + $('#' + gstId).html( + '
' + 'ERROR IN ' + + 'XML: Could not create a function from the string "' + + funcString + '" for xrange.min.' + '
' + ); + $('#' + gstId).append( + '
' + 'Error ' + + 'message: "' + err.message + '".' + '
' + ); + + return false; + } + allParamNames.pop(); + + if (config.plot.xrange.hasOwnProperty('max') === false) { + logme( + 'ERROR: Expected config.plot.xrange.max to be ' + + 'present. It is not.' + ); + + return false; + } + + disableAutoReturn = false; + if (typeof config.plot.xrange.max === 'string') { + funcString = config.plot.xrange.max; + } else if ( + ($.isPlainObject(config.plot.xrange.max) === true) && + (config.plot.xrange.max.hasOwnProperty('#text') === true) && + (typeof config.plot.xrange.max['#text'] === 'string') + ) { + funcString = config.plot.xrange.max['#text']; + + disableAutoReturn = + config.plot.xrange.max['@disable_auto_return']; + if ( + (disableAutoReturn === undefined) || + ( + (typeof disableAutoReturn === 'string') && + (disableAutoReturn.toLowerCase() !== 'true') + ) + ) { + disableAutoReturn = false; + } else { + disableAutoReturn = true; + } + } else { + logme( + 'ERROR: Could not get a function definition for ' + + 'xrange.max property.' + ); + + return false; + } + + funcString = $('
').html(funcString).text(); + + if (disableAutoReturn === false) { + if (funcString.search(/return/i) === -1) { + funcString = 'return ' + funcString; + } + } else { + if (funcString.search(/return/i) === -1) { + logme( + 'ERROR: You have specified a JavaScript ' + + 'function without a "return" statemnt. Your ' + + 'function will return "undefined" by default.' + ); + } + } + + allParamNames.push(funcString); + try { + xrange.max = Function.apply(null, allParamNames); + } catch (err) { + logme( + 'ERROR: could not create a function from the string "' + + funcString + '" for xrange.max.' + ); + logme('Error message: "' + err.message + '"'); + + $('#' + gstId).html( + '
' + 'ERROR IN ' + + 'XML: Could not create a function from the string "' + + funcString + '" for xrange.max.' + '
' + ); + $('#' + gstId).append( + '
' + 'Error message: "' + + err.message + '".' + '
' + ); + + return false; + } + allParamNames.pop(); + + tempNum = parseInt(config.plot.num_points, 10); + if (isFinite(tempNum) === false) { + tempNum = plotDiv.width() / 5.0; + } + + if ( + (tempNum < 2) && + (tempNum > 1000) + ) { + logme( + 'ERROR: Number of points is outside the allowed range ' + + '[2, 1000]' + ); + logme('config.plot.num_points = ' + tempNum); + + return false; + } + + numPoints = tempNum; + + return true; + } + + function createFunctions() { + var c1; + + functions = []; + + if (typeof config.functions === 'undefined') { + logme('ERROR: config.functions is undefined.'); + + return; + } + + if (typeof config.functions.function === 'string') { + + // If just one function string is present. + addFunction(config.functions.function); + + } else if ($.isPlainObject(config.functions.function) === true) { + + // If a function is present, but it also has properties + // defined. + callAddFunction(config.functions.function); + + } else if ($.isArray(config.functions.function)) { + + // If more than one function is defined. + for (c1 = 0; c1 < config.functions.function.length; c1 += 1) { + + // For each definition, we must check if it is a simple + // string definition, or a complex one with properties. + if (typeof config.functions.function[c1] === 'string') { + + // Simple string. + addFunction(config.functions.function[c1]); + + } else if ($.isPlainObject(config.functions.function[c1])) { + + // Properties are present. + callAddFunction(config.functions.function[c1]); + + } + } + } else { + logme('ERROR: config.functions.function is of an unsupported type.'); + + return; + } + + return; + + // This function will reduce code duplication. We have to call + // the function addFunction() several times passing object + // properties as parameters. Rather than writing them out every + // time, we will have a single place where it is done. 
+ function callAddFunction(obj) { + if ( + (obj.hasOwnProperty('@output')) && + (typeof obj['@output'] === 'string') + ) { + + // If this function is meant to be calculated for an + // element then skip it. + if ((obj['@output'].toLowerCase() === 'element') || + (obj['@output'].toLowerCase() === 'none')) { + return; + } + + // If this function is meant to be calculated for a + // dynamic element in a label then skip it. + else if (obj['@output'].toLowerCase() === 'plot_label') { + return; + } + + // It is an error if '@output' is not 'element', + // 'plot_label', or 'graph'. However, if the '@output' + // attribute is omitted, we will not have reached this. + else if (obj['@output'].toLowerCase() !== 'graph') { + logme( + 'ERROR: Function "output" attribute can be ' + + 'either "element", "plot_label", "none" or "graph".' + ); + + return; + } + + } + + // The user did not specify an "output" attribute, or it is + // "graph". + addFunction( + obj['#text'], + obj['@color'], + obj['@line'], + obj['@dot'], + obj['@label'], + obj['@point_size'], + obj['@fill_area'], + obj['@bar'], + obj['@disable_auto_return'] + ); + } + + function addFunction(funcString, color, line, dot, label, + pointSize, fillArea, bar, disableAutoReturn) { + + var newFunctionObject, func, paramNames, c1, rgxp; + + // The main requirement is function string. Without it we can't + // create a function, and the series cannot be calculated. + if (typeof funcString !== 'string') { + return; + } + + // Make sure that any HTML entities that were escaped will be + // unescaped. This is done because if a string with escaped + // HTML entities is passed to the Function() constructor, it + // will break. + funcString = $('
').html(funcString).text(); + + // If the user did not specifically turn off this feature, + // check if the function string contains a 'return', and + // prepend a 'return ' to the string if one, or more, is not + // found. + if ( + (disableAutoReturn === undefined) || + ( + (typeof disableAutoReturn === 'string') && + (disableAutoReturn.toLowerCase() !== 'true') + ) + ) { + if (funcString.search(/return/i) === -1) { + funcString = 'return ' + funcString; + } + } else { + if (funcString.search(/return/i) === -1) { + logme( + 'ERROR: You have specified a JavaScript ' + + 'function without a "return" statemnt. Your ' + + 'function will return "undefined" by default.' + ); + } + } + + // Some defaults. If no options are set for the graph, we will + // make sure that at least a line is drawn for a function. + newFunctionObject = { + 'line': true, + 'dot': false, + 'bars': false + }; + + // Get all of the parameter names defined by the user in the + // XML. + paramNames = state.getAllParameterNames(); + + // The 'x' is always one of the function parameters. + paramNames.push('x'); + + // Must make sure that the function body also gets passed to + // the Function constructor. + paramNames.push(funcString); + + // Create the function from the function string, and all of the + // available parameters AND the 'x' variable as it's parameters. + // For this we will use the built-in Function object + // constructor. + // + // If something goes wrong during this step, most + // likely the user supplied an invalid JavaScript function body + // string. In this case we will not proceed. + try { + func = Function.apply(null, paramNames); + } catch (err) { + logme( + 'ERROR: The function body "' + + funcString + + '" was not converted by the Function constructor.' + ); + logme('Error message: "' + err.message + '"'); + + $('#' + gstId).html('
' + 'ERROR IN XML: Could not create a function from the string "' + funcString + '".' + '
'); + $('#' + gstId).append('
' + 'Error message: "' + err.message + '".' + '
'); + + paramNames.pop(); + paramNames.pop(); + + return; + } + + // Return the array back to original state. Remember that it is + // a pointer to original array which is stored in state object. + paramNames.pop(); + paramNames.pop(); + + newFunctionObject['func'] = func; + + if (typeof color === 'string') { + newFunctionObject['color'] = color; + } + + if (typeof line === 'string') { + if (line.toLowerCase() === 'true') { + newFunctionObject['line'] = true; + } else if (line.toLowerCase() === 'false') { + newFunctionObject['line'] = false; + } + } + + if (typeof dot === 'string') { + if (dot.toLowerCase() === 'true') { + newFunctionObject['dot'] = true; + } else if (dot.toLowerCase() === 'false') { + newFunctionObject['dot'] = false; + } + } + + if (typeof pointSize === 'string') { + newFunctionObject['pointSize'] = pointSize; + } + + if (typeof bar === 'string') { + if (bar.toLowerCase() === 'true') { + newFunctionObject['bars'] = true; + } else if (bar.toLowerCase() === 'false') { + newFunctionObject['bars'] = false; + } + } + + if (newFunctionObject['bars'] === true) { + newFunctionObject['line'] = false; + newFunctionObject['dot'] = false; + // To do: See if need to do anything here. + } else if ( + (newFunctionObject['dot'] === false) && + (newFunctionObject['line'] === false) + ) { + newFunctionObject['line'] = true; + } + + if (newFunctionObject['line'] === true) { + if (typeof fillArea === 'string') { + if (fillArea.toLowerCase() === 'true') { + newFunctionObject['fillArea'] = true; + } else if (fillArea.toLowerCase() === 'false') { + newFunctionObject['fillArea'] = false; + } else { + logme('ERROR: The attribute fill_area should be either "true" or "false".'); + logme('fill_area = "' + fillArea + '".'); + + return; + } + } + } + + if (typeof label === 'string') { + + newFunctionObject.specialLabel = false; + newFunctionObject.pldeHash = []; + + // Let's check the label against all of the plde objects. 
+ // plde is an abbreviation for Plot Label Dynamic Elements. + for (c1 = 0; c1 < state.plde.length; c1 += 1) { + rgxp = new RegExp(state.plde[c1].elId, 'g'); + + // If we find a dynamic element in the label, we will + // hash the current plde object, and indicate that this + // is a special label. + if (rgxp.test(label) === true) { + newFunctionObject.specialLabel = true; + newFunctionObject.pldeHash.push(state.plde[c1]); + } + } + + newFunctionObject.label = label; + } else { + newFunctionObject.label = false; + } + + functions.push(newFunctionObject); + } + } + + // The callback that will be called whenever a constant changes (gets + // updated via a slider or a text input). + function onUpdatePlot(event) { + if (generateData() === true) { + updatePlot(); + } + } + + function generateData() { + var c0, c1, c3, functionObj, seriesObj, dataPoints, paramValues, x, y, + start, end, step, numNotUndefined; + + paramValues = state.getAllParameterValues(); + + dataSeries = []; + + for (c0 = 0; c0 < functions.length; c0 += 1) { + functionObj = functions[c0]; + + try { + start = xrange.min.apply(window, paramValues); + } catch (err) { + logme('ERROR: Could not determine xrange start.'); + logme('Error message: "' + err.message + '".'); + + $('#' + gstId).html('
' + 'ERROR IN XML: Could not determine xrange start from defined function.' + '
'); + $('#' + gstId).append('
' + 'Error message: "' + err.message + '".' + '
'); + + return false; + } + try { + end = xrange.max.apply(window, paramValues); + } catch (err) { + logme('ERROR: Could not determine xrange end.'); + logme('Error message: "' + err.message + '".'); + + $('#' + gstId).html('
' + 'ERROR IN XML: Could not determine xrange end from defined function.' + '
'); + $('#' + gstId).append('
' + 'Error message: "' + err.message + '".' + '
'); + + return false; + } + + seriesObj = {}; + dataPoints = []; + + // For counting number of points added. In the end we will + // compare this number to 'numPoints' specified in the config + // JSON. + c1 = 0; + + step = (end - start) / (numPoints - 1); + + // Generate the data points. + for (x = start; x <= end; x += step) { + + // Push the 'x' variable to the end of the parameter array. + paramValues.push(x); + + // We call the user defined function, passing all of the + // available parameter values. Inside this function they + // will be accessible by their names. + try { + y = functionObj.func.apply(window, paramValues); + } catch (err) { + logme('ERROR: Could not generate data.'); + logme('Error message: "' + err.message + '".'); + + $('#' + gstId).html('
' + 'ERROR IN XML: Could not generate data from defined function.' + '
'); + $('#' + gstId).append('
' + 'Error message: "' + err.message + '".' + '
'); + + return false; + } + + // Return the paramValues array to how it was before we + // added 'x' variable to the end of it. + paramValues.pop(); + + // Add the generated point to the data points set. + dataPoints.push([x, y]); + + c1 += 1; + + } + + // If the last point did not get included because of rounding + // of floating-point number addition, then we will include it + // manually. + if (c1 != numPoints) { + x = end; + paramValues.push(x); + try { + y = functionObj.func.apply(window, paramValues); + } catch (err) { + logme('ERROR: Could not generate data.'); + logme('Error message: "' + err.message + '".'); + + $('#' + gstId).html('
' + 'ERROR IN XML: Could not generate data from function.' + '
'); + $('#' + gstId).append('
' + 'Error message: "' + err.message + '".' + '
'); + + return false; + } + paramValues.pop(); + dataPoints.push([x, y]); + } + + // Put the entire data points set into the series object. + seriesObj.data = dataPoints; + + // See if user defined a specific color for this function. + if (functionObj.hasOwnProperty('color') === true) { + seriesObj.color = functionObj.color; + } + + // See if a user defined a label for this function. + if (functionObj.label !== false) { + if (functionObj.specialLabel === true) { + (function (c1) { + var tempLabel; + + tempLabel = functionObj.label; + + while (c1 < functionObj.pldeHash.length) { + tempLabel = tempLabel.replace( + functionObj.pldeHash[c1].elId, + functionObj.pldeHash[c1].func.apply( + window, + state.getAllParameterValues() + ) + ); + + c1 += 1; + } + + seriesObj.label = tempLabel; + }(0)); + } else { + seriesObj.label = functionObj.label; + } + } + + // Should the data points be connected by a line? + seriesObj.lines = { + 'show': functionObj.line + }; + + if (functionObj.hasOwnProperty('fillArea') === true) { + seriesObj.lines.fill = functionObj.fillArea; + } + + // Should each data point be represented by a point on the + // graph? + seriesObj.points = { + 'show': functionObj.dot + }; + + seriesObj.bars = { + 'show': functionObj.bars, + 'barWidth': graphBarWidth + }; + + if (graphBarAlign !== null) { + seriesObj.bars.align = graphBarAlign; + } + + if (functionObj.hasOwnProperty('pointSize')) { + seriesObj.points.radius = functionObj.pointSize; + } + + // Add the newly created series object to the series set which + // will be plotted by Flot. + dataSeries.push(seriesObj); + } + + if (graphBarAlign === null) { + for (c0 = 0; c0 < numPoints; c0 += 1) { + // Number of points that have a value other than 'undefined' (undefined). 
+ numNotUndefined = 0; + + for (c1 = 0; c1 < dataSeries.length; c1 += 1) { + if (dataSeries[c1].bars.show === false) { + continue; + } + + if (isFinite(parseInt(dataSeries[c1].data[c0][1])) === true) { + numNotUndefined += 1; + } + } + + c3 = 0; + for (c1 = 0; c1 < dataSeries.length; c1 += 1) { + if (dataSeries[c1].bars.show === false) { + continue; + } + + dataSeries[c1].data[c0][0] -= graphBarWidth * (0.5 * numNotUndefined - c3); + + if (isFinite(parseInt(dataSeries[c1].data[c0][1])) === true) { + c3 += 1; + } + } + } + } + + for (c0 = 0; c0 < asymptotes.length; c0 += 1) { + + // If the user defined a label for this asympote, then the + // property 'label' will be a string (in the other case it is + // a boolean value 'false'). We will create an empty data set, + // and add to it a label. This solution is a bit _wrong_ , but + // it will have to do for now. Flot JS does not provide a way + // to add labels to markings, and we use markings to generate + // asymptotes. + if (asymptotes[c0].label !== false) { + dataSeries.push({ + 'data': [], + 'label': asymptotes[c0].label, + 'color': asymptotes[c0].color + }); + } + + } + + return true; + } // End-of: function generateData + + function updatePlot() { + var paramValues, plotObj; + + paramValues = state.getAllParameterValues(); + + if (xaxis.tickFormatter !== null) { + xaxis.ticks = null; + } + + if (yaxis.tickFormatter !== null) { + yaxis.ticks = null; + } + + // Tell Flot to draw the graph to our specification. + plotObj = $.plot( + plotDiv, + dataSeries, + { + 'xaxis': xaxis, + 'yaxis': yaxis, + 'legend': { + + // To show the legend or not. Note, even if 'show' is + // 'true', the legend will only show if labels are + // provided for at least one of the series that are + // going to be plotted. + 'show': true, + + // A floating point number in the range [0, 1]. The + // smaller the number, the more transparent will the + // legend background become. 
+ 'backgroundOpacity': 0 + + }, + 'grid': { + 'markings': generateMarkings() + } + } + ); + + updateMovingLabels(); + + // The first time that the graph gets added to the page, the legend + // is created from scratch. When it appears, MathJax works some + // magic, and all of the specially marked TeX gets rendered nicely. + // The next time when we update the graph, no such thing happens. + // We must ask MathJax to typeset the legend again (well, we will + // ask it to look at our entire graph DIV), the next time it's + // worker queue is available. + MathJax.Hub.Queue([ + 'Typeset', + MathJax.Hub, + plotDiv.attr('id') + ]); + + return; + + function updateMovingLabels() { + var c1, labelCoord, pointOffset; + + for (c1 = 0; c1 < movingLabels.length; c1 += 1) { + if (movingLabels[c1].el === null) { + movingLabels[c1].el = $( + '
' + + movingLabels[c1].labelText + + '
' + ); + movingLabels[c1].el.css('position', 'absolute'); + movingLabels[c1].el.css('color', movingLabels[c1].fontColor); + movingLabels[c1].el.css('font-weight', movingLabels[c1].fontWeight); + movingLabels[c1].el.appendTo(plotDiv); + + movingLabels[c1].elWidth = movingLabels[c1].el.width(); + movingLabels[c1].elHeight = movingLabels[c1].el.height(); + } else { + movingLabels[c1].el.detach(); + movingLabels[c1].el.appendTo(plotDiv); + } + + labelCoord = movingLabels[c1].func.apply(window, paramValues); + + pointOffset = plotObj.pointOffset({'x': labelCoord.x, 'y': labelCoord.y}); + + movingLabels[c1].el.css('left', pointOffset.left - 0.5 * movingLabels[c1].elWidth); + movingLabels[c1].el.css('top', pointOffset.top - 0.5 * movingLabels[c1].elHeight); + } + } + + // Generate markings to represent asymptotes defined by the user. + // See the following function for more details: + // + // function processAsymptote() + // + function generateMarkings() { + var c1, asymptote, markings, val; + + markings = []; + + for (c1 = 0; c1 < asymptotes.length; c1 += 1) { + asymptote = asymptotes[c1]; + + try { + val = asymptote.func.apply(window, paramValues); + } catch (err) { + logme('ERROR: Could not generate value from asymptote function.'); + logme('Error message: ', err.message); + + continue; + } + + if (asymptote.type === 'x') { + markings.push({ + 'color': asymptote.color, + 'lineWidth': 2, + 'xaxis': { + 'from': val, + 'to': val + } + }); + } else { + markings.push({ + 'color': asymptote.color, + 'lineWidth': 2, + 'yaxis': { + 'from': val, + 'to': val + } + }); + + } + } + + return markings; + } + } + + function xAxisTickFormatter(val, axis) { + if (xTicksNames.hasOwnProperty(val.toFixed(axis.tickDecimals)) === true) { + return xTicksNames[val.toFixed(axis.tickDecimals)]; + } + + return ''; + } + + function yAxisTickFormatter(val, axis) { + if (yTicksNames.hasOwnProperty(val.toFixed(axis.tickDecimals)) === true) { + return yTicksNames[val.toFixed(axis.tickDecimals)]; + } 
+ + return ''; + } + } + + +}); + +// End of wrapper for RequireJS. As you can see, we are passing +// namespaced Require JS variables to an anonymous function. Within +// it, you can use the standard requirejs(), require(), and define() +// functions as if they were in the global namespace. +}(RequireJS.requirejs, RequireJS.require, RequireJS.define)); // End-of: (function (requirejs, require, define) diff --git a/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/gst.js b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/gst.js new file mode 100644 index 0000000000..1434d05f70 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/gst.js @@ -0,0 +1,20 @@ +/* + * We will add a function that will be called for all GraphicalSliderTool + * xmodule module instances. It must be available globally by design of + * xmodule. + */ +window.GraphicalSliderTool = function (el) { + // All the work will be performed by the GstMain module. We will get access + // to it, and all it's dependencies, via Require JS. Currently Require JS + // is namespaced and is available via a global object RequireJS. + RequireJS.require(['GstMain'], function (GstMain) { + // The GstMain module expects the DOM ID of a Graphical Slider Tool + // element. Since we are given a
element which might in + // theory contain multiple graphical_slider_tool
elements (each + // with a unique DOM ID), we will iterate over all children, and for + // each match, we will call GstMain module. + $(el).children('.graphical_slider_tool').each(function (index, value) { + GstMain($(value).attr('id')); + }); + }); +}; diff --git a/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/gst_main.js b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/gst_main.js new file mode 100644 index 0000000000..34b54b4216 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/gst_main.js @@ -0,0 +1,84 @@ +// Wrapper for RequireJS. It will make the standard requirejs(), require(), and +// define() functions from Require JS available inside the anonymous function. +(function (requirejs, require, define) { + +define( + 'GstMain', + + // Even though it is not explicitly in this module, we have to specify + // 'GeneralMethods' as a dependency. It expands some of the core JS objects + // with additional useful methods that are used in other modules. + ['State', 'GeneralMethods', 'Sliders', 'Inputs', 'Graph', 'ElOutput', 'GLabelElOutput', 'logme'], + function (State, GeneralMethods, Sliders, Inputs, Graph, ElOutput, GLabelElOutput, logme) { + + return GstMain; + + function GstMain(gstId) { + var config, gstClass, state; + + if ($('#' + gstId).attr('data-processed') !== 'processed') { + $('#' + gstId).attr('data-processed', 'processed'); + } else { + logme('MESSAGE: Already processed GST with ID ' + gstId + '. Skipping.'); + + return; + } + + // Get the JSON configuration, parse it, and store as an object. + try { + config = JSON.parse($('#' + gstId + '_json').html()).root; + } catch (err) { + logme('ERROR: could not parse config JSON.'); + logme('$("#" + gstId + "_json").html() = ', $('#' + gstId + '_json').html()); + logme('JSON.parse(...) = ', JSON.parse($('#' + gstId + '_json').html())); + logme('config = ', config); + + return; + } + + // Get the class name of the GST. 
All elements are assigned a class + // name that is based on the class name of the GST. For example, inputs + // are assigned a class name '{GST class name}_input'. + if (typeof config['@class'] !== 'string') { + logme('ERROR: Could not get the class name of GST.'); + logme('config["@class"] = ', config['@class']); + + return; + } + gstClass = config['@class']; + + // Parse the configuration settings for parameters, and store them in a + // state object. + state = State(gstId, config); + + // It is possible that something goes wrong while extracting parameters + // from the JSON config object. In this case, we will not continue. + if (state === undefined) { + logme('ERROR: The state object was not initialized properly.'); + + return; + } + + // Create the sliders and the text inputs, attaching them to + // appropriate parameters. + Sliders(gstId, state); + Inputs(gstId, gstClass, state); + + // Configure functions that output to an element instead of the graph. + ElOutput(config, state); + + // Configure functions that output to an element instead of the graph + // label. + GLabelElOutput(config, state); + + // Configure and display the graph. Attach event for the graph to be + // updated on any change of a slider or a text input. + Graph(gstId, config, state); + } +}); + +// End of wrapper for RequireJS. As you can see, we are passing +// namespaced Require JS variables to an anonymous function. Within +// it, you can use the standard requirejs(), require(), and define() +// functions as if they were in the global namespace. +}(RequireJS.requirejs, RequireJS.require, RequireJS.define)); // End-of: (function (requirejs, require, define) diff --git a/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/inputs.js b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/inputs.js new file mode 100644 index 0000000000..a04ed113ec --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/inputs.js @@ -0,0 +1,88 @@ +// Wrapper for RequireJS. 
It will make the standard requirejs(), require(), and +// define() functions from Require JS available inside the anonymous function. +(function (requirejs, require, define) { + +define('Inputs', ['logme'], function (logme) { + return Inputs; + + function Inputs(gstId, gstClass, state) { + var c1, paramName, allParamNames; + + allParamNames = state.getAllParameterNames(); + + for (c1 = 0; c1 < allParamNames.length; c1 += 1) { + $('#' + gstId).find('.' + gstClass + '_input').each(function (index, value) { + var inputDiv, paramName; + + paramName = allParamNames[c1]; + inputDiv = $(value); + + if (paramName === inputDiv.data('var')) { + createInput(inputDiv, paramName); + } + }); + } + + return; + + function createInput(inputDiv, paramName) { + var paramObj; + + paramObj = state.getParamObj(paramName); + + // Check that the retrieval went OK. + if (paramObj === undefined) { + logme('ERROR: Could not get a paramObj for parameter "' + paramName + '".'); + + return; + } + + // Bind a function to the 'change' event. Whenever the user changes + // the value of this text input, and presses 'enter' (or clicks + // somewhere else on the page), this event will be triggered, and + // our callback will be called. + inputDiv.bind('change', inputOnChange); + + inputDiv.val(paramObj.value); + + // Lets style the input element nicely. We will use the button() + // widget for this since there is no native widget for the text + // input. + inputDiv.button().css({ + 'font': 'inherit', + 'color': 'inherit', + 'text-align': 'left', + 'outline': 'none', + 'cursor': 'text', + 'height': '15px' + }); + + // Tell the parameter object from state that we are attaching a + // text input to it. Next time the parameter will be updated with + // a new value, tis input will also be updated. + paramObj.inputDivs.push(inputDiv); + + return; + + // Update the 'state' - i.e. set the value of the parameter this + // input is attached to to a new value. 
+ // + // This will cause the plot to be redrawn each time after the user + // changes the value in the input. Note that he has to either press + // 'Enter', or click somewhere else on the page in order for the + // 'change' event to be tiggered. + function inputOnChange(event) { + var inputDiv; + + inputDiv = $(this); + state.setParameterValue(paramName, inputDiv.val(), inputDiv); + } + } + } +}); + +// End of wrapper for RequireJS. As you can see, we are passing +// namespaced Require JS variables to an anonymous function. Within +// it, you can use the standard requirejs(), require(), and define() +// functions as if they were in the global namespace. +}(RequireJS.requirejs, RequireJS.require, RequireJS.define)); // End-of: (function (requirejs, require, define) diff --git a/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/jstat-1.0.0.min.js b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/jstat-1.0.0.min.js new file mode 100644 index 0000000000..7f9cd4a124 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/jstat-1.0.0.min.js @@ -0,0 +1,236 @@ +function jstat(){} +j=jstat;(function(){var initializing=false,fnTest=/xyz/.test(function(){xyz;})?/\b_super\b/:/.*/;this.Class=function(){};Class.extend=function(prop){var _super=this.prototype;initializing=true;var prototype=new this();initializing=false;for(var name in prop){prototype[name]=typeof prop[name]=="function"&&typeof _super[name]=="function"&&fnTest.test(prop[name])?(function(name,fn){return function(){var tmp=this._super;this._super=_super[name];var ret=fn.apply(this,arguments);this._super=tmp;return ret;};})(name,prop[name]):prop[name];} +function Class(){if(!initializing&&this.init) +this.init.apply(this,arguments);} +Class.prototype=prototype;Class.constructor=Class;Class.extend=arguments.callee;return 
Class;};})();jstat.ONE_SQRT_2PI=0.3989422804014327;jstat.LN_SQRT_2PI=0.9189385332046727417803297;jstat.LN_SQRT_PId2=0.225791352644727432363097614947;jstat.DBL_MIN=2.22507e-308;jstat.DBL_EPSILON=2.220446049250313e-16;jstat.SQRT_32=5.656854249492380195206754896838;jstat.TWO_PI=6.283185307179586;jstat.DBL_MIN_EXP=-999;jstat.SQRT_2dPI=0.79788456080287;jstat.LN_SQRT_PI=0.5723649429247;jstat.seq=function(min,max,length){var r=new Range(min,max,length);return r.getPoints();} +jstat.dnorm=function(x,mean,sd,log){if(mean==null)mean=0;if(sd==null)sd=1;if(log==null)log=false;var n=new NormalDistribution(mean,sd);if(!isNaN(x)){return n._pdf(x,log);}else if(x.length){var res=[];for(var i=0;i
');$('#'+hash).dialog({modal:false,width:475,height:475,resizable:true,resize:function(){$.plot($('#graph-'+hash),[series],flotOpt);},open:function(event,ui){var id='#graph-'+hash;$.plot($('#graph-'+hash),[series],flotOpt);}})} +jstat.log10=function(arg){return Math.log(arg)/Math.LN10;} +jstat.toSigFig=function(num,n){if(num==0){return 0;} +var d=Math.ceil(jstat.log10(num<0?-num:num));var power=n-parseInt(d);var magnitude=Math.pow(10,power);var shifted=Math.round(num*magnitude);return shifted/magnitude;} +jstat.trunc=function(x){return(x>0)?Math.floor(x):Math.ceil(x);} +jstat.isFinite=function(x){return(!isNaN(x)&&(x!=Number.POSITIVE_INFINITY)&&(x!=Number.NEGATIVE_INFINITY));} +jstat.dopois_raw=function(x,lambda,give_log){if(lambda==0){if(x==0){return(give_log)?0.0:1.0;} +return(give_log)?Number.NEGATIVE_INFINITY:0.0;} +if(!jstat.isFinite(lambda))return(give_log)?Number.NEGATIVE_INFINITY:0.0;if(x<0)return(give_log)?Number.NEGATIVE_INFINITY:0.0;if(x<=lambda*jstat.DBL_MIN){return(give_log)?-lambda:Math.exp(-lambda);} +if(lambda0.1*(x+np)){v=(x-np)/(x+np);s=(x-np)*v;ej=2*x*v;v=v*v;for(j=1;;j++){ej*=v;s1=s+ej/((j<<1)+1);if(s1==s) +return(s1);s=s1;}} +return(x*Math.log(x/np)+np-x);} +jstat.stirlerr=function(n){var S0=0.083333333333333333333;var S1=0.00277777777777777777778;var S2=0.00079365079365079365079365;var S3=0.000595238095238095238095238;var S4=0.0008417508417508417508417508;var 
sferr_halves=[0.0,0.1534264097200273452913848,0.0810614667953272582196702,0.0548141210519176538961390,0.0413406959554092940938221,0.03316287351993628748511048,0.02767792568499833914878929,0.02374616365629749597132920,0.02079067210376509311152277,0.01848845053267318523077934,0.01664469118982119216319487,0.01513497322191737887351255,0.01387612882307074799874573,0.01281046524292022692424986,0.01189670994589177009505572,0.01110455975820691732662991,0.010411265261972096497478567,0.009799416126158803298389475,0.009255462182712732917728637,0.008768700134139385462952823,0.008330563433362871256469318,0.007934114564314020547248100,0.007573675487951840794972024,0.007244554301320383179543912,0.006942840107209529865664152,0.006665247032707682442354394,0.006408994188004207068439631,0.006171712263039457647532867,0.005951370112758847735624416,0.005746216513010115682023589,0.005554733551962801371038690];var nn;if(n<=15.0){nn=n+n;if(nn==parseInt(nn))return(sferr_halves[parseInt(nn)]);return(jstat.lgamma(n+1.0)-(n+0.5)*Math.log(n)+n-jstat.LN_SQRT_2PI);} +nn=n*n;if(n>500)return((S0-S1/nn)/n);if(n>80)return((S0-(S1-S2/nn)/nn)/n);if(n>35)return((S0-(S1-(S2-S3/nn)/nn)/nn)/n);return((S0-(S1-(S2-(S3-S4/nn)/nn)/nn)/nn)/n);} +jstat.lgamma=function(x){function lgammafn_sign(x,sgn){var ans,y,sinpiy;var xmax=2.5327372760800758e+305;var dxrel=1.490116119384765696e-8;if(sgn!=null)sgn=1;if(isNaN(x))return x;if(x<0&&(Math.floor(-x)%2.0)==0) +if(sgn!=null)sgn=-1;if(x<=0&&x==jstat.trunc(x)){console.warn("Negative integer argument in lgammafn_sign");return Number.POSITIVE_INFINITY;} +y=Math.abs(x);if(y<=10)return Math.log(Math.abs(jstat.gamma(x)));if(y>xmax){console.warn("Illegal arguement passed to lgammafn_sign");return Number.POSITIVE_INFINITY;} +if(x>0){if(x>1e17){return(x*(Math.log(x)-1.0));}else if(x>4934720.0){return(jstat.LN_SQRT_2PI+(x-0.5)*Math.log(x)-x);}else{return jstat.LN_SQRT_2PI+(x-0.5)*Math.log(x)-x+jstat.lgammacor(x);}} +sinpiy=Math.abs(Math.sin(Math.PI*y));if(sinpiy==0){throw"Should 
never happen!!";} +ans=jstat.LN_SQRT_PId2+(x-0.5)*Math.log(y)-x-Math.log(sinpiy)-jstat.lgammacor(y);if(Math.abs((x-jstat.trunc(x-0.5))*ans/x)=jstat.DBL_MIN){res=1.0/y;}else{return(Number.POSITIVE_INFINITY);}}else if(y<12.0){yi=y;if(y<1.0){z=y;y+=1.0;}else{n=parseInt(y)-1;y-=parseFloat(n);z=y-1.0;} +xnum=0.0;xden=1.0;for(i=0;i<8;++i){xnum=(xnum+p[i])*z;xden=xden*z+q[i];} +res=xnum/xden+1.0;if(yiy){for(i=0;i=xmax){throw"Underflow error in lgammacor";}else if(xMAXIT){console.warn("a or b too big, or MAXIT too small in betacf: "+a+", "+b+", "+x+", "+h);return h;} +if(isNaN(h)){console.warn(a+", "+b+", "+x);} +return h;} +var bt;if(x<0.0||x>1.0){throw"bad x in routine incompleteBeta";} +if(x==0.0||x==1.0){bt=0.0;}else{bt=Math.exp(jstat.lgamma(a+b)-jstat.lgamma(a)-jstat.lgamma(b)+a*Math.log(x)+b*Math.log(1.0-x));} +if(x<(a+1.0)/(a+b+2.0)){return bt*betacf(a,b,x)/a;}else{return 1.0-bt*betacf(b,a,1.0-x)/b;}} +jstat.chebyshev=function(x,a,n){var b0,b1,b2,twox;var i;if(n<1||n>1000)return Number.NaN;if(x<-1.1||x>1.1)return Number.NaN;twox=x*2;b2=b1=0;b0=0;for(i=1;i<=n;i++){b2=b1;b1=b0;b0=twox*b1-b2+a[n-i];} +return(b0-b2)*0.5;} +jstat.fmin2=function(x,y){return(x1){return Math.log(1+x);} +for(var i=1;i0.697)return Math.exp(x)-1;if(a>1e-8){y=Math.exp(x)-1;}else{y=(x/2+1)*x;} +y-=(1+y)*(jstat.log1p(y)-x);return y;} +jstat.logBeta=function(a,b){var corr,p,q;p=q=a;if(bq)q=b;if(p<0){console.warn('Both arguements must be >= 0');return Number.NaN;} +else if(p==0){return Number.POSITIVE_INFINITY;} +else if(!jstat.isFinite(q)){return Number.NEGATIVE_INFINITY;} +if(p>=10){corr=jstat.lgammacor(p)+jstat.lgammacor(q)-jstat.lgammacor(p+q);return Math.log(q)*-0.5+jstat.LN_SQRT_2PI+corr ++(p-0.5)*Math.log(p/(p+q))+q*jstat.log1p(-p/(p+q));} +else if(q>=10){corr=jstat.lgammacor(q)-jstat.lgammacor(p+q);return jstat.lgamma(p)+corr+p-p*Math.log(p+q) ++(q-0.5)*jstat.log1p(-p/(p+q));} +else +return Math.log(jstat.gamma(p)*(jstat.gamma(q)/jstat.gamma(p+q)));} 
+jstat.dbinom_raw=function(x,n,p,q,give_log){if(give_log==null)give_log=false;var lf,lc;if(p==0){if(x==0){return(give_log)?0.0:1.0;}else{return(give_log)?Number.NEGATIVE_INFINITY:0.0;}} +if(q==0){if(x==n){return(give_log)?0.0:1.0;}else{return(give_log)?Number.NEGATIVE_INFINITY:0.0;}} +if(x==0){if(n==0)return(give_log)?0.0:1.0;lc=(p<0.1)?-jstat.bd0(n,n*q)-n*p:n*Math.log(q);return(give_log)?lc:Math.exp(lc);} +if(x==n){lc=(q<0.1)?-jstat.bd0(n,n*p)-n*q:n*Math.log(p);return(give_log)?lc:Math.exp(lc);} +if(x<0||x>n)return(give_log)?Number.NEGATIVE_INFINITY:0.0;lc=jstat.stirlerr(n)-jstat.stirlerr(x)-jstat.stirlerr(n-x)-jstat.bd0(x,n*p)-jstat.bd0(n-x,n*q);lf=Math.log(jstat.TWO_PI)+Math.log(x)+jstat.log1p(-x/n);return(give_log)?lc-0.5*lf:Math.exp(lc-0.5*lf);} +jstat.max=function(values){var max=Number.NEGATIVE_INFINITY;for(var i=0;imax){max=values[i];}} +return max;} +var Range=Class.extend({init:function(min,max,numPoints){this._minimum=parseFloat(min);this._maximum=parseFloat(max);this._numPoints=parseFloat(numPoints);},getMinimum:function(){return this._minimum;},getMaximum:function(){return this._maximum;},getNumPoints:function(){return this._numPoints;},getPoints:function(){var results=[];var x=this._minimum;var step=(this._maximum-this._minimum)/(this._numPoints-1);for(var i=0;ieps){xsq=x*x;xnum=a[4]*xsq;xden=xsq;for(i=0;i<3;++i){xnum=(xnum+a[i])*xsq;xden=(xden+b[i])*xsq;}}else{xnum=xden=0.0;} +temp=x*(xnum+a[3])/(xden+b[3]);if(lower)cum=0.5+temp;if(upper)ccum=0.5-temp;if(log_p){if(lower)cum=Math.log(cum);if(upper)ccum=Math.log(ccum);}}else if(y<=jstat.SQRT_32){xnum=c[8]*y;xden=y;for(i=0;i<7;++i){xnum=(xnum+c[i])*y;xden=(xden+d[i])*y;} +temp=(xnum+c[7])/(xden+d[7]);xsq=jstat.trunc(x*16)/16;del=(x-xsq)*(x+xsq);if(log_p){cum=(-xsq*xsq*0.5)+(-del*0.5)+Math.log(temp);if((lower&&x>0.)||(upper&&x<=0.)) +ccum=jstat.log1p(-Math.exp(-xsq*xsq*0.5)*Math.exp(-del*0.5)*temp);} +else{cum=Math.exp(-xsq*xsq*0.5)*Math.exp(-del*0.5)*temp;ccum=1.0-cum;} 
+if(x>0.0){temp=cum;if(lower){cum=ccum;} +ccum=temp;}} +else if((log_p&&y<1e170)||(lower&&-37.51930.)||(upper&&x<=0.)) +ccum=jstat.log1p(-Math.exp(-xsq*xsq*0.5)*Math.exp(-del*0.5)*temp);} +else{cum=Math.exp(-xsq*xsq*0.5)*Math.exp(-del*0.5)*temp;ccum=1.0-cum;} +if(x>0.0){temp=cum;if(lower){cum=ccum;} +ccum=temp;}}else{if(x>0){cum=(log_p)?0.0:1.0;ccum=(log_p)?Number.NEGATIVE_INFINITY:0.0;}else{cum=(log_p)?Number.NEGATIVE_INFINITY:0.0;ccum=(log_p)?0.0:1.0;}} +return[cum,ccum];} +var p,cp;var mu=this._mean;var sigma=this._sigma;var R_DT_0,R_DT_1;if(lower_tail){if(log_p){R_DT_0=Number.NEGATIVE_INFINITY;R_DT_1=0.0;}else{R_DT_0=0.0;R_DT_1=1.0;}}else{if(log_p){R_DT_0=0.0;R_DT_1=Number.NEGATIVE_INFINITY;}else{R_DT_0=1.0;R_DT_1=0.0;}} +if(!jstat.isFinite(x)&&mu==x)return Number.NaN;if(sigma<=0){if(sigma<0){console.warn("Sigma is less than 0");return Number.NaN;} +return(x0){var nd=new NormalDistribution(meanlog,sdlog);return nd._cdf(Math.log(x),lower_tail,log_p);} +if(lower_tail){return(log_p)?Number.NEGATIVE_INFINITY:0.0;}else{return(log_p)?0.0:1.0;}},getLocation:function(){return this._location;},getScale:function(){return this._scale;},getMean:function(){return Math.exp((this._location+this._scale)/2);},getVariance:function(){var ans=(Math.exp(this._scale)-1)*Math.exp(2*this._location+this._scale);return ans;}});var GammaDistribution=ContinuousDistribution.extend({init:function(shape,scale){this._super('Gamma');this._shape=parseFloat(shape);this._scale=parseFloat(scale);this._string="Gamma ("+this._shape.toFixed(2)+", "+this._scale.toFixed(2)+")";},_pdf:function(x,give_log){var pr;var shape=this._shape;var scale=this._scale;if(give_log==null){give_log=false;} +if(shape<0||scale<=0){throw"Illegal argument in _pdf";} +if(x<0){return(give_log)?Number.NEGATIVE_INFINITY:0.0;} +if(shape==0){return(x==0)?Number.POSITIVE_INFINITY:(give_log)?Number.NEGATIVE_INFINITY:0.0;} +if(x==0){if(shape<1)return 
Number.POSITIVE_INFINITY;if(shape>1)return(give_log)?Number.NEGATIVE_INFINITY:0.0;return(give_log)?-Math.log(scale):1/scale;} +if(shape<1){pr=jstat.dopois_raw(shape,x/scale,give_log);return give_log?pr+Math.log(shape/x):pr*shape/x;} +pr=jstat.dopois_raw(shape-1,x/scale,give_log);return give_log?pr-Math.log(scale):pr/scale;},_cdf:function(x,lower_tail,log_p){function USE_PNORM(){pn1=Math.sqrt(alph)*3.0*(Math.pow(x/alph,1.0/3.0)+1.0/(9.0*alph)-1.0);var norm_dist=new NormalDistribution(0.0,1.0);return norm_dist._cdf(pn1,lower_tail,log_p);} +if(lower_tail==null)lower_tail=true;if(log_p==null)log_p=false;var alph=this._shape;var scale=this._scale;var xbig=1.0e+8;var xlarge=1.0e+37;var alphlimit=1e5;var pn1,pn2,pn3,pn4,pn5,pn6,arg,a,b,c,an,osum,sum,n,pearson;if(alph<=0.||scale<=0.){console.warn('Invalid gamma params in _cdf');return Number.NaN;} +x/=scale;if(isNaN(x))return x;if(x<=0.0){if(lower_tail){return(log_p)?Number.NEGATIVE_INFINITY:0.0;}else{return(log_p)?0.0:1.0;}} +if(alph>alphlimit){return USE_PNORM();} +if(x>xbig*alph){if(x>jstat.DBL_MAX*alph){if(lower_tail){return(log_p)?0.0:1.0;}else{return(log_p)?Number.NEGATIVE_INFINITY:0.0;}}else{return USE_PNORM();}} +if(x<=1.0||xjstat.DBL_EPSILON*sum);}else{pearson=0;arg=alph*Math.log(x)-x-jstat.lgamma(alph);a=1.-alph;b=a+x+1.;pn1=1.;pn2=x;pn3=x+1.;pn4=x*b;sum=pn3/pn4;for(n=1;;n++){a+=1.;b+=2.;an=a*n;pn5=b*pn3-an*pn1;pn6=b*pn4-an*pn2;if(Math.abs(pn6)>0.){osum=sum;sum=pn5/pn6;if(Math.abs(osum-sum)<=jstat.DBL_EPSILON*jstat.fmin2(1.0,sum)) +break;} +pn1=pn3;pn2=pn4;pn3=pn5;pn4=pn6;if(Math.abs(pn5)>=xlarge){pn1/=xlarge;pn2/=xlarge;pn3/=xlarge;pn4/=xlarge;}}} +arg+=Math.log(sum);lower_tail=(lower_tail==pearson);if(log_p&&lower_tail) +return(arg);if(lower_tail){return Math.exp(arg);}else{if(log_p){return(arg>-Math.LN2)?Math.log(-jstat.expm1(arg)):jstat.log1p(-Math.exp(arg));}else{return-jstat.expm1(arg);}}},getShape:function(){return this._shape;},getScale:function(){return this._scale;},getMean:function(){return 
this._shape*this._scale;},getVariance:function(){return this._shape*Math.pow(this._scale,2);}});var BetaDistribution=ContinuousDistribution.extend({init:function(alpha,beta){this._super('Beta');this._alpha=parseFloat(alpha);this._beta=parseFloat(beta);this._string="Beta ("+this._alpha.toFixed(2)+", "+this._beta.toFixed(2)+")";},_pdf:function(x,give_log){if(give_log==null)give_log=false;var a=this._alpha;var b=this._beta;var lval;if(a<=0||b<=0){console.warn('Illegal arguments in _pdf');return Number.NaN;} +if(x<0||x>1){return(give_log)?Number.NEGATIVE_INFINITY:0.0;} +if(x==0){if(a>1){return(give_log)?Number.NEGATIVE_INFINITY:0.0;} +if(a<1){return Number.POSITIVE_INFINITY;} +return(give_log)?Math.log(b):b;} +if(x==1){if(b>1){return(give_log)?Number.NEGATIVE_INFINITY:0.0;} +if(b<1){return Number.POSITIVE_INFINITY;} +return(give_log)?Math.log(a):a;} +if(a<=2||b<=2){lval=(a-1)*Math.log(x)+(b-1)*jstat.log1p(-x)-jstat.logBeta(a,b);}else{lval=Math.log(a+b-1)+jstat.dbinom_raw(a-1,a+b-2,x,1-x,true);} +return(give_log)?lval:Math.exp(lval);},_cdf:function(x,lower_tail,log_p){if(lower_tail==null)lower_tail=true;if(log_p==null)log_p=false;var pin=this._alpha;var qin=this._beta;if(pin<=0||qin<=0){console.warn('Invalid argument in _cdf');return Number.NaN;} +if(x<=0){if(lower_tail){return(log_p)?Number.NEGATIVE_INFINITY:0.0;}else{return(log_p)?0.1:1.0;}} +if(x>=1){if(lower_tail){return(log_p)?0.1:1.0;}else{return(log_p)?Number.NEGATIVE_INFINITY:0.0;}} +return jstat.incompleteBeta(pin,qin,x);},getAlpha:function(){return this._alpha;},getBeta:function(){return this._beta;},getMean:function(){return this._alpha/(this._alpha+this._beta);},getVariance:function(){var ans=(this._alpha*this._beta)/(Math.pow(this._alpha+this._beta,2)*(this._alpha+this._beta+1));return ans;}});var StudentTDistribution=ContinuousDistribution.extend({init:function(degreesOfFreedom,mu){this._super('StudentT');this._dof=parseFloat(degreesOfFreedom);if(mu!=null){this._mu=parseFloat(mu);this._string="StudentT 
("+this._dof.toFixed(2)+", "+this._mu.toFixed(2)+")";}else{this._mu=0.0;this._string="StudentT ("+this._dof.toFixed(2)+")";}},_pdf:function(x,give_log){if(give_log==null)give_log=false;if(this._mu==null){return this._dt(x,give_log);}else{var y=this._dnt(x,give_log);if(y>1){console.warn('x:'+x+', y: '+y);} +return y;}},_cdf:function(x,lower_tail,give_log){if(lower_tail==null)lower_tail=true;if(give_log==null)give_log=false;if(this._mu==null){return this._pt(x,lower_tail,give_log);}else{return this._pnt(x,lower_tail,give_log);}},_dt:function(x,give_log){var t,u;var n=this._dof;if(n<=0){console.warn('Invalid parameters in _dt');return Number.NaN;} +if(!jstat.isFinite(x)){return(give_log)?Number.NEGATIVE_INFINITY:0.0;} +if(!jstat.isFinite(n)){var norm=new NormalDistribution(0.0,1.0);return norm.density(x,give_log);} +t=-jstat.bd0(n/2.0,(n+1)/2.0)+jstat.stirlerr((n+1)/2.0)-jstat.stirlerr(n/2.0);if(x*x>0.2*n) +u=Math.log(1+x*x/n)*n/2;else +u=-jstat.bd0(n/2.0,(n+x*x)/2.0)+x*x/2.0;var p1=jstat.TWO_PI*(1+x*x/n);var p2=t-u;return(give_log)?-0.5*Math.log(p1)+p2:Math.exp(p2)/Math.sqrt(p1);},_dnt:function(x,give_log){if(give_log==null)give_log=false;var df=this._dof;var ncp=this._mu;var u;if(df<=0.0){console.warn("Illegal arguments _dnf");return Number.NaN;} +if(ncp==0.0){return this._dt(x,give_log);} +if(!jstat.isFinite(x)){if(give_log){return Number.NEGATIVE_INFINITY;}else{return 0.0;}} +if(!isFinite(df)||df>1e8){var dist=new NormalDistribution(ncp,1.);return dist.density(x,give_log);} +if(Math.abs(x)>Math.sqrt(df*jstat.DBL_EPSILON)){var newT=new StudentTDistribution(df+2,ncp);u=Math.log(df)-Math.log(Math.abs(x))+ +Math.log(Math.abs(newT._pnt(x*Math.sqrt((df+2)/df),true,false)- +this._pnt(x,true,false)));} +else{u=jstat.lgamma((df+1)/2)-jstat.lgamma(df/2) +-.5*(Math.log(Math.PI)+Math.log(df)+ncp*ncp);} +return(give_log?u:Math.exp(u));},_pt:function(x,lower_tail,log_p){if(lower_tail==null)lower_tail=true;if(log_p==null)log_p=false;var val,nx;var n=this._dof;var 
DT_0,DT_1;if(lower_tail){if(log_p){DT_0=Number.NEGATIVE_INFINITY;DT_1=1.;}else{DT_0=0.;DT_1=1.;}}else{if(log_p){DT_0=0.;DT_1=Number.NEGATIVE_INFINITY;}else{DT_0=1.;DT_1=0.;}} +if(n<=0.0){console.warn("Invalid T distribution _pt");return Number.NaN;} +var norm=new NormalDistribution(0,1);if(!jstat.isFinite(x)){return(x<0)?DT_0:DT_1;} +if(!jstat.isFinite(n)){return norm._cdf(x,lower_tail,log_p);} +if(n>4e5){val=1./(4.*n);return norm._cdf(x*(1.-val)/sqrt(1.+x*x*2.*val),lower_tail,log_p);} +nx=1+(x/n)*x;if(nx>1e100){var lval;lval=-0.5*n*(2*Math.log(Math.abs(x))-Math.log(n)) +-jstat.logBeta(0.5*n,0.5)-Math.log(0.5*n);val=log_p?lval:Math.exp(lval);}else{if(n>x*x){var beta=new BetaDistribution(0.5,n/2.);return beta._cdf(x*x/(n+x*x),false,log_p);}else{beta=new BetaDistribution(n/2.,0.5);return beta._cdf(1./nx,true,log_p);}} +if(x<=0.) +lower_tail=!lower_tail;if(log_p){if(lower_tail)return jstat.log1p(-0.5*Math.exp(val));else return val-M_LN2;} +else{val/=2.;if(lower_tail){return(0.5-val+0.5);}else{return val;}}},_pnt:function(t,lower_tail,log_p){var dof=this._dof;var ncp=this._mu;var DT_0,DT_1;if(lower_tail){if(log_p){DT_0=Number.NEGATIVE_INFINITY;DT_1=1.;}else{DT_0=0.;DT_1=1.;}}else{if(log_p){DT_0=0.;DT_1=Number.NEGATIVE_INFINITY;}else{DT_0=1.;DT_1=0.;}} +var albeta,a,b,del,errbd,lambda,rxb,tt,x;var geven,godd,p,q,s,tnc,xeven,xodd;var it,negdel;var ITRMAX=1000;var ERRMAX=1.e-7;if(dof<=0.0){return Number.NaN;}else if(dof==0.0){return this._pt(t);} +if(!jstat.isFinite(t)){return(t<0)?DT_0:DT_1;} +if(t>=0.){negdel=false;tt=t;del=ncp;}else{if(ncp>=40&&(!log_p||!lower_tail)){return DT_0;} +negdel=true;tt=-t;del=-ncp;} +if(dof>4e5||del*del>2*Math.LN2*(-(jstat.DBL_MIN_EXP))){s=1./(4.*dof);var norm=new NormalDistribution(del,Math.sqrt(1.+tt*tt*2.*s));var result=norm._cdf(tt*(1.-s),lower_tail!=negdel,log_p);return result;} +x=t*t;rxb=dof/(x+dof);x=x/(x+dof);if(x>0.){lambda=del*del;p=.5*Math.exp(-.5*lambda);if(p==0.){console.warn("underflow in _pnt");return DT_0;} 
+q=jstat.SQRT_2dPI*p*del;s=.5-p;if(s<1e-7){s=-0.5*jstat.expm1(-0.5*lambda);} +a=.5;b=.5*dof;rxb=Math.pow(rxb,b);albeta=jstat.LN_SQRT_PI+jstat.lgamma(b)-jstat.lgamma(.5+b);xodd=jstat.incompleteBeta(a,b,x);godd=2.*rxb*Math.exp(a*Math.log(x)-albeta);tnc=b*x;xeven=(tnc1)break;errbd=2.*s*(xodd-godd);if(Math.abs(errbd)1-1e-10&&lower_tail){console.warn("precision error _pnt");} +var res=jstat.fmin2(tnc,1.);if(lower_tail){if(log_p){return Math.log(res);}else{return res;}}else{if(log_p){return jstat.log1p(-(res));}else{return(0.5-(res)+0.5);}}},getDegreesOfFreedom:function(){return this._dof;},getNonCentralityParameter:function(){return this._mu;},getMean:function(){if(this._dof>1){var ans=(1/2)*Math.log(this._dof/2)+jstat.lgamma((this._dof-1)/2)-jstat.lgamma(this._dof/2) +return Math.exp(ans)*this._mu;}else{return Number.NaN;}},getVariance:function(){if(this._dof>2){var ans=this._dof*(1+this._mu*this._mu)/(this._dof-2)-(((this._mu*this._mu*this._dof)/2)*Math.pow(Math.exp(jstat.lgamma((this._dof-1)/2)-jstat.lgamma(this._dof/2)),2));return ans;}else{return Number.NaN;}}});var Plot=Class.extend({init:function(id,options){this._container='#'+String(id);this._plots=[];this._flotObj=null;this._locked=false;if(options!=null){this._options=options;}else{this._options={};}},getContainer:function(){return this._container;},getGraph:function(){return this._flotObj;},setData:function(data){this._plots=data;},clear:function(){this._plots=[];},showLegend:function(){this._options.legend={show:true} +this.render();},hideLegend:function(){this._options.legend={show:false} +this.render();},render:function(){this._flotObj=null;this._flotObj=$.plot($(this._container),this._plots,this._options);}});var 
DistributionPlot=Plot.extend({init:function(id,distribution,range,options){this._super(id,options);this._showPDF=true;this._showCDF=false;this._pdfValues=[];this._cdfValues=[];this._maxY=1;this._plotType='line';this._fill=false;this._distribution=distribution;if(range!=null&&Range.validate(range)){this._range=range;}else{this._range=this._distribution.getRange();} +if(this._distribution!=null){this._maxY=this._generateValues();}else{this._options.xaxis={min:range.getMinimum(),max:range.getMaximum()} +this._options.yaxis={max:1}} +this.render();},setHover:function(bool){if(bool){if(this._options.grid==null){this._options.grid={hoverable:true,mouseActiveRadius:25}}else{this._options.grid.hoverable=true,this._options.grid.mouseActiveRadius=25} +function showTooltip(x,y,contents,color){$('
'+contents+'
').css({position:'absolute',display:'none',top:y+15,'font-size':'small',left:x+5,border:'1px solid '+color[1],color:color[2],padding:'5px','background-color':color[0],opacity:0.80}).appendTo("body").show();} +var previousPoint=null;$(this._container).bind("plothover",function(event,pos,item){$("#x").text(pos.x.toFixed(2));$("#y").text(pos.y.toFixed(2));if(item){if(previousPoint!=item.datapoint){previousPoint=item.datapoint;$("#jstat_tooltip").remove();var x=jstat.toSigFig(item.datapoint[0],2),y=jstat.toSigFig(item.datapoint[1],2);var text=null;var color=item.series.color;if(item.series.label=='PDF'){text="P("+x+") = "+y;color=["#fee","#fdd","#C05F5F"];}else{text="F("+x+") = "+y;color=["#eef","#ddf","#4A4AC0"];} +showTooltip(item.pageX,item.pageY,text,color);}} +else{$("#jstat_tooltip").remove();previousPoint=null;}});$(this._container).bind("mouseleave",function(){if($('#jstat_tooltip').is(':visible')){$('#jstat_tooltip').remove();previousPoint=null;}});}else{if(this._options.grid==null){this._options.grid={hoverable:false}}else{this._options.grid.hoverable=false} +$(this._container).unbind("plothover");} +this.render();},setType:function(type){this._plotType=type;var lines={};var points={};if(this._plotType=='line'){lines.show=true;points.show=false;}else if(this._plotType=='points'){lines.show=false;points.show=true;}else if(this._plotType=='both'){lines.show=true;points.show=true;} +if(this._options.series==null){this._options.series={lines:lines,points:points}}else{if(this._options.series.lines==null){this._options.series.lines=lines;}else{this._options.series.lines.show=lines.show;} +if(this._options.series.points==null){this._options.series.points=points;}else{this._options.series.points.show=points.show;}} +this.render();},setFill:function(bool){this._fill=bool;if(this._options.series==null){this._options.series={lines:{fill:bool}}}else{if(this._options.series.lines==null){this._options.series.lines={fill:bool}}else{this._options.series.lines.fill=bool;}} 
+this.render();},clear:function(){this._super();this._distribution=null;this._pdfValues=[];this._cdfValues=[];this.render();},_generateValues:function(){this._cdfValues=[];this._pdfValues=[];var xs=this._range.getPoints();this._options.xaxis={min:xs[0],max:xs[xs.length-1]} +var pdfs=this._distribution.density(this._range);var cdfs=this._distribution.cumulativeDensity(this._range);for(var i=0;i 1) { + logme('ERROR: Found more than one slider for the parameter "' + paramName + '".'); + logme('sliderDiv.length = ', sliderDiv.length); + } else { + logme('MESSAGE: Did not find a slider for the parameter "' + paramName + '".'); + } + } + + function createSlider(sliderDiv, paramName) { + var paramObj; + + paramObj = state.getParamObj(paramName); + + // Check that the retrieval went OK. + if (paramObj === undefined) { + logme('ERROR: Could not get a paramObj for parameter "' + paramName + '".'); + + return; + } + + // Create a jQuery UI slider from the slider DIV. We will set + // starting parameters, and will also attach a handler to update + // the 'state' on the 'slide' event. + sliderDiv.slider({ + 'min': paramObj.min, + 'max': paramObj.max, + 'value': paramObj.value, + 'step': paramObj.step + }); + + // Tell the parameter object stored in state that we have a slider + // that is attached to it. Next time when the parameter changes, it + // will also update the value of this slider. + paramObj.sliderDiv = sliderDiv; + + // Atach callbacks to update the slider's parameter. + paramObj.sliderDiv.on('slide', sliderOnSlide); + paramObj.sliderDiv.on('slidechange', sliderOnChange); + + return; + + // Update the 'state' - i.e. set the value of the parameter this + // slider is attached to to a new value. + // + // This will cause the plot to be redrawn each time after the user + // drags the slider handle and releases it. 
+ function sliderOnSlide(event, ui) { + // Last parameter passed to setParameterValue() will be 'true' + // so that the function knows we are a slider, and it can + // change the our value back in the case when the new value is + // invalid for some reason. + if (state.setParameterValue(paramName, ui.value, sliderDiv, true, 'slide') === undefined) { + logme('ERROR: Could not update the parameter named "' + paramName + '" with the value "' + ui.value + '".'); + } + } + + function sliderOnChange(event, ui) { + if (state.setParameterValue(paramName, ui.value, sliderDiv, true, 'change') === undefined) { + logme('ERROR: Could not update the parameter named "' + paramName + '" with the value "' + ui.value + '".'); + } + } + } + } +}); + +// End of wrapper for RequireJS. As you can see, we are passing +// namespaced Require JS variables to an anonymous function. Within +// it, you can use the standard requirejs(), require(), and define() +// functions as if they were in the global namespace. +}(RequireJS.requirejs, RequireJS.require, RequireJS.define)); // End-of: (function (requirejs, require, define) diff --git a/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/state.js b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/state.js new file mode 100644 index 0000000000..8b534fd19d --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/state.js @@ -0,0 +1,395 @@ +// Wrapper for RequireJS. It will make the standard requirejs(), require(), and +// define() functions from Require JS available inside the anonymous function. +(function (requirejs, require, define) { + +define('State', ['logme'], function (logme) { + var stateInst; + + // Since there will be (can be) multiple GST on a page, and each will have + // a separate state, we will create a factory constructor function. The + // constructor will expect the ID of the DIV with the GST contents, and the + // configuration object (parsed from a JSON string). 
It will return an + // object containing methods to set and get the private state properties. + + stateInst = 0; + + // This module defines and returns a factory constructor. + return State; + + function State(gstId, config) { + var parameters, allParameterNames, allParameterValues, + plotDiv, dynamicEl, dynamicElByElId; + + dynamicEl = []; + dynamicElByElId = {}; + + stateInst += 1; + logme('MESSAGE: Creating state instance # ' + stateInst + '.'); + + // Initially, there are no parameters to track. So, we will instantiate + // an empty object. + // + // As we parse the JSON config object, we will add parameters as + // named properties. For example + // + // parameters.a = {...}; + // + // will be created for the parameter 'a'. + parameters = {}; + + // Check that the required parameters config object is available. + if ($.isPlainObject(config.parameters) === false) { + logme('ERROR: Expected config.parameters to be an object. It is not.'); + logme('config.parameters = ', config.parameters); + + return; + } + + // If config.parameters.param is an array, pass it to the processor + // element by element. + if ($.isArray(config.parameters.param) === true) { + (function (c1) { + while (c1 < config.parameters.param.length) { + processParameter(config.parameters.param[c1]); + c1 += 1; + } + }(0)); + } + + // If config.parameters.param is an object, pass this object to the + // processor directly. + else if ($.isPlainObject(config.parameters.param) === true) { + processParameter(config.parameters.param); + } + + // If config.parameters.param is some other type, report an error and + // do not continue. + else { + logme('ERROR: config.parameters.param is of an unsupported type.'); + logme('config.parameters.param = ', config.parameters.param); + + return; + } + + // Instead of building these arrays every time when some component + // requests them, we will create them in the beginning, and then update + // each element individually when some parameter's value changes. 
+ // + // Then we can just return the required array, instead of iterating + // over all of the properties of the 'parameters' object, and + // extracting their names/values one by one. + allParameterNames = []; + allParameterValues = []; + + // Populate 'allParameterNames', and 'allParameterValues' with data. + generateHelperArrays(); + + // The constructor will return an object with methods to operate on + // it's private properties. + return { + 'getParameterValue': getParameterValue, + 'setParameterValue': setParameterValue, + + 'getParamObj': getParamObj, + + 'getAllParameterNames': getAllParameterNames, + 'getAllParameterValues': getAllParameterValues, + + 'bindUpdatePlotEvent': bindUpdatePlotEvent, + 'addDynamicEl': addDynamicEl, + + // plde is an abbreviation for Plot Label Dynamic Elements. + plde: [] + }; + + function getAllParameterNames() { + return allParameterNames; + } + + function getAllParameterValues() { + return allParameterValues; + } + + function getParamObj(paramName) { + if (parameters.hasOwnProperty(paramName) === false) { + logme('ERROR: Object parameters does not have a property named "' + paramName + '".'); + + return; + } + + return parameters[paramName]; + } + + function bindUpdatePlotEvent(newPlotDiv, callback) { + plotDiv = newPlotDiv; + + plotDiv.bind('update_plot', callback); + } + + function addDynamicEl(el, func, elId, updateOnEvent) { + var newLength; + + newLength = dynamicEl.push({ + 'el': el, + 'func': func, + 'elId': elId, + 'updateOnEvent': updateOnEvent + }); + + if (typeof dynamicElByElId[elId] !== 'undefined') { + logme( + 'ERROR: Duplicate dynamic element ID "' + elId + '" found.' + ); + } else { + dynamicElByElId[elId] = dynamicEl[newLength - 1]; + } + } + + function getParameterValue(paramName) { + + // If the name of the constant is not tracked by state, return an + // 'undefined' value. 
+ if (parameters.hasOwnProperty(paramName) === false) { + logme('ERROR: Object parameters does not have a property named "' + paramName + '".'); + + return; + } + + return parameters[paramname].value; + } + + // #################################################################### + // + // Function: setParameterValue(paramName, paramValue, element) + // -------------------------------------------------- + // + // + // This function can be called from a callback, registered by a slider + // or a text input, when specific events ('slide' or 'change') are + // triggered. + // + // The 'paramName' is the name of the parameter in 'parameters' object + // whose value must be updated to the new value of 'paramValue'. + // + // Before we update the value, we must check that: + // + // 1.) the parameter named as 'paramName' actually exists in the + // 'parameters' object; + // 2.) the value 'paramValue' is a valid floating-point number, and + // it lies within the range specified by the 'min' and 'max' + // properties of the stored parameter object. + // + // If 'paramName' and 'paramValue' turn out to be valid, we will update + // the stored value in the parameter with the new value, and also + // update all of the text inputs and the slider that correspond to this + // parameter (if any), so that they reflect the new parameter's value. + // Finally, the helper array 'allParameterValues' will also be updated + // to reflect the change. + // + // If something went wrong (for example the new value is outside the + // allowed range), then we will reset the 'element' to display the + // original value. + // + // #################################################################### + function setParameterValue(paramName, paramValue, element, slider, updateOnEvent) { + var paramValueNum, c1; + + // If a parameter with the name specified by the 'paramName' + // parameter is not tracked by state, do not do anything. 
+ if (parameters.hasOwnProperty(paramName) === false) { + logme('ERROR: Object parameters does not have a property named "' + paramName + '".'); + + return; + } + + // Try to convert the passed value to a valid floating-point + // number. + paramValueNum = parseFloat(paramValue); + + // We are interested only in valid float values. NaN, -INF, + // +INF we will disregard. + if (isFinite(paramValueNum) === false) { + logme('ERROR: New parameter value is not a floating-point number.'); + logme('paramValue = ', paramValue); + + return; + } + + if (paramValueNum < parameters[paramName].min) { + paramValueNum = parameters[paramName].min; + } else if (paramValueNum > parameters[paramName].max) { + paramValueNum = parameters[paramName].max; + } + + parameters[paramName].value = paramValueNum; + + // Update all text inputs with the new parameter's value. + for (c1 = 0; c1 < parameters[paramName].inputDivs.length; c1 += 1) { + parameters[paramName].inputDivs[c1].val(paramValueNum); + } + + // Update the single slider with the new parameter's value. + if ((slider === false) && (parameters[paramName].sliderDiv !== null)) { + parameters[paramName].sliderDiv.slider('value', paramValueNum); + } + + // Update the helper array with the new parameter's value. + allParameterValues[parameters[paramName].helperArrayIndex] = paramValueNum; + + for (c1 = 0; c1 < dynamicEl.length; c1++) { + if ( + ((updateOnEvent !== undefined) && (dynamicEl[c1].updateOnEvent === updateOnEvent)) || + (updateOnEvent === undefined) + ) { + // If we have a DOM element, call the function "paste" the answer into the DIV. + if (dynamicEl[c1].el !== null) { + dynamicEl[c1].el.html(dynamicEl[c1].func.apply(window, allParameterValues)); + } + // If we DO NOT have an element, simply call the function. The function can then + // manipulate all the DOM elements it wants, without the fear of them being overwritten + // by us afterwards. 
+ else { + dynamicEl[c1].func.apply(window, allParameterValues); + } + } + } + + // If we have a plot DIV to work with, tell to update. + if (plotDiv !== undefined) { + plotDiv.trigger('update_plot'); + } + + return true; + } // End-of: function setParameterValue + + // #################################################################### + // + // Function: processParameter(obj) + // ------------------------------- + // + // + // This function will be run once for each instance of a GST when + // parsing the JSON config object. + // + // 'newParamObj' must be empty from the start for each invocation of + // this function, that's why we will declare it locally. + // + // We will parse the passed object 'obj' and populate the 'newParamObj' + // object with required properties. + // + // Since there will be many properties that are of type floating-point + // number, we will have a separate function for parsing them. + // + // processParameter() will fail right away if 'obj' does not have a + // '@var' property which represents the name of the parameter we want + // to process. + // + // If, after all of the properties have been processed, we reached the + // end of the function successfully, the 'newParamObj' will be added to + // the 'parameters' object (that is defined in the scope of State() + // function) as a property named as the name of the parameter. + // + // If at least one of the properties from 'obj' does not get correctly + // parsed, then the parameter represented by 'obj' will be disregarded. + // It will not be available to user-defined plotting functions, and + // things will most likely break. We will notify the user about this. + // + // #################################################################### + function processParameter(obj) { + var paramName, newParamObj; + + if (typeof obj['@var'] !== 'string') { + logme('ERROR: Expected obj["@var"] to be a string. 
It is not.'); + logme('obj["@var"] = ', obj['@var']); + + return; + } + + paramName = obj['@var']; + newParamObj = {}; + + if ( + (processFloat('@min', 'min') === false) || + (processFloat('@max', 'max') === false) || + (processFloat('@step', 'step') === false) || + (processFloat('@initial', 'value') === false) + ) { + logme('ERROR: A required property is missing. Not creating parameter "' + paramName + '"'); + + return; + } + + // Pointers to text input and slider DIV elements that this + // parameter will be attached to. Initially there are none. When we + // will create text inputs and sliders, we will update these + // properties. + newParamObj.inputDivs = []; + newParamObj.sliderDiv = null; + + // Everything went well, so save the new parameter object. + parameters[paramName] = newParamObj; + + return; + + function processFloat(attrName, newAttrName) { + var attrValue; + + if (typeof obj[attrName] !== 'string') { + logme('ERROR: Expected obj["' + attrName + '"] to be a string. It is not.'); + logme('obj["' + attrName + '"] = ', obj[attrName]); + + return false; + } else { + attrValue = parseFloat(obj[attrName]); + + if (isFinite(attrValue) === false) { + logme('ERROR: Expected obj["' + attrName + '"] to be a valid floating-point number. It is not.'); + logme('obj["' + attrName + '"] = ', obj[attrName]); + + return false; + } + } + + newParamObj[newAttrName] = attrValue; + + return true; + } // End-of: function processFloat + } // End-of: function processParameter + + // #################################################################### + // + // Function: generateHelperArrays() + // ------------------------------- + // + // + // Populate 'allParameterNames' and 'allParameterValues' with data. + // Link each parameter object with the corresponding helper array via + // an index 'helperArrayIndex'. It will be the same for both of the + // arrays. 
+ // + // NOTE: It is important to remember to update these helper arrays + // whenever a new parameter is added (or one is removed), or when a + // parameter's value changes. + // + // #################################################################### + function generateHelperArrays() { + var paramName, c1; + + c1 = 0; + for (paramName in parameters) { + allParameterNames.push(paramName); + allParameterValues.push(parameters[paramName].value); + + parameters[paramName].helperArrayIndex = c1; + + c1 += 1; + } + } + } // End-of: function State +}); + +// End of wrapper for RequireJS. As you can see, we are passing +// namespaced Require JS variables to an anonymous function. Within +// it, you can use the standard requirejs(), require(), and define() +// functions as if they were in the global namespace. +}(RequireJS.requirejs, RequireJS.require, RequireJS.define)); // End-of: (function (requirejs, require, define) diff --git a/common/lib/xmodule/xmodule/js/src/selfassessment/display.coffee b/common/lib/xmodule/xmodule/js/src/selfassessment/display.coffee deleted file mode 100644 index 5b70ab29aa..0000000000 --- a/common/lib/xmodule/xmodule/js/src/selfassessment/display.coffee +++ /dev/null @@ -1,133 +0,0 @@ -class @SelfAssessment - constructor: (element) -> - @el = $(element).find('section.self-assessment') - @id = @el.data('id') - @ajax_url = @el.data('ajax-url') - @state = @el.data('state') - @allow_reset = @el.data('allow_reset') - # valid states: 'initial', 'assessing', 'request_hint', 'done' - - # Where to put the rubric once we load it - @errors_area = @$('.error') - @answer_area = @$('textarea.answer') - - @rubric_wrapper = @$('.rubric-wrapper') - @hint_wrapper = @$('.hint-wrapper') - @message_wrapper = @$('.message-wrapper') - @submit_button = @$('.submit-button') - @reset_button = @$('.reset-button') - @reset_button.click @reset - - @find_assessment_elements() - @find_hint_elements() - - @rebind() - - # locally scoped jquery. 
- $: (selector) -> - $(selector, @el) - - rebind: () => - # rebind to the appropriate function for the current state - @submit_button.unbind('click') - @submit_button.show() - @reset_button.hide() - @hint_area.attr('disabled', false) - if @state == 'initial' - @answer_area.attr("disabled", false) - @submit_button.prop('value', 'Submit') - @submit_button.click @save_answer - else if @state == 'assessing' - @answer_area.attr("disabled", true) - @submit_button.prop('value', 'Submit assessment') - @submit_button.click @save_assessment - else if @state == 'request_hint' - @answer_area.attr("disabled", true) - @submit_button.prop('value', 'Submit hint') - @submit_button.click @save_hint - else if @state == 'done' - @answer_area.attr("disabled", true) - @hint_area.attr('disabled', true) - @submit_button.hide() - if @allow_reset - @reset_button.show() - else - @reset_button.hide() - - - find_assessment_elements: -> - @assessment = @$('select.assessment') - - find_hint_elements: -> - @hint_area = @$('textarea.hint') - - save_answer: (event) => - event.preventDefault() - if @state == 'initial' - data = {'student_answer' : @answer_area.val()} - $.postWithPrefix "#{@ajax_url}/save_answer", data, (response) => - if response.success - @rubric_wrapper.html(response.rubric_html) - @state = 'assessing' - @find_assessment_elements() - @rebind() - else - @errors_area.html(response.error) - else - @errors_area.html('Problem state got out of sync. 
Try reloading the page.') - - save_assessment: (event) => - event.preventDefault() - if @state == 'assessing' - data = {'assessment' : @assessment.find(':selected').text()} - $.postWithPrefix "#{@ajax_url}/save_assessment", data, (response) => - if response.success - @state = response.state - - if @state == 'request_hint' - @hint_wrapper.html(response.hint_html) - @find_hint_elements() - else if @state == 'done' - @message_wrapper.html(response.message_html) - @allow_reset = response.allow_reset - - @rebind() - else - @errors_area.html(response.error) - else - @errors_area.html('Problem state got out of sync. Try reloading the page.') - - - save_hint: (event) => - event.preventDefault() - if @state == 'request_hint' - data = {'hint' : @hint_area.val()} - - $.postWithPrefix "#{@ajax_url}/save_hint", data, (response) => - if response.success - @message_wrapper.html(response.message_html) - @state = 'done' - @allow_reset = response.allow_reset - @rebind() - else - @errors_area.html(response.error) - else - @errors_area.html('Problem state got out of sync. Try reloading the page.') - - - reset: (event) => - event.preventDefault() - if @state == 'done' - $.postWithPrefix "#{@ajax_url}/reset", {}, (response) => - if response.success - @answer_area.val('') - @rubric_wrapper.html('') - @hint_wrapper.html('') - @message_wrapper.html('') - @state = 'initial' - @rebind() - @reset_button.hide() - else - @errors_area.html(response.error) - else - @errors_area.html('Problem state got out of sync. 
Try reloading the page.') diff --git a/common/lib/xmodule/xmodule/js/src/video/display.coffee b/common/lib/xmodule/xmodule/js/src/video/display.coffee index 6587f05899..a170075b68 100644 --- a/common/lib/xmodule/xmodule/js/src/video/display.coffee +++ b/common/lib/xmodule/xmodule/js/src/video/display.coffee @@ -2,6 +2,8 @@ class @Video constructor: (element) -> @el = $(element).find('.video') @id = @el.attr('id').replace(/video_/, '') + @start = @el.data('start') + @end = @el.data('end') @caption_data_dir = @el.data('caption-data-dir') @show_captions = @el.data('show-captions') == "true" window.player = null diff --git a/common/lib/xmodule/xmodule/js/src/video/display/video_player.coffee b/common/lib/xmodule/xmodule/js/src/video/display/video_player.coffee index 8829e25dac..ec52d15874 100644 --- a/common/lib/xmodule/xmodule/js/src/video/display/video_player.coffee +++ b/common/lib/xmodule/xmodule/js/src/video/display/video_player.coffee @@ -36,14 +36,21 @@ class @VideoPlayer extends Subview @volumeControl = new VideoVolumeControl el: @$('.secondary-controls') @speedControl = new VideoSpeedControl el: @$('.secondary-controls'), speeds: @video.speeds, currentSpeed: @currentSpeed() @progressSlider = new VideoProgressSlider el: @$('.slider') + @playerVars = + controls: 0 + wmode: 'transparent' + rel: 0 + showinfo: 0 + enablejsapi: 1 + modestbranding: 1 + if @video.start + @playerVars.start = @video.start + if @video.end + # work in AS3, not HMLT5. 
but iframe use AS3 + @playerVars.end = @video.end + @player = new YT.Player @video.id, - playerVars: - controls: 0 - wmode: 'transparent' - rel: 0 - showinfo: 0 - enablejsapi: 1 - modestbranding: 1 + playerVars: @playerVars videoId: @video.youtubeId() events: onReady: @onReady diff --git a/common/lib/xmodule/xmodule/modulestore/__init__.py b/common/lib/xmodule/xmodule/modulestore/__init__.py index 5b94add68f..f86a6e9600 100644 --- a/common/lib/xmodule/xmodule/modulestore/__init__.py +++ b/common/lib/xmodule/xmodule/modulestore/__init__.py @@ -345,9 +345,9 @@ class ModuleStore(object): ''' raise NotImplementedError - def get_parent_locations(self, location): - '''Find all locations that are the parents of this location. Needed - for path_to_location(). + def get_parent_locations(self, location, course_id): + '''Find all locations that are the parents of this location in this + course. Needed for path_to_location(). returns an iterable of things that can be passed to Location. ''' diff --git a/common/lib/xmodule/xmodule/modulestore/mongo.py b/common/lib/xmodule/xmodule/modulestore/mongo.py index baa4e7870c..4c7ef3c050 100644 --- a/common/lib/xmodule/xmodule/modulestore/mongo.py +++ b/common/lib/xmodule/xmodule/modulestore/mongo.py @@ -309,9 +309,9 @@ class MongoModuleStore(ModuleStoreBase): self._update_single_item(location, {'metadata': metadata}) - def get_parent_locations(self, location): - '''Find all locations that are the parents of this location. Needed - for path_to_location(). + def get_parent_locations(self, location, course_id): + '''Find all locations that are the parents of this location in this + course. Needed for path_to_location(). If there is no data at location in this modulestore, raise ItemNotFoundError. 
diff --git a/common/lib/xmodule/xmodule/modulestore/search.py b/common/lib/xmodule/xmodule/modulestore/search.py index f9901e8bfe..4a5ece6854 100644 --- a/common/lib/xmodule/xmodule/modulestore/search.py +++ b/common/lib/xmodule/xmodule/modulestore/search.py @@ -64,7 +64,7 @@ def path_to_location(modulestore, course_id, location): # isn't found so we don't have to do it explicitly. Call this # first to make sure the location is there (even if it's a course, and # we would otherwise immediately exit). - parents = modulestore.get_parent_locations(loc) + parents = modulestore.get_parent_locations(loc, course_id) # print 'Processing loc={0}, path={1}'.format(loc, path) if loc.category == "course": diff --git a/common/lib/xmodule/xmodule/modulestore/tests/test_modulestore.py b/common/lib/xmodule/xmodule/modulestore/tests/test_modulestore.py index c1d1d50a53..64816581ce 100644 --- a/common/lib/xmodule/xmodule/modulestore/tests/test_modulestore.py +++ b/common/lib/xmodule/xmodule/modulestore/tests/test_modulestore.py @@ -23,12 +23,3 @@ def check_path_to_location(modulestore): for location in not_found: assert_raises(ItemNotFoundError, path_to_location, modulestore, course_id, location) - # Since our test files are valid, there shouldn't be any - # elements with no path to them. But we can look for them in - # another course. 
- no_path = ( - "i4x://edX/simple/video/Lost_Video", - ) - for location in no_path: - assert_raises(NoPathToItem, path_to_location, modulestore, course_id, location) - diff --git a/common/lib/xmodule/xmodule/modulestore/xml.py b/common/lib/xmodule/xmodule/modulestore/xml.py index 6b3ff9bff4..04f3a94d1b 100644 --- a/common/lib/xmodule/xmodule/modulestore/xml.py +++ b/common/lib/xmodule/xmodule/modulestore/xml.py @@ -275,14 +275,16 @@ class XMLModuleStore(ModuleStoreBase): class_ = getattr(import_module(module_path), class_name) self.default_class = class_ - self.parent_tracker = ParentTracker() + self.parent_trackers = defaultdict(ParentTracker) # If we are specifically asked for missing courses, that should # be an error. If we are asked for "all" courses, find the ones - # that have a course.xml + # that have a course.xml. We sort the dirs in alpha order so we always + # read things in the same order (OS differences in load order have + # bitten us in the past.) if course_dirs is None: - course_dirs = [d for d in os.listdir(self.data_dir) if - os.path.exists(self.data_dir / d / "course.xml")] + course_dirs = sorted([d for d in os.listdir(self.data_dir) if + os.path.exists(self.data_dir / d / "course.xml")]) for course_dir in course_dirs: self.try_load_course(course_dir) @@ -307,7 +309,7 @@ class XMLModuleStore(ModuleStoreBase): if course_descriptor is not None: self.courses[course_dir] = course_descriptor self._location_errors[course_descriptor.location] = errorlog - self.parent_tracker.make_known(course_descriptor.location) + self.parent_trackers[course_descriptor.id].make_known(course_descriptor.location) else: # Didn't load course. Instead, save the errors elsewhere. 
self.errored_courses[course_dir] = errorlog @@ -432,7 +434,7 @@ class XMLModuleStore(ModuleStoreBase): course_dir, policy, tracker, - self.parent_tracker, + self.parent_trackers[course_id], self.load_error_modules, ) @@ -541,9 +543,9 @@ class XMLModuleStore(ModuleStoreBase): """ raise NotImplementedError("XMLModuleStores are read-only") - def get_parent_locations(self, location): - '''Find all locations that are the parents of this location. Needed - for path_to_location(). + def get_parent_locations(self, location, course_id): + '''Find all locations that are the parents of this location in this + course. Needed for path_to_location(). If there is no data at location in this modulestore, raise ItemNotFoundError. @@ -552,7 +554,7 @@ class XMLModuleStore(ModuleStoreBase): be empty if there are no parents. ''' location = Location.ensure_fully_specified(location) - if not self.parent_tracker.is_known(location): - raise ItemNotFoundError(location) + if not self.parent_trackers[course_id].is_known(location): + raise ItemNotFoundError("{0} not in {1}".format(location, course_id)) - return self.parent_tracker.parents(location) + return self.parent_trackers[course_id].parents(location) diff --git a/common/lib/xmodule/xmodule/open_ended_module.py b/common/lib/xmodule/xmodule/open_ended_module.py new file mode 100644 index 0000000000..11f96c9848 --- /dev/null +++ b/common/lib/xmodule/xmodule/open_ended_module.py @@ -0,0 +1,660 @@ +""" +A Self Assessment module that allows students to write open-ended responses, +submit, then see a rubric and rate themselves. Persists student supplied +hints, answers, and assessment judgment (currently only correct/incorrect). +Parses xml definition file--see below for exact format. 
+""" + +import copy +from fs.errors import ResourceNotFoundError +import itertools +import json +import logging +from lxml import etree +from lxml.html import rewrite_links +from path import path +import os +import sys +import hashlib +import capa.xqueue_interface as xqueue_interface + +from pkg_resources import resource_string + +from .capa_module import only_one, ComplexEncoder +from .editing_module import EditingDescriptor +from .html_checker import check_html +from progress import Progress +from .stringify import stringify_children +from .xml_module import XmlDescriptor +from xmodule.modulestore import Location +from capa.util import * +import openendedchild + +from mitxmako.shortcuts import render_to_string +from numpy import median + +from datetime import datetime + +from combined_open_ended_rubric import CombinedOpenEndedRubric + +log = logging.getLogger("mitx.courseware") + +class OpenEndedModule(openendedchild.OpenEndedChild): + """ + The open ended module supports all external open ended grader problems. + Sample XML file: + + + Enter essay here. + This is the answer. + {"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"} + + + """ + + def setup_response(self, system, location, definition, descriptor): + """ + Sets up the response type. 
+ @param system: Modulesystem object + @param location: The location of the problem + @param definition: The xml definition of the problem + @param descriptor: The OpenEndedDescriptor associated with this + @return: None + """ + oeparam = definition['oeparam'] + + self.url = definition.get('url', None) + self.queue_name = definition.get('queuename', self.DEFAULT_QUEUE) + self.message_queue_name = definition.get('message-queuename', self.DEFAULT_MESSAGE_QUEUE) + + #This is needed to attach feedback to specific responses later + self.submission_id = None + self.grader_id = None + + if oeparam is None: + raise ValueError("No oeparam found in problem xml.") + if self.prompt is None: + raise ValueError("No prompt found in problem xml.") + if self.rubric is None: + raise ValueError("No rubric found in problem xml.") + + self._parse(oeparam, self.prompt, self.rubric, system) + + if self.created == True and self.state == self.ASSESSING: + self.created = False + self.send_to_grader(self.latest_answer(), system) + self.created = False + + def _parse(self, oeparam, prompt, rubric, system): + ''' + Parse OpenEndedResponse XML: + self.initial_display + self.payload - dict containing keys -- + 'grader' : path to grader settings file, 'problem_id' : id of the problem + + self.answer - What to display when show answer is clicked + ''' + # Note that OpenEndedResponse is agnostic to the specific contents of grader_payload + prompt_string = stringify_children(prompt) + rubric_string = stringify_children(rubric) + self.prompt = prompt_string + self.rubric = rubric_string + + grader_payload = oeparam.find('grader_payload') + grader_payload = grader_payload.text if grader_payload is not None else '' + + #Update grader payload with student id. If grader payload not json, error. 
+        try:
+            parsed_grader_payload = json.loads(grader_payload)
+            # NOTE: self.system.location is valid because the capa_module
+            # __init__ adds it (easiest way to get problem location into
+            # response types)
+        except (TypeError, ValueError):
+            log.exception("Grader payload %r is not a json object!", grader_payload)
+
+        self.initial_display = find_with_default(oeparam, 'initial_display', '')
+        self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.')
+
+        parsed_grader_payload.update({
+            'location': system.location.url(),
+            'course_id': system.course_id,
+            'prompt': prompt_string,
+            'rubric': rubric_string,
+            'initial_display': self.initial_display,
+            'answer': self.answer,
+        })
+        updated_grader_payload = json.dumps(parsed_grader_payload)
+
+        self.payload = {'grader_payload': updated_grader_payload}
+
+    def skip_post_assessment(self, get, system):
+        """
+        Ajax function that allows one to skip the post assessment phase
+        @param get: AJAX dictionary
+        @param system: ModuleSystem
+        @return: Success indicator
+        """
+        self.state = self.DONE
+        return {'success': True}
+
+    def message_post(self, get, system):
+        """
+        Handles a student message post (a reaction to the grade they received from an open ended grader type)
+        Returns a boolean success/fail and an error message
+        """
+
+        event_info = dict()
+        event_info['problem_id'] = system.location.url()
+        event_info['student_id'] = system.anonymous_student_id
+        event_info['survey_responses'] = get
+
+        survey_responses = event_info['survey_responses']
+        for tag in ['feedback', 'submission_id', 'grader_id', 'score']:
+            if tag not in survey_responses:
+                return {'success': False, 'msg': "Could not find needed tag {0}".format(tag)}
+        try:
+            submission_id = int(survey_responses['submission_id'])
+            grader_id = int(survey_responses['grader_id'])
+            feedback = str(survey_responses['feedback'].encode('ascii', 'ignore'))
+            score = int(survey_responses['score'])
+        except:
+            error_message = ("Could not parse submission id, 
grader id, " + "or feedback from message_post ajax call. Here is the message data: {0}".format( + survey_responses)) + log.exception(error_message) + return {'success': False, 'msg': "There was an error saving your feedback. Please contact course staff."} + + qinterface = system.xqueue['interface'] + qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat) + anonymous_student_id = system.anonymous_student_id + queuekey = xqueue_interface.make_hashkey(str(system.seed) + qtime + + anonymous_student_id + + str(len(self.history))) + + xheader = xqueue_interface.make_xheader( + lms_callback_url=system.xqueue['callback_url'], + lms_key=queuekey, + queue_name=self.message_queue_name + ) + + student_info = {'anonymous_student_id': anonymous_student_id, + 'submission_time': qtime, + } + contents = { + 'feedback': feedback, + 'submission_id': submission_id, + 'grader_id': grader_id, + 'score': score, + 'student_info': json.dumps(student_info), + } + + (error, msg) = qinterface.send_to_queue(header=xheader, + body=json.dumps(contents)) + + #Convert error to a success value + success = True + if error: + success = False + + self.state = self.DONE + + return {'success': success, 'msg': "Successfully submitted your feedback."} + + def send_to_grader(self, submission, system): + """ + Send a given submission to the grader, via the xqueue + @param submission: The student submission to send to the grader + @param system: Modulesystem + @return: Boolean true (not useful right now) + """ + + # Prepare xqueue request + #------------------------------------------------------------ + + qinterface = system.xqueue['interface'] + qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat) + + anonymous_student_id = system.anonymous_student_id + + # Generate header + queuekey = xqueue_interface.make_hashkey(str(system.seed) + qtime + + anonymous_student_id + + str(len(self.history))) + + xheader = 
xqueue_interface.make_xheader(lms_callback_url=system.xqueue['callback_url'], + lms_key=queuekey, + queue_name=self.queue_name) + + contents = self.payload.copy() + + # Metadata related to the student submission revealed to the external grader + student_info = {'anonymous_student_id': anonymous_student_id, + 'submission_time': qtime, + } + + #Update contents with student response and student info + contents.update({ + 'student_info': json.dumps(student_info), + 'student_response': submission, + 'max_score': self.max_score(), + }) + + # Submit request. When successful, 'msg' is the prior length of the queue + (error, msg) = qinterface.send_to_queue(header=xheader, + body=json.dumps(contents)) + + # State associated with the queueing request + queuestate = {'key': queuekey, + 'time': qtime, } + return True + + def _update_score(self, score_msg, queuekey, system): + """ + Called by xqueue to update the score + @param score_msg: The message from xqueue + @param queuekey: The key sent by xqueue + @param system: Modulesystem + @return: Boolean True (not useful currently) + """ + new_score_msg = self._parse_score_msg(score_msg) + if not new_score_msg['valid']: + score_msg['feedback'] = 'Invalid grader reply. Please contact the course staff.' + + self.record_latest_score(new_score_msg['score']) + self.record_latest_post_assessment(score_msg) + self.state = self.POST_ASSESSMENT + + return True + + + def get_answers(self): + """ + Gets and shows the answer for this problem. + @return: Answer html + """ + anshtml = '
{0}
'.format(self.answer) + return {self.answer_id: anshtml} + + def get_initial_display(self): + """ + Gets and shows the initial display for the input box. + @return: Initial display html + """ + return {self.answer_id: self.initial_display} + + def _convert_longform_feedback_to_html(self, response_items): + """ + Take in a dictionary, and return html strings for display to student. + Input: + response_items: Dictionary with keys success, feedback. + if success is True, feedback should be a dictionary, with keys for + types of feedback, and the corresponding feedback values. + if success is False, feedback is actually an error string. + + NOTE: this will need to change when we integrate peer grading, because + that will have more complex feedback. + + Output: + String -- html that can be displayincorrect-icon.pnged to the student. + """ + + # We want to display available feedback in a particular order. + # This dictionary specifies which goes first--lower first. + priorities = {# These go at the start of the feedback + 'spelling': 0, + 'grammar': 1, + # needs to be after all the other feedback + 'markup_text': 3} + + default_priority = 2 + + def get_priority(elt): + """ + Args: + elt: a tuple of feedback-type, feedback + Returns: + the priority for this feedback type + """ + return priorities.get(elt[0], default_priority) + + def encode_values(feedback_type, value): + feedback_type = str(feedback_type).encode('ascii', 'ignore') + if not isinstance(value, basestring): + value = str(value) + value = value.encode('ascii', 'ignore') + return feedback_type, value + + def format_feedback(feedback_type, value): + feedback_type, value = encode_values(feedback_type, value) + feedback = """ +
+ {value} +
+ """.format(feedback_type=feedback_type, value=value) + return feedback + + def format_feedback_hidden(feedback_type, value): + feedback_type, value = encode_values(feedback_type, value) + feedback = """ + + """.format(feedback_type=feedback_type, value=value) + return feedback + + # TODO (vshnayder): design and document the details of this format so + # that we can do proper escaping here (e.g. are the graders allowed to + # include HTML?) + + for tag in ['success', 'feedback', 'submission_id', 'grader_id']: + if tag not in response_items: + return format_feedback('errors', 'Error getting feedback') + + feedback_items = response_items['feedback'] + try: + feedback = json.loads(feedback_items) + except (TypeError, ValueError): + log.exception("feedback_items have invalid json %r", feedback_items) + return format_feedback('errors', 'Could not parse feedback') + + if response_items['success']: + if len(feedback) == 0: + return format_feedback('errors', 'No feedback available') + + feedback_lst = sorted(feedback.items(), key=get_priority) + feedback_list_part1 = u"\n".join(format_feedback(k, v) for k, v in feedback_lst) + else: + feedback_list_part1 = format_feedback('errors', response_items['feedback']) + + feedback_list_part2 = (u"\n".join([format_feedback_hidden(feedback_type, value) + for feedback_type, value in response_items.items() + if feedback_type in ['submission_id', 'grader_id']])) + + return u"\n".join([feedback_list_part1, feedback_list_part2]) + + def _format_feedback(self, response_items): + """ + Input: + Dictionary called feedback. Must contain keys seen below. 
+        Output:
+            Return error message or feedback template
+        """
+
+        log.debug(response_items)
+        rubric_feedback=""
+        feedback = self._convert_longform_feedback_to_html(response_items)
+        if response_items['rubric_scores_complete']==True:
+            rubric_feedback = CombinedOpenEndedRubric.render_rubric(response_items['rubric_xml'])
+
+        if not response_items['success']:
+            return render_to_string("open_ended_error.html",
+                                    {'errors': feedback})
+
+        feedback_template = render_to_string("open_ended_feedback.html", {
+            'grader_type': response_items['grader_type'],
+            'score': "{0} / {1}".format(response_items['score'], self.max_score()),
+            'feedback': feedback,
+            'rubric_feedback' : rubric_feedback
+        })
+
+        return feedback_template
+
+
+    def _parse_score_msg(self, score_msg, join_feedback=True):
+        """
+        Grader reply is a JSON-dump of the following dict
+          { 'correct': True/False,
+            'score': Numeric value (floating point is okay) to assign to answer
+            'msg': grader_msg
+            'feedback' : feedback from grader
+        }
+
+        Returns (valid_score_msg, correct, score, msg):
+            valid_score_msg: Flag indicating valid score_msg format (Boolean)
+            correct: Correctness of submission (Boolean)
+            score: Points to be assigned (numeric, can be float)
+        """
+        fail = {'valid': False, 'score': 0, 'feedback': ''}
+        try:
+            score_result = json.loads(score_msg)
+        except (TypeError, ValueError):
+            error_message = ("External grader message should be a JSON-serialized dict."
+                             " Received score_msg = {0}".format(score_msg))
+            log.error(error_message)
+            fail['feedback'] = error_message
+            return fail
+
+        if not isinstance(score_result, dict):
+            error_message = ("External grader message should be a JSON-serialized dict." 
+ " Received score_result = {0}".format(score_result)) + log.error(error_message) + fail['feedback'] = error_message + return fail + + for tag in ['score', 'feedback', 'grader_type', 'success', 'grader_id', 'submission_id']: + if tag not in score_result: + error_message = ("External grader message is missing required tag: {0}" + .format(tag)) + log.error(error_message) + fail['feedback'] = error_message + return fail + #This is to support peer grading + if isinstance(score_result['score'], list): + feedback_items = [] + for i in xrange(0, len(score_result['score'])): + new_score_result = { + 'score': score_result['score'][i], + 'feedback': score_result['feedback'][i], + 'grader_type': score_result['grader_type'], + 'success': score_result['success'], + 'grader_id': score_result['grader_id'][i], + 'submission_id': score_result['submission_id'], + 'rubric_scores_complete' : score_result['rubric_scores_complete'], + 'rubric_xml' : score_result['rubric_xml'], + } + feedback_items.append(self._format_feedback(new_score_result)) + if join_feedback: + feedback = "".join(feedback_items) + else: + feedback = feedback_items + score = int(median(score_result['score'])) + else: + #This is for instructor and ML grading + feedback = self._format_feedback(score_result) + score = score_result['score'] + + self.submission_id = score_result['submission_id'] + self.grader_id = score_result['grader_id'] + + return {'valid': True, 'score': score, 'feedback': feedback} + + def latest_post_assessment(self, short_feedback=False, join_feedback=True): + """ + Gets the latest feedback, parses, and returns + @param short_feedback: If the long feedback is wanted or not + @return: Returns formatted feedback + """ + if not self.history: + return "" + + feedback_dict = self._parse_score_msg(self.history[-1].get('post_assessment', ""), join_feedback=join_feedback) + if not short_feedback: + return feedback_dict['feedback'] if feedback_dict['valid'] else '' + if feedback_dict['valid']: + 
short_feedback = self._convert_longform_feedback_to_html( + json.loads(self.history[-1].get('post_assessment', ""))) + return short_feedback if feedback_dict['valid'] else '' + + def format_feedback_with_evaluation(self, feedback): + """ + Renders a given html feedback into an evaluation template + @param feedback: HTML feedback + @return: Rendered html + """ + context = {'msg': feedback, 'id': "1", 'rows': 50, 'cols': 50} + html = render_to_string('open_ended_evaluation.html', context) + return html + + def handle_ajax(self, dispatch, get, system): + ''' + This is called by courseware.module_render, to handle an AJAX call. + "get" is request.POST. + + Returns a json dictionary: + { 'progress_changed' : True/False, + 'progress' : 'none'/'in_progress'/'done', + } + ''' + handlers = { + 'save_answer': self.save_answer, + 'score_update': self.update_score, + 'save_post_assessment': self.message_post, + 'skip_post_assessment': self.skip_post_assessment, + 'check_for_score': self.check_for_score, + } + + if dispatch not in handlers: + return 'Error' + + before = self.get_progress() + d = handlers[dispatch](get, system) + after = self.get_progress() + d.update({ + 'progress_changed': after != before, + 'progress_status': Progress.to_js_status_str(after), + }) + return json.dumps(d, cls=ComplexEncoder) + + def check_for_score(self, get, system): + """ + Checks to see if a score has been received yet. + @param get: AJAX get dictionary + @param system: Modulesystem (needed to align with other ajax functions) + @return: Returns the current state + """ + state = self.state + return {'state': state} + + def save_answer(self, get, system): + """ + Saves a student answer + @param get: AJAX get dictionary + @param system: modulesystem + @return: Success indicator + """ + if self.attempts > self.max_attempts: + # If too many attempts, prevent student from saving answer and + # seeing rubric. 
In normal use, students shouldn't see this because + # they won't see the reset button once they're out of attempts. + return { + 'success': False, + 'error': 'Too many attempts.' + } + + if self.state != self.INITIAL: + return self.out_of_sync_error(get) + + # add new history element with answer and empty score and hint. + self.new_history_entry(get['student_answer']) + self.send_to_grader(get['student_answer'], system) + self.change_state(self.ASSESSING) + + return {'success': True, } + + def update_score(self, get, system): + """ + Updates the current score via ajax. Called by xqueue. + Input: AJAX get dictionary, modulesystem + Output: None + """ + queuekey = get['queuekey'] + score_msg = get['xqueue_body'] + #TODO: Remove need for cmap + self._update_score(score_msg, queuekey, system) + + return dict() # No AJAX return is needed + + def get_html(self, system): + """ + Gets the HTML for this problem and renders it + Input: Modulesystem object + Output: Rendered HTML + """ + #set context variables and render template + if self.state != self.INITIAL: + latest = self.latest_answer() + previous_answer = latest if latest is not None else self.initial_display + post_assessment = self.latest_post_assessment() + score = self.latest_score() + correct = 'correct' if self.is_submission_correct(score) else 'incorrect' + else: + post_assessment = "" + correct = "" + previous_answer = self.initial_display + + context = { + 'prompt': self.prompt, + 'previous_answer': previous_answer, + 'state': self.state, + 'allow_reset': self._allow_reset(), + 'rows': 30, + 'cols': 80, + 'id': 'open_ended', + 'msg': post_assessment, + 'child_type': 'openended', + 'correct': correct, + } + log.debug(context) + html = system.render_template('open_ended.html', context) + return html + + +class OpenEndedDescriptor(XmlDescriptor, EditingDescriptor): + """ + Module for adding open ended response questions to courses + """ + mako_template = "widgets/html-edit.html" + module_class = OpenEndedModule 
+ filename_extension = "xml" + + stores_state = True + has_score = True + template_dir_name = "openended" + + js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]} + js_module_name = "HTMLEditingDescriptor" + + @classmethod + def definition_from_xml(cls, xml_object, system): + """ + Pull out the open ended parameters into a dictionary. + + Returns: + { + 'oeparam': 'some-html' + } + """ + for child in ['openendedparam']: + if len(xml_object.xpath(child)) != 1: + raise ValueError("Open Ended definition must include exactly one '{0}' tag".format(child)) + + def parse(k): + """Assumes that xml_object has child k""" + return xml_object.xpath(k)[0] + + return {'oeparam': parse('openendedparam'), } + + + def definition_to_xml(self, resource_fs): + '''Return an xml element representing this definition.''' + elt = etree.Element('openended') + + def add_child(k): + child_str = '<{tag}>{body}'.format(tag=k, body=self.definition[k]) + child_node = etree.fromstring(child_str) + elt.append(child_node) + + for child in ['openendedparam']: + add_child(child) + + return elt + + diff --git a/common/lib/xmodule/xmodule/openendedchild.py b/common/lib/xmodule/xmodule/openendedchild.py new file mode 100644 index 0000000000..2ba9528237 --- /dev/null +++ b/common/lib/xmodule/xmodule/openendedchild.py @@ -0,0 +1,263 @@ +import copy +from fs.errors import ResourceNotFoundError +import itertools +import json +import logging +from lxml import etree +from lxml.html import rewrite_links +from path import path +import os +import sys +import hashlib +import capa.xqueue_interface as xqueue_interface + +from pkg_resources import resource_string + +from .capa_module import only_one, ComplexEncoder +from .editing_module import EditingDescriptor +from .html_checker import check_html +from progress import Progress +from .stringify import stringify_children +from .xml_module import XmlDescriptor +from xmodule.modulestore import Location +from capa.util import * + +from datetime import 
datetime + +log = logging.getLogger("mitx.courseware") + +# Set the default number of max attempts. Should be 1 for production +# Set higher for debugging/testing +# attempts specified in xml definition overrides this. +MAX_ATTEMPTS = 1 + +# Set maximum available number of points. +# Overriden by max_score specified in xml. +MAX_SCORE = 1 + +class OpenEndedChild(): + """ + States: + + initial (prompt, textbox shown) + | + assessing (read-only textbox, rubric + assessment input shown for self assessment, response queued for open ended) + | + post_assessment (read-only textbox, read-only rubric and assessment, hint input box shown) + | + done (submitted msg, green checkmark, everything else read-only. If attempts < max, shows + a reset button that goes back to initial state. Saves previous + submissions too.) + """ + + DEFAULT_QUEUE = 'open-ended' + DEFAULT_MESSAGE_QUEUE = 'open-ended-message' + max_inputfields = 1 + + STATE_VERSION = 1 + + # states + INITIAL = 'initial' + ASSESSING = 'assessing' + POST_ASSESSMENT = 'post_assessment' + DONE = 'done' + + #This is used to tell students where they are at in the module + HUMAN_NAMES = { + 'initial': 'Started', + 'assessing': 'Being scored', + 'post_assessment': 'Scoring finished', + 'done': 'Problem complete', + } + + def __init__(self, system, location, definition, descriptor, static_data, + instance_state=None, shared_state=None, **kwargs): + # Load instance state + if instance_state is not None: + instance_state = json.loads(instance_state) + else: + instance_state = {} + + # History is a list of tuples of (answer, score, hint), where hint may be + # None for any element, and score and hint can be None for the last (current) + # element. 
+ # Scores are on scale from 0 to max_score + self.history = instance_state.get('history', []) + + self.state = instance_state.get('state', self.INITIAL) + + self.created = instance_state.get('created', False) + + self.attempts = instance_state.get('attempts', 0) + self.max_attempts = static_data['max_attempts'] + + self.prompt = static_data['prompt'] + self.rubric = static_data['rubric'] + + # Used for progress / grading. Currently get credit just for + # completion (doesn't matter if you self-assessed correct/incorrect). + self._max_score = static_data['max_score'] + + self.setup_response(system, location, definition, descriptor) + + def setup_response(self, system, location, definition, descriptor): + """ + Needs to be implemented by the inheritors of this module. Sets up additional fields used by the child modules. + @param system: Modulesystem + @param location: Module location + @param definition: XML definition + @param descriptor: Descriptor of the module + @return: None + """ + pass + + def latest_answer(self): + """None if not available""" + if not self.history: + return "" + return self.history[-1].get('answer', "") + + def latest_score(self): + """None if not available""" + if not self.history: + return None + return self.history[-1].get('score') + + def latest_post_assessment(self): + """None if not available""" + if not self.history: + return "" + return self.history[-1].get('post_assessment', "") + + def new_history_entry(self, answer): + """ + Adds a new entry to the history dictionary + @param answer: The student supplied answer + @return: None + """ + self.history.append({'answer': answer}) + + def record_latest_score(self, score): + """Assumes that state is right, so we're adding a score to the latest + history element""" + self.history[-1]['score'] = score + + def record_latest_post_assessment(self, post_assessment): + """Assumes that state is right, so we're adding a score to the latest + history element""" + self.history[-1]['post_assessment'] 
= post_assessment + + def change_state(self, new_state): + """ + A centralized place for state changes--allows for hooks. If the + current state matches the old state, don't run any hooks. + """ + if self.state == new_state: + return + + self.state = new_state + + if self.state == self.DONE: + self.attempts += 1 + + def get_instance_state(self): + """ + Get the current score and state + """ + + state = { + 'version': self.STATE_VERSION, + 'history': self.history, + 'state': self.state, + 'max_score': self._max_score, + 'attempts': self.attempts, + 'created': False, + } + return json.dumps(state) + + def _allow_reset(self): + """Can the module be reset?""" + return (self.state == self.DONE and self.attempts < self.max_attempts) + + def max_score(self): + """ + Return max_score + """ + return self._max_score + + def get_score(self): + """ + Returns the last score in the list + """ + score = self.latest_score() + return {'score': score if score is not None else 0, + 'total': self._max_score} + + def reset(self, system): + """ + If resetting is allowed, reset the state. + + Returns {'success': bool, 'error': msg} + (error only present if not success) + """ + self.change_state(self.INITIAL) + return {'success': True} + + def get_progress(self): + ''' + For now, just return last score / max_score + ''' + if self._max_score > 0: + try: + return Progress(self.get_score()['score'], self._max_score) + except Exception as err: + log.exception("Got bad progress") + return None + return None + + def out_of_sync_error(self, get, msg=''): + """ + return dict out-of-sync error message, and also log. + """ + log.warning("Assessment module state out sync. state: %r, get: %r. %s", + self.state, get, msg) + return {'success': False, + 'error': 'The problem state got out-of-sync'} + + def get_html(self): + """ + Needs to be implemented by inheritors. Renders the HTML that students see. + @return: + """ + pass + + def handle_ajax(self): + """ + Needs to be implemented by child modules. 
Handles AJAX events. + @return: + """ + pass + + def is_submission_correct(self, score): + """ + Checks to see if a given score makes the answer correct. Very naive right now (>66% is correct) + @param score: Numeric score. + @return: Boolean correct. + """ + correct = False + if(isinstance(score, (int, long, float, complex))): + score_ratio = int(score) / float(self.max_score()) + correct = (score_ratio >= 0.66) + return correct + + def is_last_response_correct(self): + """ + Checks to see if the last response in the module is correct. + @return: 'correct' if correct, otherwise 'incorrect' + """ + score = self.get_score()['score'] + correctness = 'correct' if self.is_submission_correct(score) else 'incorrect' + return correctness + + + diff --git a/common/lib/xmodule/xmodule/self_assessment_module.py b/common/lib/xmodule/xmodule/self_assessment_module.py index eb8a275d35..940b61c557 100644 --- a/common/lib/xmodule/xmodule/self_assessment_module.py +++ b/common/lib/xmodule/xmodule/self_assessment_module.py @@ -1,10 +1,3 @@ -""" -A Self Assessment module that allows students to write open-ended responses, -submit, then see a rubric and rate themselves. Persists student supplied -hints, answers, and assessment judgment (currently only correct/incorrect). -Parses xml definition file--see below for exact format. -""" - import copy from fs.errors import ResourceNotFoundError import itertools @@ -26,205 +19,50 @@ from .stringify import stringify_children from .x_module import XModule from .xml_module import XmlDescriptor from xmodule.modulestore import Location +import openendedchild + +from combined_open_ended_rubric import CombinedOpenEndedRubric log = logging.getLogger("mitx.courseware") -# Set the default number of max attempts. Should be 1 for production -# Set higher for debugging/testing -# attempts specified in xml definition overrides this. -MAX_ATTEMPTS = 1 - -# Set maximum available number of points. -# Overriden by max_score specified in xml. 
-MAX_SCORE = 1 - -class SelfAssessmentModule(XModule): +class SelfAssessmentModule(openendedchild.OpenEndedChild): """ - States: + A Self Assessment module that allows students to write open-ended responses, + submit, then see a rubric and rate themselves. Persists student supplied + hints, answers, and assessment judgment (currently only correct/incorrect). + Parses xml definition file--see below for exact format. - initial (prompt, textbox shown) - | - assessing (read-only textbox, rubric + assessment input shown) - | - request_hint (read-only textbox, read-only rubric and assessment, hint input box shown) - | - done (submitted msg, green checkmark, everything else read-only. If attempts < max, shows - a reset button that goes back to initial state. Saves previous - submissions too.) + Sample XML format: + + + What hint about this problem would you give to someone? + + + Save Succcesful. Thanks for participating! + + """ - STATE_VERSION = 1 - - # states - INITIAL = 'initial' - ASSESSING = 'assessing' - REQUEST_HINT = 'request_hint' - DONE = 'done' - - js = {'coffee': [resource_string(__name__, 'js/src/selfassessment/display.coffee')]} - js_module_name = "SelfAssessment" - - def __init__(self, system, location, definition, descriptor, - instance_state=None, shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) - + def setup_response(self, system, location, definition, descriptor): """ - Definition file should have 4 blocks -- prompt, rubric, submitmessage, hintprompt, - and two optional attributes: - attempts, which should be an integer that defaults to 1. - If it's > 1, the student will be able to re-submit after they see - the rubric. - max_score, which should be an integer that defaults to 1. - It defines the maximum number of points a student can get. Assumed to be integer scale - from 0 to max_score, with an interval of 1. - - Note: all the submissions are stored. 
- - Sample file: - - - - Insert prompt text here. (arbitrary html) - - - Insert grading rubric here. (arbitrary html) - - - Please enter a hint below: (arbitrary html) - - - Thanks for submitting! (arbitrary html) - - + Sets up the module + @param system: Modulesystem + @param location: location, to let the module know where it is. + @param definition: XML definition of the module. + @param descriptor: SelfAssessmentDescriptor + @return: None """ - - # Load instance state - if instance_state is not None: - instance_state = json.loads(instance_state) - else: - instance_state = {} - - instance_state = self.convert_state_to_current_format(instance_state) - - # History is a list of tuples of (answer, score, hint), where hint may be - # None for any element, and score and hint can be None for the last (current) - # element. - # Scores are on scale from 0 to max_score - self.history = instance_state.get('history', []) - - self.state = instance_state.get('state', 'initial') - - self.attempts = instance_state.get('attempts', 0) - self.max_attempts = int(self.metadata.get('attempts', MAX_ATTEMPTS)) - - # Used for progress / grading. Currently get credit just for - # completion (doesn't matter if you self-assessed correct/incorrect). 
- self._max_score = int(self.metadata.get('max_score', MAX_SCORE)) - - self.rubric = definition['rubric'] - self.prompt = definition['prompt'] self.submit_message = definition['submitmessage'] self.hint_prompt = definition['hintprompt'] + self.prompt = stringify_children(self.prompt) + self.rubric = stringify_children(self.rubric) - - def latest_answer(self): - """None if not available""" - if not self.history: - return None - return self.history[-1].get('answer') - - def latest_score(self): - """None if not available""" - if not self.history: - return None - return self.history[-1].get('score') - - def latest_hint(self): - """None if not available""" - if not self.history: - return None - return self.history[-1].get('hint') - - def new_history_entry(self, answer): - self.history.append({'answer': answer}) - - def record_latest_score(self, score): - """Assumes that state is right, so we're adding a score to the latest - history element""" - self.history[-1]['score'] = score - - def record_latest_hint(self, hint): - """Assumes that state is right, so we're adding a score to the latest - history element""" - self.history[-1]['hint'] = hint - - - def change_state(self, new_state): + def get_html(self, system): """ - A centralized place for state changes--allows for hooks. If the - current state matches the old state, don't run any hooks. + Gets context and renders HTML that represents the module + @param system: Modulesystem + @return: Rendered HTML """ - if self.state == new_state: - return - - self.state = new_state - - if self.state == self.DONE: - self.attempts += 1 - - @staticmethod - def convert_state_to_current_format(old_state): - """ - This module used to use a problematic state representation. This method - converts that into the new format. - - Args: - old_state: dict of state, as passed in. May be old. 
- - Returns: - new_state: dict of new state - """ - if old_state.get('version', 0) == SelfAssessmentModule.STATE_VERSION: - # already current - return old_state - - # for now, there's only one older format. - - new_state = {'version': SelfAssessmentModule.STATE_VERSION} - - def copy_if_present(key): - if key in old_state: - new_state[key] = old_state[key] - - for to_copy in ['attempts', 'state']: - copy_if_present(to_copy) - - # The answers, scores, and hints need to be kept together to avoid them - # getting out of sync. - - # NOTE: Since there's only one problem with a few hundred submissions - # in production so far, not trying to be smart about matching up hints - # and submissions in cases where they got out of sync. - - student_answers = old_state.get('student_answers', []) - scores = old_state.get('scores', []) - hints = old_state.get('hints', []) - - new_state['history'] = [ - {'answer': answer, - 'score': score, - 'hint': hint} - for answer, score, hint in itertools.izip_longest( - student_answers, scores, hints)] - return new_state - - - def _allow_reset(self): - """Can the module be reset?""" - return self.state == self.DONE and self.attempts < self.max_attempts - - def get_html(self): #set context variables and render template if self.state != self.INITIAL: latest = self.latest_answer() @@ -235,46 +73,20 @@ class SelfAssessmentModule(XModule): context = { 'prompt': self.prompt, 'previous_answer': previous_answer, - 'ajax_url': self.system.ajax_url, - 'initial_rubric': self.get_rubric_html(), - 'initial_hint': self.get_hint_html(), + 'ajax_url': system.ajax_url, + 'initial_rubric': self.get_rubric_html(system), + 'initial_hint': self.get_hint_html(system), 'initial_message': self.get_message_html(), 'state': self.state, 'allow_reset': self._allow_reset(), + 'child_type': 'selfassessment', } - html = self.system.render_template('self_assessment_prompt.html', context) - # cdodge: perform link substitutions for any references to course static content (e.g. 
images) - return rewrite_links(html, self.rewrite_content_links) - - def max_score(self): - """ - Return max_score - """ - return self._max_score - - def get_score(self): - """ - Returns the last score in the list - """ - score = self.latest_score() - return {'score': score if score is not None else 0, - 'total': self._max_score} - - def get_progress(self): - ''' - For now, just return last score / max_score - ''' - if self._max_score > 0: - try: - return Progress(self.get_score()['score'], self._max_score) - except Exception as err: - log.exception("Got bad progress") - return None - return None + html = system.render_template('self_assessment_prompt.html', context) + return html - def handle_ajax(self, dispatch, get): + def handle_ajax(self, dispatch, get, system): """ This is called by courseware.module_render, to handle an AJAX call. "get" is request.POST. @@ -288,15 +100,14 @@ class SelfAssessmentModule(XModule): handlers = { 'save_answer': self.save_answer, 'save_assessment': self.save_assessment, - 'save_hint': self.save_hint, - 'reset': self.reset, + 'save_post_assessment': self.save_hint, } if dispatch not in handlers: return 'Error' before = self.get_progress() - d = handlers[dispatch](get) + d = handlers[dispatch](get, system) after = self.get_progress() d.update({ 'progress_changed': after != before, @@ -304,37 +115,30 @@ class SelfAssessmentModule(XModule): }) return json.dumps(d, cls=ComplexEncoder) - def out_of_sync_error(self, get, msg=''): - """ - return dict out-of-sync error message, and also log. - """ - log.warning("Assessment module state out sync. state: %r, get: %r. %s", - self.state, get, msg) - return {'success': False, - 'error': 'The problem state got out-of-sync'} - - def get_rubric_html(self): + def get_rubric_html(self, system): """ Return the appropriate version of the rubric, based on the state. 
""" if self.state == self.INITIAL: return '' + rubric_html = CombinedOpenEndedRubric.render_rubric(self.rubric) + # we'll render it - context = {'rubric': self.rubric, - 'max_score' : self._max_score, - } + context = {'rubric': rubric_html, + 'max_score': self._max_score, + } if self.state == self.ASSESSING: context['read_only'] = False - elif self.state in (self.REQUEST_HINT, self.DONE): + elif self.state in (self.POST_ASSESSMENT, self.DONE): context['read_only'] = True else: raise ValueError("Illegal state '%r'" % self.state) - return self.system.render_template('self_assessment_rubric.html', context) + return system.render_template('self_assessment_rubric.html', context) - def get_hint_html(self): + def get_hint_html(self, system): """ Return the appropriate version of the hint view, based on state. """ @@ -343,7 +147,7 @@ class SelfAssessmentModule(XModule): if self.state == self.DONE: # display the previous hint - latest = self.latest_hint() + latest = self.latest_post_assessment() hint = latest if latest is not None else '' else: hint = '' @@ -351,14 +155,14 @@ class SelfAssessmentModule(XModule): context = {'hint_prompt': self.hint_prompt, 'hint': hint} - if self.state == self.REQUEST_HINT: + if self.state == self.POST_ASSESSMENT: context['read_only'] = False elif self.state == self.DONE: context['read_only'] = True else: raise ValueError("Illegal state '%r'" % self.state) - return self.system.render_template('self_assessment_hint.html', context) + return system.render_template('self_assessment_hint.html', context) def get_message_html(self): """ @@ -370,7 +174,7 @@ class SelfAssessmentModule(XModule): return """
{0}
""".format(self.submit_message) - def save_answer(self, get): + def save_answer(self, get, system): """ After the answer is submitted, show the rubric. @@ -401,10 +205,10 @@ class SelfAssessmentModule(XModule): return { 'success': True, - 'rubric_html': self.get_rubric_html() - } + 'rubric_html': self.get_rubric_html(system) + } - def save_assessment(self, get): + def save_assessment(self, get, system): """ Save the assessment. If the student said they're right, don't ask for a hint, and go straight to the done state. Otherwise, do ask for a hint. @@ -429,21 +233,20 @@ class SelfAssessmentModule(XModule): self.record_latest_score(score) - d = {'success': True,} + d = {'success': True, } if score == self.max_score(): self.change_state(self.DONE) d['message_html'] = self.get_message_html() d['allow_reset'] = self._allow_reset() else: - self.change_state(self.REQUEST_HINT) - d['hint_html'] = self.get_hint_html() + self.change_state(self.POST_ASSESSMENT) + d['hint_html'] = self.get_hint_html(system) d['state'] = self.state return d - - def save_hint(self, get): + def save_hint(self, get, system): ''' Save the hint. Returns a dict { 'success': bool, @@ -453,63 +256,19 @@ class SelfAssessmentModule(XModule): with the error key only present if success is False and message_html only if True. ''' - if self.state != self.REQUEST_HINT: + if self.state != self.POST_ASSESSMENT: # Note: because we only ask for hints on wrong answers, may not have # the same number of hints and answers. return self.out_of_sync_error(get) - self.record_latest_hint(get['hint']) + self.record_latest_post_assessment(get['hint']) self.change_state(self.DONE) - # To the tracking logs! 
- event_info = { - 'selfassessment_id': self.location.url(), - 'state': { - 'version': self.STATE_VERSION, - 'history': self.history, - } - } - self.system.track_function('save_hint', event_info) - return {'success': True, 'message_html': self.get_message_html(), 'allow_reset': self._allow_reset()} - def reset(self, get): - """ - If resetting is allowed, reset the state. - - Returns {'success': bool, 'error': msg} - (error only present if not success) - """ - if self.state != self.DONE: - return self.out_of_sync_error(get) - - if self.attempts > self.max_attempts: - return { - 'success': False, - 'error': 'Too many attempts.' - } - self.change_state(self.INITIAL) - return {'success': True} - - - def get_instance_state(self): - """ - Get the current score and state - """ - - state = { - 'version': self.STATE_VERSION, - 'history': self.history, - 'state': self.state, - 'max_score': self._max_score, - 'attempts': self.attempts, - } - return json.dumps(state) - - class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor): """ Module for adding self assessment questions to courses @@ -532,13 +291,11 @@ class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor): Returns: { - 'rubric': 'some-html', - 'prompt': 'some-html', 'submitmessage': 'some-html' 'hintprompt': 'some-html' } """ - expected_children = ['rubric', 'prompt', 'submitmessage', 'hintprompt'] + expected_children = ['submitmessage', 'hintprompt'] for child in expected_children: if len(xml_object.xpath(child)) != 1: raise ValueError("Self assessment definition must include exactly one '{0}' tag".format(child)) @@ -547,12 +304,9 @@ class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor): """Assumes that xml_object has child k""" return stringify_children(xml_object.xpath(k)[0]) - return {'rubric': parse('rubric'), - 'prompt': parse('prompt'), - 'submitmessage': parse('submitmessage'), + return {'submitmessage': parse('submitmessage'), 'hintprompt': parse('hintprompt'), - } - + } def 
definition_to_xml(self, resource_fs): '''Return an xml element representing this definition.''' @@ -563,7 +317,7 @@ class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor): child_node = etree.fromstring(child_str) elt.append(child_node) - for child in ['rubric', 'prompt', 'submitmessage', 'hintprompt']: + for child in ['submitmessage', 'hintprompt']: add_child(child) return elt diff --git a/common/lib/xmodule/xmodule/tests/test_course_module.py b/common/lib/xmodule/xmodule/tests/test_course_module.py new file mode 100644 index 0000000000..63eaec1f61 --- /dev/null +++ b/common/lib/xmodule/xmodule/tests/test_course_module.py @@ -0,0 +1,90 @@ +import unittest +from time import strptime, gmtime +from fs.memoryfs import MemoryFS + +from mock import Mock, patch + +from xmodule.modulestore.xml import ImportSystem, XMLModuleStore + + +ORG = 'test_org' +COURSE = 'test_course' + +NOW = strptime('2013-01-01T01:00:00', '%Y-%m-%dT%H:%M:00') + + +class DummySystem(ImportSystem): + @patch('xmodule.modulestore.xml.OSFS', lambda dir: MemoryFS()) + def __init__(self, load_error_modules): + + xmlstore = XMLModuleStore("data_dir", course_dirs=[], + load_error_modules=load_error_modules) + course_id = "/".join([ORG, COURSE, 'test_run']) + course_dir = "test_dir" + policy = {} + error_tracker = Mock() + parent_tracker = Mock() + + super(DummySystem, self).__init__( + xmlstore, + course_id, + course_dir, + policy, + error_tracker, + parent_tracker, + load_error_modules=load_error_modules, + ) + + +class IsNewCourseTestCase(unittest.TestCase): + """Make sure the property is_new works on courses""" + @staticmethod + def get_dummy_course(start, is_new=None, load_error_modules=True): + """Get a dummy course""" + + system = DummySystem(load_error_modules) + is_new = '' if is_new is None else 'is_new="{0}"'.format(is_new).lower() + + start_xml = ''' + + + Two houses, ... 
+ + + '''.format(org=ORG, course=COURSE, start=start, is_new=is_new) + + return system.process_xml(start_xml) + + @patch('xmodule.course_module.time.gmtime') + def test_non_started_yet(self, gmtime_mock): + descriptor = self.get_dummy_course(start='2013-01-05T12:00') + gmtime_mock.return_value = NOW + assert(descriptor.is_new == True) + assert(descriptor.days_until_start == 4) + + @patch('xmodule.course_module.time.gmtime') + def test_already_started(self, gmtime_mock): + gmtime_mock.return_value = NOW + + descriptor = self.get_dummy_course(start='2012-12-02T12:00') + assert(descriptor.is_new == False) + assert(descriptor.days_until_start < 0) + + @patch('xmodule.course_module.time.gmtime') + def test_is_new_set(self, gmtime_mock): + gmtime_mock.return_value = NOW + + descriptor = self.get_dummy_course(start='2012-12-02T12:00', is_new=True) + assert(descriptor.is_new == True) + assert(descriptor.days_until_start < 0) + + descriptor = self.get_dummy_course(start='2013-02-02T12:00', is_new=False) + assert(descriptor.is_new == False) + assert(descriptor.days_until_start > 0) + + descriptor = self.get_dummy_course(start='2013-02-02T12:00', is_new=True) + assert(descriptor.is_new == True) + assert(descriptor.days_until_start > 0) diff --git a/common/lib/xmodule/xmodule/tests/test_export.py b/common/lib/xmodule/xmodule/tests/test_export.py index aeebc6da6b..7605155a6c 100644 --- a/common/lib/xmodule/xmodule/tests/test_export.py +++ b/common/lib/xmodule/xmodule/tests/test_export.py @@ -39,9 +39,12 @@ def strip_filenames(descriptor): class RoundTripTestCase(unittest.TestCase): - '''Check that our test courses roundtrip properly''' + ''' Check that our test courses roundtrip properly. + Same course imported , than exported, then imported again. + And we compare original import with second import (after export). + Thus we make sure that export and import work properly. 
+ ''' def check_export_roundtrip(self, data_dir, course_dir): - root_dir = path(mkdtemp()) print "Copying test course to temp dir {0}".format(root_dir) @@ -117,3 +120,7 @@ class RoundTripTestCase(unittest.TestCase): def test_selfassessment_roundtrip(self): #Test selfassessment xmodule to see if it exports correctly self.check_export_roundtrip(DATA_DIR,"self_assessment") + + def test_graphicslidertool_roundtrip(self): + #Test graphicslidertool xmodule to see if it exports correctly + self.check_export_roundtrip(DATA_DIR,"graphic_slider_tool") diff --git a/common/lib/xmodule/xmodule/tests/test_import.py b/common/lib/xmodule/xmodule/tests/test_import.py index 77532959d7..90ec112f19 100644 --- a/common/lib/xmodule/xmodule/tests/test_import.py +++ b/common/lib/xmodule/xmodule/tests/test_import.py @@ -352,3 +352,19 @@ class ImportTestCase(unittest.TestCase): sa_sample = modulestore.get_instance(sa_id, location) #10 attempts is hard coded into SampleQuestion, which is the url_name of a selfassessment xml tag self.assertEqual(sa_sample.metadata['attempts'], '10') + + def test_graphicslidertool_import(self): + ''' + Check to see if definition_from_xml in gst_module.py + works properly. Pulls data from the graphic_slider_tool directory + in the test data directory. 
+ ''' + modulestore = XMLModuleStore(DATA_DIR, course_dirs=['graphic_slider_tool']) + + sa_id = "edX/gst_test/2012_Fall" + location = Location(["i4x", "edX", "gst_test", "graphical_slider_tool", "sample_gst"]) + gst_sample = modulestore.get_instance(sa_id, location) + render_string_from_sample_gst_xml = """ + \ +""".strip() + self.assertEqual(gst_sample.definition['render'], render_string_from_sample_gst_xml) diff --git a/common/lib/xmodule/xmodule/video_module.py b/common/lib/xmodule/xmodule/video_module.py index 9a22950ca8..801e70fd06 100644 --- a/common/lib/xmodule/xmodule/video_module.py +++ b/common/lib/xmodule/xmodule/video_module.py @@ -7,6 +7,9 @@ from pkg_resources import resource_string, resource_listdir from xmodule.x_module import XModule from xmodule.raw_module import RawDescriptor +import datetime +import time + log = logging.getLogger(__name__) @@ -33,6 +36,7 @@ class VideoModule(XModule): self.show_captions = xmltree.get('show_captions', 'true') self.source = self._get_source(xmltree) self.track = self._get_track(xmltree) + self.start_time, self.end_time = self._get_timeframe(xmltree) if instance_state is not None: state = json.loads(instance_state) @@ -42,11 +46,11 @@ class VideoModule(XModule): def _get_source(self, xmltree): # find the first valid source return self._get_first_external(xmltree, 'source') - + def _get_track(self, xmltree): # find the first valid track return self._get_first_external(xmltree, 'track') - + def _get_first_external(self, xmltree, tag): """ Will return the first valid element @@ -61,6 +65,23 @@ class VideoModule(XModule): break return result + def _get_timeframe(self, xmltree): + """ Converts 'from' and 'to' parameters in video tag to seconds. + If there are no parameters, returns empty string. """ + + def parse_time(s): + """Converts s in '12:34:45' format to seconds. 
If s is + None, returns empty string""" + if s is None: + return '' + else: + x = time.strptime(s, '%H:%M:%S') + return datetime.timedelta(hours=x.tm_hour, + minutes=x.tm_min, + seconds=x.tm_sec).total_seconds() + + return parse_time(xmltree.get('from')), parse_time(xmltree.get('to')) + def handle_ajax(self, dispatch, get): ''' Handle ajax calls to this video. @@ -98,11 +119,13 @@ class VideoModule(XModule): 'id': self.location.html_id(), 'position': self.position, 'source': self.source, - 'track' : self.track, + 'track': self.track, 'display_name': self.display_name, # TODO (cpennington): This won't work when we move to data that isn't on the filesystem 'data_dir': self.metadata['data_dir'], - 'show_captions': self.show_captions + 'show_captions': self.show_captions, + 'start': self.start_time, + 'end': self.end_time }) diff --git a/common/static/js/vendor/RequireJS.js b/common/static/js/vendor/RequireJS.js new file mode 100644 index 0000000000..a0526930ef --- /dev/null +++ b/common/static/js/vendor/RequireJS.js @@ -0,0 +1,57 @@ +/* + * This file is a wrapper for the Require JS file and module loader. Please see + * the discussion at: + * + * https://edx-wiki.atlassian.net/wiki/display/LMS/Integration+of+Require+JS+into+the+system + */ + +var RequireJS = function() { + +// Below is the unmodified minified version of Require JS. The latest can be +// found at: +// +// http://requirejs.org/docs/download.html + +/* + RequireJS 2.1.2 Copyright (c) 2010-2012, The Dojo Foundation All Rights Reserved. + Available via the MIT or new BSD license. 
+ see: http://github.com/jrburke/requirejs for details +*/ +var requirejs,require,define; +(function(Y){function H(b){return"[object Function]"===L.call(b)}function I(b){return"[object Array]"===L.call(b)}function x(b,c){if(b){var d;for(d=0;dthis.depCount&&!this.defined){if(H(n)){if(this.events.error)try{e=j.execCb(c,n,b,e)}catch(d){a=d}else e=j.execCb(c,n,b,e);this.map.isDefine&&((b=this.module)&&void 0!==b.exports&&b.exports!==this.exports?e=b.exports:void 0===e&&this.usingExports&&(e=this.exports));if(a)return a.requireMap=this.map,a.requireModules=[this.map.id],a.requireType="define",C(this.error=a)}else e=n;this.exports=e;if(this.map.isDefine&& +!this.ignore&&(p[c]=e,l.onResourceLoad))l.onResourceLoad(j,this.map,this.depMaps);delete k[c];this.defined=!0}this.defining=!1;this.defined&&!this.defineEmitted&&(this.defineEmitted=!0,this.emit("defined",this.exports),this.defineEmitComplete=!0)}}else this.fetch()}},callPlugin:function(){var a=this.map,b=a.id,d=h(a.prefix);this.depMaps.push(d);s(d,"defined",t(this,function(e){var n,d;d=this.map.name;var v=this.map.parentMap?this.map.parentMap.name:null,f=j.makeRequire(a.parentMap,{enableBuildCallback:!0, +skipMap:!0});if(this.map.unnormalized){if(e.normalize&&(d=e.normalize(d,function(a){return c(a,v,!0)})||""),e=h(a.prefix+"!"+d,this.map.parentMap),s(e,"defined",t(this,function(a){this.init([],function(){return a},null,{enabled:!0,ignore:!0})})),d=i(k,e.id)){this.depMaps.push(e);if(this.events.error)d.on("error",t(this,function(a){this.emit("error",a)}));d.enable()}}else n=t(this,function(a){this.init([],function(){return a},null,{enabled:!0})}),n.error=t(this,function(a){this.inited=!0;this.error= +a;a.requireModules=[b];E(k,function(a){0===a.map.id.indexOf(b+"_unnormalized")&&delete k[a.map.id]});C(a)}),n.fromText=t(this,function(e,c){var d=a.name,u=h(d),v=O;c&&(e=c);v&&(O=!1);q(u);r(m.config,b)&&(m.config[d]=m.config[b]);try{l.exec(e)}catch(k){throw Error("fromText eval for "+d+" failed: 
"+k);}v&&(O=!0);this.depMaps.push(u);j.completeLoad(d);f([d],n)}),e.load(a.name,f,n,m)}));j.enable(d,this);this.pluginMaps[d.id]=d},enable:function(){this.enabling=this.enabled=!0;x(this.depMaps,t(this,function(a, +b){var c,e;if("string"===typeof a){a=h(a,this.map.isDefine?this.map:this.map.parentMap,!1,!this.skipMap);this.depMaps[b]=a;if(c=i(N,a.id)){this.depExports[b]=c(this);return}this.depCount+=1;s(a,"defined",t(this,function(a){this.defineDep(b,a);this.check()}));this.errback&&s(a,"error",this.errback)}c=a.id;e=k[c];!r(N,c)&&(e&&!e.enabled)&&j.enable(a,this)}));E(this.pluginMaps,t(this,function(a){var b=i(k,a.id);b&&!b.enabled&&j.enable(a,this)}));this.enabling=!1;this.check()},on:function(a,b){var c= +this.events[a];c||(c=this.events[a]=[]);c.push(b)},emit:function(a,b){x(this.events[a],function(a){a(b)});"error"===a&&delete this.events[a]}};j={config:m,contextName:b,registry:k,defined:p,urlFetched:S,defQueue:F,Module:W,makeModuleMap:h,nextTick:l.nextTick,configure:function(a){a.baseUrl&&"/"!==a.baseUrl.charAt(a.baseUrl.length-1)&&(a.baseUrl+="/");var b=m.pkgs,c=m.shim,e={paths:!0,config:!0,map:!0};E(a,function(a,b){e[b]?"map"===b?Q(m[b],a,!0,!0):Q(m[b],a,!0):m[b]=a});a.shim&&(E(a.shim,function(a, +b){I(a)&&(a={deps:a});if((a.exports||a.init)&&!a.exportsFn)a.exportsFn=j.makeShimExports(a);c[b]=a}),m.shim=c);a.packages&&(x(a.packages,function(a){a="string"===typeof a?{name:a}:a;b[a.name]={name:a.name,location:a.location||a.name,main:(a.main||"main").replace(ga,"").replace(aa,"")}}),m.pkgs=b);E(k,function(a,b){!a.inited&&!a.map.unnormalized&&(a.map=h(b))});if(a.deps||a.callback)j.require(a.deps||[],a.callback)},makeShimExports:function(a){return function(){var b;a.init&&(b=a.init.apply(Y,arguments)); +return b||a.exports&&Z(a.exports)}},makeRequire:function(a,d){function f(e,c,u){var i,m;d.enableBuildCallback&&(c&&H(c))&&(c.__requireJsBuild=!0);if("string"===typeof e){if(H(c))return C(J("requireargs","Invalid require call"),u);if(a&&r(N,e))return 
N[e](k[a.id]);if(l.get)return l.get(j,e,a);i=h(e,a,!1,!0);i=i.id;return!r(p,i)?C(J("notloaded",'Module name "'+i+'" has not been loaded yet for context: '+b+(a?"":". Use require([])"))):p[i]}K();j.nextTick(function(){K();m=q(h(null,a));m.skipMap=d.skipMap; +m.init(e,c,u,{enabled:!0});B()});return f}d=d||{};Q(f,{isBrowser:z,toUrl:function(b){var d=b.lastIndexOf("."),g=null;-1!==d&&(g=b.substring(d,b.length),b=b.substring(0,d));return j.nameToUrl(c(b,a&&a.id,!0),g)},defined:function(b){return r(p,h(b,a,!1,!0).id)},specified:function(b){b=h(b,a,!1,!0).id;return r(p,b)||r(k,b)}});a||(f.undef=function(b){w();var c=h(b,a,!0),d=i(k,b);delete p[b];delete S[c.url];delete X[b];d&&(d.events.defined&&(X[b]=d.events),delete k[b])});return f},enable:function(a){i(k, +a.id)&&q(a).enable()},completeLoad:function(a){var b,c,d=i(m.shim,a)||{},h=d.exports;for(w();F.length;){c=F.shift();if(null===c[0]){c[0]=a;if(b)break;b=!0}else c[0]===a&&(b=!0);D(c)}c=i(k,a);if(!b&&!r(p,a)&&c&&!c.inited){if(m.enforceDefine&&(!h||!Z(h)))return y(a)?void 0:C(J("nodefine","No define call for "+a,null,[a]));D([a,d.deps||[],d.exportsFn])}B()},nameToUrl:function(a,b){var c,d,h,f,j,k;if(l.jsExtRegExp.test(a))f=a+(b||"");else{c=m.paths;d=m.pkgs;f=a.split("/");for(j=f.length;0f.attachEvent.toString().indexOf("[native code"))&&!V?(O=!0,f.attachEvent("onreadystatechange", +b.onScriptLoad)):(f.addEventListener("load",b.onScriptLoad,!1),f.addEventListener("error",b.onScriptError,!1)),f.src=d,K=f,D?A.insertBefore(f,D):A.appendChild(f),K=null,f;$&&(importScripts(d),b.completeLoad(c))};z&&M(document.getElementsByTagName("script"),function(b){A||(A=b.parentNode);if(s=b.getAttribute("data-main"))return q.baseUrl||(G=s.split("/"),ba=G.pop(),ca=G.length?G.join("/")+"/":"./",q.baseUrl=ca,s=ba),s=s.replace(aa,""),q.deps=q.deps?q.deps.concat(s):[s],!0});define=function(b,c,d){var i, +f;"string"!==typeof 
b&&(d=c,c=b,b=null);I(c)||(d=c,c=[]);!c.length&&H(d)&&d.length&&(d.toString().replace(ia,"").replace(ja,function(b,d){c.push(d)}),c=(1===d.length?["require"]:["require","exports","module"]).concat(c));if(O){if(!(i=K))P&&"interactive"===P.readyState||M(document.getElementsByTagName("script"),function(b){if("interactive"===b.readyState)return P=b}),i=P;i&&(b||(b=i.getAttribute("data-requiremodule")),f=B[i.getAttribute("data-requirecontext")])}(f?f.defQueue:R).push([b,c,d])};define.amd= +{jQuery:!0};l.exec=function(b){return eval(b)};l(q)}})(this); + +// The object which will be globally available via RequireJS variable. +return { + 'requirejs': requirejs, + 'require': require, + 'define': define +}; +}(); // End-of: var RequireJS = function() diff --git a/common/test/data/graphic_slider_tool/README.md b/common/test/data/graphic_slider_tool/README.md new file mode 100644 index 0000000000..ec4f121ad8 --- /dev/null +++ b/common/test/data/graphic_slider_tool/README.md @@ -0,0 +1,2 @@ +This is a very very simple course, useful for debugging graphical slider tool +code. 
diff --git a/common/test/data/graphic_slider_tool/course.xml b/common/test/data/graphic_slider_tool/course.xml new file mode 120000 index 0000000000..49041310f6 --- /dev/null +++ b/common/test/data/graphic_slider_tool/course.xml @@ -0,0 +1 @@ +roots/2012_Fall.xml \ No newline at end of file diff --git a/common/test/data/graphic_slider_tool/course/2012_Fall.xml b/common/test/data/graphic_slider_tool/course/2012_Fall.xml new file mode 100644 index 0000000000..2983c85dd5 --- /dev/null +++ b/common/test/data/graphic_slider_tool/course/2012_Fall.xml @@ -0,0 +1,5 @@ + + + + + diff --git a/common/test/data/graphic_slider_tool/graphical_slider_tool/sample_gst.xml b/common/test/data/graphic_slider_tool/graphical_slider_tool/sample_gst.xml new file mode 100644 index 0000000000..bd0360fde8 --- /dev/null +++ b/common/test/data/graphic_slider_tool/graphical_slider_tool/sample_gst.xml @@ -0,0 +1,30 @@ + + + + + + + + + + + return Math.sqrt(a * a - x * x); + return -Math.sqrt(a * a - x * x); + + + + + + return -a; + + + return a; + + + 1000 + -30, 6, 30 + -30, 6, 30 + + + + diff --git a/common/test/data/graphic_slider_tool/policies/2012_Fall.json b/common/test/data/graphic_slider_tool/policies/2012_Fall.json new file mode 100644 index 0000000000..6958f8432c --- /dev/null +++ b/common/test/data/graphic_slider_tool/policies/2012_Fall.json @@ -0,0 +1,14 @@ +{ + "course/2012_Fall": { + "graceperiod": "2 days 5 hours 59 minutes 59 seconds", + "start": "2015-07-17T12:00", + "display_name": "GST Test", + "graded": "false" + }, + "chapter/Overview": { + "display_name": "Overview" + }, + "graphical_slider_tool/sample_gst": { + "display_name": "Sample GST", + }, +} diff --git a/common/test/data/graphic_slider_tool/roots/2012_Fall.xml b/common/test/data/graphic_slider_tool/roots/2012_Fall.xml new file mode 100644 index 0000000000..1dc86c4afc --- /dev/null +++ b/common/test/data/graphic_slider_tool/roots/2012_Fall.xml @@ -0,0 +1 @@ + diff --git a/common/test/data/self_assessment/course.xml 
b/common/test/data/self_assessment/course.xml deleted file mode 120000 index 49041310f6..0000000000 --- a/common/test/data/self_assessment/course.xml +++ /dev/null @@ -1 +0,0 @@ -roots/2012_Fall.xml \ No newline at end of file diff --git a/common/test/data/self_assessment/course.xml b/common/test/data/self_assessment/course.xml new file mode 100644 index 0000000000..ea7d5c420d --- /dev/null +++ b/common/test/data/self_assessment/course.xml @@ -0,0 +1 @@ + diff --git a/doc/course_grading.md b/doc/course_grading.md index 6dce2fa70e..5c668df5d9 100644 --- a/doc/course_grading.md +++ b/doc/course_grading.md @@ -35,6 +35,43 @@ weights of 30, 10, 10, and 10 to the 4 problems, respectively. Note that the default weight of a problem **is not 1.** The default weight of a problem is the module's max_grade. +If weighting is set, each problem is worth the number of points assigned, regardless of the number of responses it contains. + +Consider a Homework section that contains two problems. + + + ... + + +and + + + ... + ... + ... + + + + + + +Without weighting, Problem 1 is worth 25% of the assignment, and Problem 2 is worth 75% of the assignment. + +Weighting for the problems can be set in the policy.json file. + + "problem/problem1": { + "weight": 2 + }, + "problem/problem2": { + "weight": 2 + }, + +With the above weighting, Problems 1 and 2 are each worth 50% of the assignment. + +Please note: When problems have weight, the point value is automatically included in the display name *except* when “weight”: 1.When “weight”: 1, no visual change occurs in the display name, leaving the point value open to interpretation to the student. 
+ + + ## Section Weighting Once each section has a percentage score, we must total those sections into a diff --git a/doc/development.md b/doc/development.md index ebc56fbf1b..56415b691e 100644 --- a/doc/development.md +++ b/doc/development.md @@ -19,6 +19,11 @@ Use the MacPorts package `mongodb` or the Homebrew formula `mongodb` ## Initializing Mongodb +First start up the mongo daemon. E.g. to start it up in the background +using a config file: + + mongod --config /usr/local/etc/mongod.conf & + Check out the course data directories that you want to work with into the `GITHUB_REPO_ROOT` (by default, `../data`). Then run the following command: @@ -37,8 +42,12 @@ This runs all the tests (long, uses collectstatic): If if you aren't changing static files, can run `rake test` once, then run - rake fasttest_{lms,cms} + rake fasttest_lms +or + + rake fasttest_cms + xmodule can be tested independently, with this: rake test_common/lib/xmodule diff --git a/doc/remote_gradebook.md b/doc/remote_gradebook.md new file mode 100644 index 0000000000..3743e98753 --- /dev/null +++ b/doc/remote_gradebook.md @@ -0,0 +1,47 @@ +Grades can be pushed to a remote gradebook, and course enrollment membership can be pulled from a remote gradebook. This file documents how to setup such a remote gradebook, and what the API should be for writing new remote gradebook "xservers". + +1. Definitions + +An "xserver" is a web-based server that is part of the MITx eco system. There are a number of "xserver" programs, including one which does python code grading, an xqueue server, and graders for other coding languages. + +"Stellar" is the MIT on-campus gradebook system. + +2. Setup + +The remote gradebook xserver should be specified in the lms.envs configuration using + + MITX_FEATURES[REMOTE_GRADEBOOK_URL] + +Each course, in addition, should define the name of the gradebook being used. A class "section" may also be specified. 
This goes in the policy.json file, eg: + + "remote_gradebook": { + "name" : "STELLAR:/project/mitxdemosite", + "section" : "r01" + }, + +3. The API for the remote gradebook xserver is an almost RESTful service model, which only employs POSTs, to the xserver url, with form data for the fields: + + - submit: get-assignments, get-membership, post-grades, or get-sections + - gradebook: name of gradebook + - user: username of staff person initiating the request (for logging) + - section: (optional) name of section + +The return body content should be a JSON string, of the format {'msg': message, 'data': data}. The message is displayed in the instructor dashboard. + +The data is a list of dicts (associative arrays). Each dict should be key:value. + +## For submit=post-grades: + +A file is also posted, with the field name "datafile". This file is CSV format, with two columns, one being "External email" and the other being the name of the assignment (that column contains the grades for the assignment). + +## For submit=get-assignments + +data keys = "AssignmentName" + +## For submit=get-membership + +data keys = "email", "name", "section" + +## For submit=get-sections + +data keys = "SectionName" diff --git a/doc/testing.md b/doc/testing.md index ee54ae74d9..694a9e8231 100644 --- a/doc/testing.md +++ b/doc/testing.md @@ -1,17 +1,25 @@ # Testing -Testing is good. Here is some useful info about how we set up tests-- +Testing is good. Here is some useful info about how we set up tests. +More info is [on the wiki](https://edx-wiki.atlassian.net/wiki/display/ENG/Test+Engineering) -### Backend code: +## Backend code -- TODO +- The python unit tests can be run via rake tasks. +See development.md for more info on how to do this. -### Frontend code: +## Frontend code -We're using Jasmine to unit-testing the JavaScript files. All the specs are -written in CoffeeScript for the consistency. 
To access the test cases, start the -server in debug mode, navigate to `http://127.0.0.1:[port number]/_jasmine` to -see the test result. +### Jasmine + +We're using Jasmine to unit/integration test the JavaScript files. +More info [on the wiki](https://edx-wiki.atlassian.net/wiki/display/ENG/Jasmine) + +All the specs are written in CoffeeScript to be consistent with the code. +To access the test cases, start the server using the settings file **jasmine.py** using this command: + `rake django-admin[runserver,lms,jasmine,12345]` + +Then navigate to `http://localhost:12345/_jasmine/` to see the test results. All the JavaScript codes must have test coverage. Both CMS and LMS has its own test directory in `{cms,lms}/static/coffee/spec` If you haven't @@ -30,3 +38,31 @@ If you're finishing a feature that contains JavaScript code snippets and do not sure how to test, please feel free to open up a pull request and asking people for help. (However, the best way to do it would be writing your test first, then implement your feature - Test Driven Development.) + +### BDD style acceptance tests with Lettuce + +We're using Lettuce for end user acceptance testing of features. +More info [on the wiki](https://edx-wiki.atlassian.net/wiki/display/ENG/Lettuce+Acceptance+Testing) + +Lettuce is a port of Cucumber. We're using it to drive Splinter, which is a python wrapper to Selenium. +To execute the automated test scripts, you'll need to start up the django server separately, then launch the tests. +Do both use the settings file named **acceptance.py**. + +What this will do is to use a sqllite database named mitx_all/db/test_mitx.db. +That way it can be flushed etc. without messing up your dev db. +Note that this also means that you need to syncdb and migrate the db first before starting the server to initialize it if it does not yet exist. + +1. 
Set up the test database (only needs to be done once): + rm ../db/test_mitx.db + rake django-admin[syncdb,lms,acceptance,--noinput] + rake django-admin[migrate,lms,acceptance,--noinput] + +2. Start up the django server separately in a shell + rake lms[acceptance] + +3. Then in another shell, run the tests in different ways as below. Lettuce comes with a new django-admin command called _harvest_. See the [lettuce django docs](http://lettuce.it/recipes/django-lxml.html) for more details. +* All tests in a specified feature folder: `django-admin.py harvest --no-server --settings=lms.envs.acceptance --pythonpath=. lms/djangoapps/portal/features/` +* Only the specified feature's scenarios: `django-admin.py harvest --no-server --settings=lms.envs.acceptance --pythonpath=. lms/djangoapps/courseware/features/high-level-tabs.feature` + +4. Troubleshooting +* If you get an error msg that says something about harvest not being a command, you probably are missing a requirement. Pip install (test-requirements.txt) and/or brew install as needed. \ No newline at end of file diff --git a/docs/source/graphical_slider_tool.rst b/docs/source/graphical_slider_tool.rst new file mode 100644 index 0000000000..37b17136e8 --- /dev/null +++ b/docs/source/graphical_slider_tool.rst @@ -0,0 +1,563 @@ +********************************************* +Xml format of graphical slider tool [xmodule] +********************************************* + +.. module:: xml_format_gst + + +Format description +================== + +Graphical slider tool (GST) main tag is:: + + BODY + +``graphical_slider_tool`` tag must have two children tags: ``render`` +and ``configuration``. 
+ + +Render tag +---------- + +Render tag can contain usual html tags mixed with some GST specific tags:: + + - represents jQuery slider for changing a parameter's value + - represents a text input field for changing a parameter's value + - represents Flot JS plot element + +Also GST will track all elements inside ```` where ``id`` +attribute is set, and a corresponding parameter referencing that ``id`` is present +in the configuration section below. These will be referred to as dynamic elements. + +The contents of the section will be shown to the user after +all occurrences of:: + + + + + +have been converted to actual sliders, text inputs, and a plot graph. +Everything in square brackets is optional. After initialization, all +text input fields, sliders, and dynamic elements will be set to the initial +values of the parameters that they are assigned to. + +``{parameter name}`` specifies the parameter to which the slider or text +input will be attached to. + +[style="{CSS statements}"] specifies valid CSS styling. It will be passed +directly to the browser without any parsing. + +There is a one-to-one relationship between a slider and a parameter. +I.e. for one parameter you can put only one ```` in the +```` section. However, you don't have to specify a slider - they +are optional. + +There is a many-to-one relationship between text inputs and a +parameter. I.e. for one parameter you can put many '' elements in +the ```` section. However, you don't have to specify a text +input - they are optional. + +You can put only one ```` in the ```` section. It is not +required. + + +Slider tag +.......... + +Slider tag must have ``var`` attribute and optional ``style`` attribute:: + + + +After processing, slider tags will be replaced by jQuery UI sliders with applied +``style`` attribute. + +``var`` attribute must correspond to a parameter. Parameters can be used in any +of the ``function`` tags in ``functions`` tag. 
By moving slider, value of +parameter ``a`` will change, and so result of function, that depends on parameter +``a``, will also change. + + +Textbox tag +........... + +Texbox tag must have ``var`` attribute and optional ``style`` attribute:: + + + +After processing, textbox tags will be replaced by html text inputs with applied +``style`` attribute. If you want a readonly text input, then you should use a +dynamic element instead (see section below "HTML tagsd with ID"). + +``var`` attribute must correspond to a parameter. Parameters can be used in any +of the ``function`` tags in ``functions`` tag. By changing the value on the text input, +value of parameter ``a`` will change, and so result of function, that depends on +parameter ``a``, will also change. + + +Plot tag +........ + +Plot tag may have optional ``style`` attribute:: + + + +After processing plot tags will be replaced by Flot JS plot with applied +``style`` attribute. + + +HTML tags with ID (dynamic elements) +.................................... + +Any HTML tag with ID, e.g. ```` can be used as a +place where result of function can be inserted. To insert function result to +an element, element ID must be included in ``function`` tag as ``el_id`` attribute +and ``output`` value must be ``"element"``:: + + + function add(a, b, precision) { + var x = Math.pow(10, precision || 2); + return (Math.round(a * x) + Math.round(b * x)) / x; + } + + return add(a, b, 5); + + + +Configuration tag +----------------- + +The configuration tag contains parameter settings, graph +settings, and function definitions which are to be plotted on the +graph and that use specified parameters. + +Configuration tag contains two mandatory tag ``functions`` and ``parameters`` and +may contain another ``plot`` tag. + + +Parameters tag +.............. + +``Parameters`` tag contains ``parameter`` tags. 
Each ``parameter`` tag must have +``var``, ``max``, ``min``, ``step`` and ``initial`` attributes:: + + + + + + +``var`` attribute links min, max, step and initial values to parameter name. + +``min`` attribute is the minimal value that a parameter can take. Slider and input +values can not go below it. + +``max`` attribute is the maximal value that a parameter can take. Slider and input +values can not go over it. + +``step`` attribute is value of slider step. When a slider increase or decreases +the specified parameter, it will do so by the amount specified with 'step' + +``initial`` attribute is the initial value that the specified parameter should be +set to. Sliders and inputs will initially show this value. + +The parameter's name is specified by the ``var`` property. All occurrences +of sliders and/or text inputs that specify a ``var`` property, will be +connected to this parameter - i.e. they will reflect the current +value of the parameter, and will be updated when the parameter +changes. + +If at lest one of these attributes is not set, then the parameter +will not be used, slider's and/or text input elements that specify +this parameter will not be activated, and the specified functions +which use this parameter will not return a numeric value. This means +that neglecting to specify at least one of the attributes for some +parameter will have the result of the whole GST instance not working +properly. + + +Functions tag +............. + +For the GST to do something, you must defined at least one +function, which can use any of the specified parameter values. The +function expects to take the ``x`` value, do some calculations, and +return the ``y`` value. I.e. this is a 2D plot in Cartesian +coordinates. This is how the default function is meant to be used for +the graph. + +There are other special cases of functions. They are used mainly for +outputting to elements, plot labels, or for custom output. 
Because +the return a single value, and that value is meant for a single element, +these function are invoked only with the set of all of the parameters. +I.e. no ``x`` value is available inside them. They are useful for +showing the current value of a parameter, showing complex static +formulas where some parameter's value must change, and other useful +things. + +The different style of function is specified by the ``output`` attribute. + +Each function must be defined inside ``function`` tag in ``functions`` tag:: + + + + function add(a, b, precision) { + var x = Math.pow(10, precision || 2); + return (Math.round(a * x) + Math.round(b * x)) / x; + } + + return add(a, b, 5); + + + +The parameter names (along with their values, as provided from text +inputs and/or sliders), will be available inside all defined +functions. A defined function body string will be parsed internally +by the browser's JavaScript engine and converted to a true JS +function. + +The function's parameter list will automatically be created and +populated, and will include the ``x`` (when ``output`` is not specified or +is set to ``"graph"``), and all of the specified parameter values (from sliders +and text inputs). This means that each of the defined functions will have +access to all of the parameter values. You don't have to use them, but +they will be there. + +Examples:: + + + return x; + + + + return (x + a) * Math.sin(x * b); + + + + function helperFunc(c1) { + return c1 * c1 - a; + } + return helperFunc(x + 10 * a * b) + Math.sin(a - x); + + +Required parameters:: + + function body: + + A string composing a normal JavaScript function + except that there is no function declaration + (along with parameters), and no closing bracket. + + So if you normally would have written your + JavaScript function like this: + + function myFunc(x, a, b) { + return x * a + b; + } + + here you must specify just the function body + (everything that goes between '{' and '}'). 
So, + you would specify the above function like so (the + bare-bone minimum): + + return x * a + b; + + VERY IMPORTANT: Because the function will be passed + to the browser as a single string, depending on implementation + specifics, the end-of-line characters can be stripped. This + means that single line JavaScript comments (starting with "//") + can lead to the effect that everything after the first such comment + will be treated as a comment. Therefore, it is absolutely + necessary that such single line comments are not used when + defining functions for GST. You can safely use the alternative + multiple line JavaScript comments (such comments start with "/*" + and end with "*/). + + VERY IMPORTANT: If you have a large function body, and decide to + split it into several lines, than you must wrap it in "CDATA" like + so: + + + + + +Optional parameters:: + + + color: Color name ('red', 'green', etc.) or in the form of + '#FFFF00'. If not specified, a default color (different + one for each graphed function) will be given by Flot JS. + line: A string - 'true' or 'false'. Should the data points be + connected by a line on the graph? Default is 'true'. + dot: A string - 'true' or 'false'. Should points be shown for + each data point on the graph? Default is 'false'. + bar: A string - 'true' or 'false'. When set to 'true', points + will be plotted as bars. + label: A string. If provided, will be shown in the legend, along + with the color that was used to plot the function. + output: 'element', 'none', 'plot_label', or 'graph'. If not defined, + function will be plotted (same as setting 'output' to 'graph'). + If defined, and other than 'graph', function will not be + plotted, but it's output will be inserted into the element + with ID specified by 'el_id' attribute. + el_id: Id of HTML element, defined in '' section. Value of + function will be inserted as content of this element. 
+ disable_auto_return: By default, if JavaScript function string is written + without a "return" statement, the "return" will be + prepended to it. Set to "true" to disable this + functionality. This is done so that simple functions + can be defined in an easy fashion (for example, "a", + which will be translated into "return a"). + update_on: A string - 'change', or 'slide'. Default (if not set) is + 'slide'. This defines the event on which a given function is + called, and its result is inserted into an element. This + setting is relevant only when "output" is other than "graph". + +When specifying ``el_id``, it is essential to set "output" to one of + element - GST will invoke the function, and the return of it will be + inserted into a HTML element with id specified by ``el_id``. + none - GST will simply inoke the function. It is left to the instructor + who writes the JavaScript function body to update all necesary + HTML elements inside the function, before it exits. This is done + so that extra steps can be preformed after an HTML element has + been updated with a value. Note, that because the return value + from this function is not actually used, it will be tempting to + omit the "return" statement. However, in this case, the attribute + "disable_auto_return" must be set to "true" in order to prevent + GST from inserting a "return" statement automatically. + plot_label - GST will process all plot labels (which are strings), and + will replace the all instances of substrings specified by + ``el_id`` with the returned value of the function. This is + necessary if you want a label in the graph to have some changing + number. Because of the nature of Flot JS, it is impossible to + achieve the same effect by setting the "output" attribute + to "element", and including a HTML element in the label. 
+ +The above values for "output" will tell GST that the function is meant for an +HTML element (not for graph), and that it should not get an 'x' parameter (along +with some value). + + +[Note on MathJax and labels] +............................ + +Independently of this module, MathJax will render all TeX code +within the ```` section into nice mathematical formulas. Just +remember to wrap it in one of:: + + \( and \) - for inline formulas (formulas surrounded by + standard text) + \[ and \] - if you want the formula to be a separate line + +It is possible to define a label in standard TeX notation. The JS +library MathJax will work on these labels also because they are +inserted on top of the plot as standard HTML (text within a DIV). + +If the label is dynamic, i.e. it will contain some text (numeric, or other) +that has to be updated on a parameter's change, then one can define +a special function to handle this. The "output" of such a function must be +set to "none", and the JavaScript code inside this function must update the +MathJax element by itself. Before exiting, MathJax typeset function should +be called so that the new text will be re-rendered by MathJax. For example, + + + ... + + + ... + + + + ... + + +Plot tag +........ + +``Plot`` tag inside ``configuration`` tag defines settings for plot output. + +Required parameters:: + + xrange: 2 functions that must return a value. Value is constant (3.1415) + or depend on parameter from parameters section: + + return 0; + return 30; + + or + + return -a; + return a; + + + All functions will be calculated over domain between xrange:min + and xrange:max. Xrange depending on parameter is extremely + useful when domain(s) of your function(s) depends on parameter + (like circle, when parameter is radius and you want to allow + to change it). + +Optional parameters:: + + num_points: Number of data points to be generated for the plot. If + this is not set, the number of points will be + calculated as width / 5. 
+ + bar_width: If functions are present which are to be plotted as bars, + then this parameter specifies the width of the bars. A + numeric value for this parameter is expected. + + bar_align: If functions are present which are to be plotted as bars, + then this parameter specifies how to align the bars relative + to the tick. Available values are "left" and "center". + + xticks, + yticks: 3 floating point numbers separated by commas. This + specifies how many ticks are created, what number they + start at, and what number they end at. This is different + from the 'xrange' setting in that it has nothing to do + with the data points - it controls what area of the + Cartesian space you will see. The first number is the + first tick's value, the second number is the step + between each tick, the third number is the value of the + last tick. If these configurations are not specified, + Flot will choose them for you based on the data points + set that it is currently plotting. Usually, this results + in a nice graph, however, sometimes you need to fine + grain the controls. For example, when you want to show + a fixed area of the Cartesian space, even when the data + set changes. On its own, Flot will recalculate the + ticks, which will result in a different graph each time. + By specifying the xticks, yticks configurations, only + the plotted data will change - the axes (ticks) will + remain as you have defined them. + + xticks_names, yticks_names: + A JSON string which represents a mapping of xticks, yticks + values to some defined strings. If specified, the graph will + not have any xticks, yticks except those for which a string + value has been defined in the JSON string. Note that the + matching will be string-based and not numeric. I.e. if a tick + value was "3.70" before, then inside the JSON there should be + a mapping like {..., "3.70": "Some string", ...}. Example: + + + + + + + + + + xunits, + yunits: Units values to be set on axes. Use MathJax. 
Example: + \(cm\) + \(m\) + + moving_label: + A way to specify a label that should be positioned dynamically, + based on the values of some parameters, or some other factors. + It is similar to a , but it is only valid for a plot + because it is drawn relative to the plot coordinate system. + + Multiple "moving_label" configurations can be provided, each one + with a unique text and a unique set of functions that determine + its dynamic positioning. + + Each "moving_label" can have a "color" attribute (CSS color notation), + and a "weight" attribute. "weight" can be one of "normal" or "bold", + and determines the styling of moving label's text. + + Each "moving_label" function should return an object with 'x' + and 'y' properties. Within those functions, all of the parameter + names along with their value are available. + + Example (note that "return" statement is missing; it will be automatically + inserted by GST): + + + + +

Graphic slider tool: Bar graph example.

+ +

We can request the API to plot us a bar graph.

+
+

a

+ + +


+

b

+ + +
+ +
+ + + + + + + + 0.9) && (x<1.1)) || ((x>4.9) && (x<5.1))) { return Math.sin(a * 0.01 * Math.PI + 2.952 * x); } + else {return undefined;}]]> + + + 1.9) && (x<2.1)) || ((x>3.9) && (x<4.1))) { return Math.cos(b * 0.01 * Math.PI + 3.432 * x); } + else {return undefined;}]]> + + + 1.9) && (x<2.1)) || ((x>3.9) && (x<4.1))) { return Math.cos((b - 10 * a) * 0.01 * Math.PI + 3.432 * x); } + else {return undefined;}]]> + + + 1.9) && (x<2.1)) || ((x>3.9) && (x<4.1))) { return Math.cos((b + 7 * a) * 0.01 * Math.PI + 3.432 * x); } + else {return undefined;}]]> + + + + 15 + 5 + 0, 0.5, 6 + -1.5, 0.1, 1.5 + + + + + + + 0.4 + + +
+ diff --git a/docs/source/gst_example_dynamic_labels.xml b/docs/source/gst_example_dynamic_labels.xml new file mode 100644 index 0000000000..05cbe407fb --- /dev/null +++ b/docs/source/gst_example_dynamic_labels.xml @@ -0,0 +1,40 @@ + + + +

Graphic slider tool: Dynamic labels.

+

There are two kinds of dynamic labels. + 1) Dynamically changing values in graph legends. + 2) Dynamic labels, whose coordinates depend on parameters

+

a:

+
+

b:

+

+ +
+ + + + + + + + a * x + b + + a + + + 030 + 10 + 0, 6, 30 + -9, 1, 9 + + + + + + + + + +
+
\ No newline at end of file diff --git a/docs/source/gst_example_dynamic_range.xml b/docs/source/gst_example_dynamic_range.xml new file mode 100644 index 0000000000..0ce4263d62 --- /dev/null +++ b/docs/source/gst_example_dynamic_range.xml @@ -0,0 +1,37 @@ + + + +

Graphic slider tool: Dynamic range and implicit functions.

+ +

You can make the x range (not the ticks of the x axis) of functions depend on + a parameter value. This can be useful when a function's domain depends + on a parameter.

+

Also implicit functions like a circle can be plotted as 2 separate + functions of the same color.

+
+ + +
+ +
+ + + + + + Math.sqrt(a * a - x * x) + -Math.sqrt(a * a - x * x) + + + + + -a + a + + 1000 + -30, 6, 30 + -30, 6, 30 + + +
+
diff --git a/docs/source/gst_example_html_element_output.xml b/docs/source/gst_example_html_element_output.xml new file mode 100644 index 0000000000..340783871a --- /dev/null +++ b/docs/source/gst_example_html_element_output.xml @@ -0,0 +1,40 @@ + + + +

Graphic slider tool: Output to DOM element.

+ +

a + b =

+ +
+

a

+ + +
+ +
+

b

+ + +
+


+ +
+ + + + + + + + + function add(a, b, precision) { + var x = Math.pow(10, precision || 2); + return (Math.round(a * x) + Math.round(b * x)) / x; + } + + return add(a, b, 5); + + + +
+
diff --git a/docs/source/gst_example_with_documentation.xml b/docs/source/gst_example_with_documentation.xml new file mode 100644 index 0000000000..addada5b10 --- /dev/null +++ b/docs/source/gst_example_with_documentation.xml @@ -0,0 +1,91 @@ + + + + +

Graphic slider tool: full example.

+

+ A simple equation + \( + y_1 = 10 \times b \times \frac{sin(a \times x) \times sin(b \times x)}{cos(b \times x) + 10} + \) + can be plotted. +

+ + +
+

Currently \(a\) is

+ + +
+ +

This one + \( + y_2 = sin(a \times x) + \) + will be overlayed on top. +

+
+

Currently \(b\) is

+ +
+
+

To change \(a\) use:

+ +
+
+

To change \(b\) use:

+ +
+ +
+

Second input for b:

+ + +
+
+ + + + + + + + + + + + return 10.0 * b * Math.sin(a * x) * Math.sin(b * x) / (Math.cos(b * x) + 10); + + + + Math.sin(a * x); + + + function helperFunc(c1) { + return c1 * c1 - a; + } + + return helperFunc(x + 10 * a * b) + Math.sin(a - x); + + a + + + + + + return 0; + + 30 + + + 120 + + 0, 3, 30 + -1.5, 1.5, 13.5 + + \(cm\) + \(m\) + + +
+
diff --git a/docs/source/index.rst b/docs/source/index.rst index 92c535a624..d2082ff3a0 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -14,7 +14,7 @@ Contents: overview.rst common-lib.rst djangoapps.rst - + xml_formats.rst Indices and tables ================== diff --git a/docs/source/xml_formats.rst b/docs/source/xml_formats.rst new file mode 100644 index 0000000000..b76ee11642 --- /dev/null +++ b/docs/source/xml_formats.rst @@ -0,0 +1,8 @@ +XML formats of Inputtypes and Xmodule +===================================== +Contents: + +.. toctree:: + :maxdepth: 2 + + graphical_slider_tool.rst \ No newline at end of file diff --git a/jenkins/quality.sh b/jenkins/quality.sh index 4cf26d76bf..56217af874 100755 --- a/jenkins/quality.sh +++ b/jenkins/quality.sh @@ -3,6 +3,8 @@ set -e set -x +git remote prune origin + # Reset the submodule, in case it changed git submodule foreach 'git reset --hard HEAD' diff --git a/jenkins/test.sh b/jenkins/test.sh index 8a96024785..e960313d76 100755 --- a/jenkins/test.sh +++ b/jenkins/test.sh @@ -15,6 +15,8 @@ function github_mark_failed_on_exit { trap '[ $? == "0" ] || github_status state:failure "failed"' EXIT } +git remote prune origin + github_mark_failed_on_exit github_status state:pending "is running" @@ -26,6 +28,12 @@ export PYTHONIOENCODING=UTF-8 GIT_BRANCH=${GIT_BRANCH/HEAD/master} +# Temporary workaround for pip/numpy bug. (Jenkin's is unable to pip install numpy successfully, scipy fails to install afterwards. +# We tried pip 1.1, 1.2, all sorts of varieties but it's apparently a pip bug of some kind. 
+wget -O /tmp/numpy.tar.gz http://pypi.python.org/packages/source/n/numpy/numpy-1.6.2.tar.gz#md5=95ed6c9dcc94af1fc1642ea2a33c1bba +tar -zxvf /tmp/numpy.tar.gz -C /tmp/ +python /tmp/numpy-1.6.2/setup.py install + pip install -q -r pre-requirements.txt pip install -q -r test-requirements.txt yes w | pip install -q -r requirements.txt diff --git a/lms/.coveragerc b/lms/.coveragerc index acac3ed4f2..7e18a37492 100644 --- a/lms/.coveragerc +++ b/lms/.coveragerc @@ -2,11 +2,13 @@ [run] data_file = reports/lms/.coverage source = lms +omit = lms/envs/* [report] ignore_errors = True [html] +title = LMS Python Test Coverage Report directory = reports/lms/cover [xml] diff --git a/lms/djangoapps/courseware/access.py b/lms/djangoapps/courseware/access.py index ba9b8a3bc0..26f9fcdfd3 100644 --- a/lms/djangoapps/courseware/access.py +++ b/lms/djangoapps/courseware/access.py @@ -13,6 +13,8 @@ from xmodule.modulestore import Location from xmodule.timeparse import parse_time from xmodule.x_module import XModule, XModuleDescriptor +from student.models import CourseEnrollmentAllowed + DEBUG_ACCESS = False log = logging.getLogger(__name__) @@ -124,6 +126,11 @@ def _has_access_course_desc(user, course, action): debug("Allow: in enrollment period") return True + # if user is in CourseEnrollmentAllowed with right course_id then can also enroll + if user is not None and CourseEnrollmentAllowed: + if CourseEnrollmentAllowed.objects.filter(email=user.email, course_id=course.id): + return True + # otherwise, need staff access return _has_staff_access_to_descriptor(user, course) @@ -159,13 +166,19 @@ def _has_access_course_desc(user, course, action): return _dispatch(checkers, action, user, course) + def _get_access_group_name_course_desc(course, action): ''' - Return name of group which gives staff access to course. Only understands action = 'staff' + Return name of group which gives staff access to course. 
Only understands action = 'staff' and 'instructor' ''' - if not action=='staff': - return [] - return _course_staff_group_name(course.location) + if action=='staff': + return _course_staff_group_name(course.location) + elif action=='instructor': + return _course_instructor_group_name(course.location) + + return [] + + def _has_access_error_desc(user, descriptor, action): """ diff --git a/lms/djangoapps/courseware/admin.py b/lms/djangoapps/courseware/admin.py index cda4fbb788..f7e54d1800 100644 --- a/lms/djangoapps/courseware/admin.py +++ b/lms/djangoapps/courseware/admin.py @@ -7,3 +7,8 @@ from django.contrib import admin from django.contrib.auth.models import User admin.site.register(StudentModule) + +admin.site.register(OfflineComputedGrade) + +admin.site.register(OfflineComputedGradeLog) + diff --git a/lms/djangoapps/courseware/courses.py b/lms/djangoapps/courseware/courses.py index 65a1eee25b..7c0d30ebd8 100644 --- a/lms/djangoapps/courseware/courses.py +++ b/lms/djangoapps/courseware/courses.py @@ -217,11 +217,21 @@ def get_courses_by_university(user, domain=None): ''' # TODO: Clean up how 'error' is done. # filter out any courses that errored. 
- visible_courses = branding.get_visible_courses(domain) + visible_courses = get_courses(user, domain) universities = defaultdict(list) for course in visible_courses: - if not has_access(user, course, 'see_exists'): - continue universities[course.org].append(course) + return universities + + +def get_courses(user, domain=None): + ''' + Returns a list of courses available, sorted by course.number + ''' + courses = branding.get_visible_courses(domain) + courses = [c for c in courses if has_access(user, c, 'see_exists')] + + courses = sorted(courses, key=lambda course:course.number) + return courses diff --git a/lms/djangoapps/courseware/features/courses.py b/lms/djangoapps/courseware/features/courses.py new file mode 100644 index 0000000000..aecaa139ff --- /dev/null +++ b/lms/djangoapps/courseware/features/courses.py @@ -0,0 +1,254 @@ +from lettuce import world +from xmodule.course_module import CourseDescriptor +from xmodule.modulestore.django import modulestore +from courseware.courses import get_course_by_id +from xmodule import seq_module, vertical_module + +from logging import getLogger +logger = getLogger(__name__) + +## support functions +def get_courses(): + ''' + Returns dict of lists of courses available, keyed by course.org (ie university). + Courses are sorted by course.number. + ''' + courses = [c for c in modulestore().get_courses() + if isinstance(c, CourseDescriptor)] + courses = sorted(courses, key=lambda course: course.number) + return courses + +# def get_courseware(course_id): +# """ +# Given a course_id (string), return a courseware array of dictionaries for the +# top two levels of navigation. 
Example: + +# [ +# {'chapter_name': 'Overview', +# 'sections': ['Welcome', 'System Usage Sequence', 'Lab0: Using the tools', 'Circuit Sandbox'] +# }, +# {'chapter_name': 'Week 1', +# 'sections': ['Administrivia and Circuit Elements', 'Basic Circuit Analysis', 'Resistor Divider', 'Week 1 Tutorials'] +# }, +# {'chapter_name': 'Midterm Exam', +# 'sections': ['Midterm Exam'] +# } +# ] +# """ + +# course = get_course_by_id(course_id) +# chapters = course.get_children() +# courseware = [ {'chapter_name':c.display_name, 'sections':[s.display_name for s in c.get_children()]} for c in chapters] +# return courseware + +def get_courseware_with_tabs(course_id): + """ + Given a course_id (string), return a courseware array of dictionaries for the + top three levels of navigation. Same as get_courseware() except include + the tabs on the right hand main navigation page. + + This hides the appropriate courseware as defined by the XML flag test: + chapter.metadata.get('hide_from_toc','false').lower() == 'true' + + Example: + + [{ + 'chapter_name': 'Overview', + 'sections': [{ + 'clickable_tab_count': 0, + 'section_name': 'Welcome', + 'tab_classes': [] + }, { + 'clickable_tab_count': 1, + 'section_name': 'System Usage Sequence', + 'tab_classes': ['VerticalDescriptor'] + }, { + 'clickable_tab_count': 0, + 'section_name': 'Lab0: Using the tools', + 'tab_classes': ['HtmlDescriptor', 'HtmlDescriptor', 'CapaDescriptor'] + }, { + 'clickable_tab_count': 0, + 'section_name': 'Circuit Sandbox', + 'tab_classes': [] + }] + }, { + 'chapter_name': 'Week 1', + 'sections': [{ + 'clickable_tab_count': 4, + 'section_name': 'Administrivia and Circuit Elements', + 'tab_classes': ['VerticalDescriptor', 'VerticalDescriptor', 'VerticalDescriptor', 'VerticalDescriptor'] + }, { + 'clickable_tab_count': 0, + 'section_name': 'Basic Circuit Analysis', + 'tab_classes': ['CapaDescriptor', 'CapaDescriptor', 'CapaDescriptor'] + }, { + 'clickable_tab_count': 0, + 'section_name': 'Resistor Divider', + 
'tab_classes': [] + }, { + 'clickable_tab_count': 0, + 'section_name': 'Week 1 Tutorials', + 'tab_classes': [] + }] + }, { + 'chapter_name': 'Midterm Exam', + 'sections': [{ + 'clickable_tab_count': 2, + 'section_name': 'Midterm Exam', + 'tab_classes': ['VerticalDescriptor', 'VerticalDescriptor'] + }] + }] + """ + + course = get_course_by_id(course_id) + chapters = [ chapter for chapter in course.get_children() if chapter.metadata.get('hide_from_toc','false').lower() != 'true' ] + courseware = [{'chapter_name':c.display_name, + 'sections':[{'section_name':s.display_name, + 'clickable_tab_count':len(s.get_children()) if (type(s)==seq_module.SequenceDescriptor) else 0, + 'tabs':[{'children_count':len(t.get_children()) if (type(t)==vertical_module.VerticalDescriptor) else 0, + 'class':t.__class__.__name__ } + for t in s.get_children() ]} + for s in c.get_children() if s.metadata.get('hide_from_toc', 'false').lower() != 'true']} + for c in chapters ] + + return courseware + +def process_section(element, num_tabs=0): + ''' + Process section reads through whatever is in 'course-content' and classifies it according to sequence module type. + + This function is recursive + + There are 6 types, with 6 actions. 
+ + Sequence Module + -contains one child module + + Vertical Module + -contains other modules + -process it and get its children, then process them + + Capa Module + -problem type, contains only one problem + -for this, the most complex type, we created a separate method, process_problem + + Video Module + -video type, contains only one video + -we only check to ensure that a section with class of video exists + + HTML Module + -html text + -we do not check anything about it + + Custom Tag Module + -a custom 'hack' module type + -there is a large variety of content that could go in a custom tag module, so we just pass if it is of this unusual type + + can be used like this: + e = world.browser.find_by_css('section.course-content section') + process_section(e) + + ''' + if element.has_class('xmodule_display xmodule_SequenceModule'): + logger.debug('####### Processing xmodule_SequenceModule') + child_modules = element.find_by_css("div>div>section[class^='xmodule']") + for mod in child_modules: + process_section(mod) + + elif element.has_class('xmodule_display xmodule_VerticalModule'): + logger.debug('####### Processing xmodule_VerticalModule') + vert_list = element.find_by_css("li section[class^='xmodule']") + for item in vert_list: + process_section(item) + + elif element.has_class('xmodule_display xmodule_CapaModule'): + logger.debug('####### Processing xmodule_CapaModule') + assert element.find_by_css("section[id^='problem']"), "No problems found in Capa Module" + p = element.find_by_css("section[id^='problem']").first + p_id = p['id'] + logger.debug('####################') + logger.debug('id is "%s"' % p_id) + logger.debug('####################') + process_problem(p, p_id) + + elif element.has_class('xmodule_display xmodule_VideoModule'): + logger.debug('####### Processing xmodule_VideoModule') + assert element.find_by_css("section[class^='video']"), "No video found in Video Module" + + elif element.has_class('xmodule_display xmodule_HtmlModule'): + 
logger.debug('####### Processing xmodule_HtmlModule') + pass + + elif element.has_class('xmodule_display xmodule_CustomTagModule'): + logger.debug('####### Processing xmodule_CustomTagModule') + pass + + else: + assert False, "Class for element not recognized!!" + + + +def process_problem(element, problem_id): + ''' + Process problem attempts to + 1) scan all the input fields and reset them + 2) click the 'check' button and look for an incorrect response (p.status text should be 'incorrect') + 3) click the 'show answer' button IF it exists and IF the answer is not already displayed + 4) enter the correct answer in each input box + 5) click the 'check' button and verify that answers are correct + + Because of all the ajax calls happening, sometimes the test fails because objects disconnect from the DOM. + The basic functionality does exist, though, and I'm hoping that someone can take it over and make it super effective. + ''' + + prob_xmod = element.find_by_css("section.problem").first + input_fields = prob_xmod.find_by_css("section[id^='input']") + + ## clear out all input to ensure an incorrect result + for field in input_fields: + field.find_by_css("input").first.fill('') + + ## because of cookies or the application, only click the 'check' button if the status is not already 'incorrect' + # This would need to be reworked because multiple choice problems don't have this status + # if prob_xmod.find_by_css("p.status").first.text.strip().lower() != 'incorrect': + prob_xmod.find_by_css("section.action input.check").first.click() + + ## all elements become disconnected after the click + ## grab element and prob_xmod because the dom has changed (some classes/elements became hidden and changed the hierarchy) + # Wait for the ajax reload + assert world.browser.is_element_present_by_css("section[id='%s']" % problem_id, wait_time=5) + element = world.browser.find_by_css("section[id='%s']" % problem_id).first + prob_xmod = element.find_by_css("section.problem").first + 
input_fields = prob_xmod.find_by_css("section[id^='input']") + for field in input_fields: + assert field.find_by_css("div.incorrect"), "The 'check' button did not work for %s" % (problem_id) + + show_button = element.find_by_css("section.action input.show").first + ## this logic is to ensure we do not accidentally hide the answers + if show_button.value.lower() == 'show answer': + show_button.click() + else: + pass + + ## grab element and prob_xmod because the dom has changed (some classes/elements became hidden and changed the hierarchy) + assert world.browser.is_element_present_by_css("section[id='%s']" % problem_id, wait_time=5) + element = world.browser.find_by_css("section[id='%s']" % problem_id).first + prob_xmod = element.find_by_css("section.problem").first + input_fields = prob_xmod.find_by_css("section[id^='input']") + + ## in each field, find the answer, and send it to the field. + ## Note that this does not work if the answer type is a strange format, e.g. "either a or b" + for field in input_fields: + field.find_by_css("input").first.fill(field.find_by_css("p[id^='answer']").first.text) + + prob_xmod.find_by_css("section.action input.check").first.click() + + ## assert that we entered the correct answers + ## grab element and prob_xmod because the dom has changed (some classes/elements became hidden and changed the hierarchy) + assert world.browser.is_element_present_by_css("section[id='%s']" % problem_id, wait_time=5) + element = world.browser.find_by_css("section[id='%s']" % problem_id).first + prob_xmod = element.find_by_css("section.problem").first + input_fields = prob_xmod.find_by_css("section[id^='input']") + for field in input_fields: + ## if you don't use 'starts with ^=' the test will fail because the actual class is 'correct ' (with a space) + assert field.find_by_css("div[class^='correct']"), "The check answer values were not correct for %s" % problem_id diff --git a/lms/djangoapps/courseware/features/courseware.feature 
b/lms/djangoapps/courseware/features/courseware.feature new file mode 100644 index 0000000000..21c7e84541 --- /dev/null +++ b/lms/djangoapps/courseware/features/courseware.feature @@ -0,0 +1,18 @@ +Feature: View the Courseware Tab + As a student in an edX course + In order to work on the course + I want to view the info on the courseware tab + + Scenario: I can get to the courseware tab when logged in + Given I am registered for a course + And I log in + And I click on View Courseware + When I click on the "Courseware" tab + Then the "Courseware" tab is active + + # TODO: fix this one? Not sure whether you should get a 404. + # Scenario: I cannot get to the courseware tab when not logged in + # Given I am not logged in + # And I visit the homepage + # When I visit the courseware URL + # Then the login dialog is visible diff --git a/lms/djangoapps/courseware/features/courseware.py b/lms/djangoapps/courseware/features/courseware.py new file mode 100644 index 0000000000..05ecd63f4b --- /dev/null +++ b/lms/djangoapps/courseware/features/courseware.py @@ -0,0 +1,7 @@ +from lettuce import world, step +from lettuce.django import django_url + +@step('I visit the courseware URL$') +def i_visit_the_course_info_url(step): + url = django_url('/courses/MITx/6.002x/2012_Fall/courseware') + world.browser.visit(url) \ No newline at end of file diff --git a/lms/djangoapps/courseware/features/courseware_common.py b/lms/djangoapps/courseware/features/courseware_common.py new file mode 100644 index 0000000000..8850c88fef --- /dev/null +++ b/lms/djangoapps/courseware/features/courseware_common.py @@ -0,0 +1,37 @@ +from lettuce import world, step +from lettuce.django import django_url + +@step('I click on View Courseware') +def i_click_on_view_courseware(step): + css = 'p.enter-course' + world.browser.find_by_css(css).first.click() + +@step('I click on the "([^"]*)" tab$') +def i_click_on_the_tab(step, tab): + world.browser.find_link_by_text(tab).first.click() + world.save_the_html() + 
+@step('I visit the courseware URL$') +def i_visit_the_course_info_url(step): + url = django_url('/courses/MITx/6.002x/2012_Fall/courseware') + world.browser.visit(url) + +@step(u'I do not see "([^"]*)" anywhere on the page') +def i_do_not_see_text_anywhere_on_the_page(step, text): + assert world.browser.is_text_not_present(text) + +@step(u'I am on the dashboard page$') +def i_am_on_the_dashboard_page(step): + assert world.browser.is_element_present_by_css('section.courses') + assert world.browser.url == django_url('/dashboard') + +@step('the "([^"]*)" tab is active$') +def the_tab_is_active(step, tab): + css = '.course-tabs a.active' + active_tab = world.browser.find_by_css(css) + assert (active_tab.text == tab) + +@step('the login dialog is visible$') +def login_dialog_visible(step): + css = 'form#login_form.login_form' + assert world.browser.find_by_css(css).visible diff --git a/lms/djangoapps/courseware/features/high-level-tabs.feature b/lms/djangoapps/courseware/features/high-level-tabs.feature new file mode 100644 index 0000000000..2e9c4f1886 --- /dev/null +++ b/lms/djangoapps/courseware/features/high-level-tabs.feature @@ -0,0 +1,23 @@ +Feature: All the high level tabs should work + In order to preview the courseware + As a student + I want to navigate through the high level tabs + +# Note this didn't work as a scenario outline because +# before each scenario was not flushing the database +# TODO: break this apart so that if one fails the others +# will still run + Scenario: A student can see all tabs of the course + Given I am registered for a course + And I log in + And I click on View Courseware + When I click on the "Courseware" tab + Then the page title should be "6.002x Courseware" + When I click on the "Course Info" tab + Then the page title should be "6.002x Course Info" + When I click on the "Textbook" tab + Then the page title should be "6.002x Textbook" + When I click on the "Wiki" tab + Then the page title should be "6.002x | edX Wiki" + When I 
click on the "Progress" tab + Then the page title should be "6.002x Progress" diff --git a/lms/djangoapps/courseware/features/openended.feature b/lms/djangoapps/courseware/features/openended.feature new file mode 100644 index 0000000000..3c7043ba54 --- /dev/null +++ b/lms/djangoapps/courseware/features/openended.feature @@ -0,0 +1,33 @@ +Feature: Open ended grading + As a student in an edX course + In order to complete the courseware questions + I want the machine learning grading to be functional + + Scenario: An answer that is too short is rejected + Given I navigate to an openended question + And I enter the answer "z" + When I press the "Check" button + And I wait for "8" seconds + And I see the grader status "Submitted for grading" + And I press the "Recheck for Feedback" button + Then I see the red X + And I see the grader score "0" + + Scenario: An answer with too many spelling errors is rejected + Given I navigate to an openended question + And I enter the answer "az" + When I press the "Check" button + And I wait for "8" seconds + And I see the grader status "Submitted for grading" + And I press the "Recheck for Feedback" button + Then I see the red X + And I see the grader score "0" + When I click the link for full output + Then I see the spelling grading message "More spelling errors than average." + + Scenario: An answer makes its way to the instructor dashboard + Given I navigate to an openended question as staff + When I submit the answer "I love Chemistry." 
+ And I wait for "8" seconds + And I visit the staff grading page + Then my answer is queued for instructor grading \ No newline at end of file diff --git a/lms/djangoapps/courseware/features/openended.py b/lms/djangoapps/courseware/features/openended.py new file mode 100644 index 0000000000..d37f9a0fae --- /dev/null +++ b/lms/djangoapps/courseware/features/openended.py @@ -0,0 +1,89 @@ +from lettuce import world, step +from lettuce.django import django_url +from nose.tools import assert_equals, assert_in +from logging import getLogger +logger = getLogger(__name__) + +@step('I navigate to an openended question$') +def navigate_to_an_openended_question(step): + world.register_by_course_id('MITx/3.091x/2012_Fall') + world.log_in('robot@edx.org','test') + problem = '/courses/MITx/3.091x/2012_Fall/courseware/Week_10/Polymer_Synthesis/' + world.browser.visit(django_url(problem)) + tab_css = 'ol#sequence-list > li > a[data-element="5"]' + world.browser.find_by_css(tab_css).click() + +@step('I navigate to an openended question as staff$') +def navigate_to_an_openended_question_as_staff(step): + world.register_by_course_id('MITx/3.091x/2012_Fall', True) + world.log_in('robot@edx.org','test') + problem = '/courses/MITx/3.091x/2012_Fall/courseware/Week_10/Polymer_Synthesis/' + world.browser.visit(django_url(problem)) + tab_css = 'ol#sequence-list > li > a[data-element="5"]' + world.browser.find_by_css(tab_css).click() + +@step(u'I enter the answer "([^"]*)"$') +def enter_the_answer_text(step, text): + textarea_css = 'textarea' + world.browser.find_by_css(textarea_css).first.fill(text) + +@step(u'I submit the answer "([^"]*)"$') +def i_submit_the_answer_text(step, text): + textarea_css = 'textarea' + world.browser.find_by_css(textarea_css).first.fill(text) + check_css = 'input.check' + world.browser.find_by_css(check_css).click() + +@step('I click the link for full output$') +def click_full_output_link(step): + link_css = 'a.full' + 
world.browser.find_by_css(link_css).first.click() + +@step(u'I visit the staff grading page$') +def i_visit_the_staff_grading_page(step): + # course_u = '/courses/MITx/3.091x/2012_Fall' + # sg_url = '%s/staff_grading' % course_u + world.browser.click_link_by_text('Instructor') + world.browser.click_link_by_text('Staff grading') + # world.browser.visit(django_url(sg_url)) + +@step(u'I see the grader message "([^"]*)"$') +def see_grader_message(step, msg): + message_css = 'div.external-grader-message' + grader_msg = world.browser.find_by_css(message_css).text + assert_in(msg, grader_msg) + +@step(u'I see the grader status "([^"]*)"$') +def see_the_grader_status(step, status): + status_css = 'div.grader-status' + grader_status = world.browser.find_by_css(status_css).text + assert_equals(status, grader_status) + +@step('I see the red X$') +def see_the_red_x(step): + x_css = 'div.grader-status > span.incorrect' + assert world.browser.find_by_css(x_css) + +@step(u'I see the grader score "([^"]*)"$') +def see_the_grader_score(step, score): + score_css = 'div.result-output > p' + score_text = world.browser.find_by_css(score_css).text + assert_equals(score_text, 'Score: %s' % score) + +@step('I see the link for full output$') +def see_full_output_link(step): + link_css = 'a.full' + assert world.browser.find_by_css(link_css) + +@step('I see the spelling grading message "([^"]*)"$') +def see_spelling_msg(step, msg): + spelling_css = 'div.spelling' + spelling_msg = world.browser.find_by_css(spelling_css).text + assert_equals('Spelling: %s' % msg, spelling_msg) + +@step(u'my answer is queued for instructor grading$') +def answer_is_queued_for_instructor_grading(step): + list_css = 'ul.problem-list > li > a' + actual_msg = world.browser.find_by_css(list_css).text + expected_msg = "(0 graded, 1 pending)" + assert_in(expected_msg, actual_msg) diff --git a/lms/djangoapps/courseware/features/smart-accordion.feature b/lms/djangoapps/courseware/features/smart-accordion.feature new 
file mode 100644 index 0000000000..90d097144a --- /dev/null +++ b/lms/djangoapps/courseware/features/smart-accordion.feature @@ -0,0 +1,59 @@ +# Here are all the courses for Fall 2012 +# MITx/3.091x/2012_Fall +# MITx/6.002x/2012_Fall +# MITx/6.00x/2012_Fall +# HarvardX/CS50x/2012 (we will not be testing this, as it is anomolistic) +# HarvardX/PH207x/2012_Fall +# BerkeleyX/CS169.1x/2012_Fall +# BerkeleyX/CS169.2x/2012_Fall +# BerkeleyX/CS184.1x/2012_Fall + +#You can load the courses into your data directory with these cmds: +# git clone https://github.com/MITx/3.091x.git +# git clone https://github.com/MITx/6.00x.git +# git clone https://github.com/MITx/content-mit-6002x.git +# git clone https://github.com/MITx/content-mit-6002x.git +# git clone https://github.com/MITx/content-harvard-id270x.git +# git clone https://github.com/MITx/content-berkeley-cs169x.git +# git clone https://github.com/MITx/content-berkeley-cs169.2x.git +# git clone https://github.com/MITx/content-berkeley-cs184x.git + +Feature: There are courses on the homepage + In order to compared rendered content to the database + As an acceptance test + I want to count all the chapters, sections, and tabs for each course + + Scenario: Navigate through course MITx/3.091x/2012_Fall + Given I am registered for course "MITx/3.091x/2012_Fall" + And I log in + Then I verify all the content of each course + + Scenario: Navigate through course MITx/6.002x/2012_Fall + Given I am registered for course "MITx/6.002x/2012_Fall" + And I log in + Then I verify all the content of each course + + Scenario: Navigate through course MITx/6.00x/2012_Fall + Given I am registered for course "MITx/6.00x/2012_Fall" + And I log in + Then I verify all the content of each course + + Scenario: Navigate through course HarvardX/PH207x/2012_Fall + Given I am registered for course "HarvardX/PH207x/2012_Fall" + And I log in + Then I verify all the content of each course + + Scenario: Navigate through course BerkeleyX/CS169.1x/2012_Fall + 
Given I am registered for course "BerkeleyX/CS169.1x/2012_Fall" + And I log in + Then I verify all the content of each course + + Scenario: Navigate through course BerkeleyX/CS169.2x/2012_Fall + Given I am registered for course "BerkeleyX/CS169.2x/2012_Fall" + And I log in + Then I verify all the content of each course + + Scenario: Navigate through course BerkeleyX/CS184.1x/2012_Fall + Given I am registered for course "BerkeleyX/CS184.1x/2012_Fall" + And I log in + Then I verify all the content of each course \ No newline at end of file diff --git a/lms/djangoapps/courseware/features/smart-accordion.py b/lms/djangoapps/courseware/features/smart-accordion.py new file mode 100644 index 0000000000..95d3396f57 --- /dev/null +++ b/lms/djangoapps/courseware/features/smart-accordion.py @@ -0,0 +1,152 @@ +from lettuce import world, step +from re import sub +from nose.tools import assert_equals +from xmodule.modulestore.django import modulestore +from courses import * + +from logging import getLogger +logger = getLogger(__name__) + +def check_for_errors(): + e = world.browser.find_by_css('.outside-app') + if len(e) > 0: + assert False, 'there was a server error at %s' % (world.browser.url) + else: + assert True + +@step(u'I verify all the content of each course') +def i_verify_all_the_content_of_each_course(step): + all_possible_courses = get_courses() + logger.debug('Courses found:') + for c in all_possible_courses: + logger.debug(c.id) + ids = [c.id for c in all_possible_courses] + + # Get a list of all the registered courses + registered_courses = world.browser.find_by_css('article.my-course') + if len(all_possible_courses) < len(registered_courses): + assert False, "user is registered for more courses than are uniquely posssible" + else: + pass + + for test_course in registered_courses: + test_course.find_by_css('a').click() + check_for_errors() + + # Get the course. E.g. 
'MITx/6.002x/2012_Fall' + current_course = sub('/info','', sub('.*/courses/', '', world.browser.url)) + validate_course(current_course,ids) + + world.browser.find_link_by_text('Courseware').click() + assert world.browser.is_element_present_by_id('accordion',wait_time=2) + check_for_errors() + browse_course(current_course) + + # clicking the user link gets you back to the user's home page + world.browser.find_by_css('.user-link').click() + check_for_errors() + +def browse_course(course_id): + + ## count chapters from xml and page and compare + chapters = get_courseware_with_tabs(course_id) + num_chapters = len(chapters) + + rendered_chapters = world.browser.find_by_css('#accordion > nav > div') + num_rendered_chapters = len(rendered_chapters) + + msg = '%d chapters expected, %d chapters found on page for %s' % (num_chapters, num_rendered_chapters, course_id) + #logger.debug(msg) + assert num_chapters == num_rendered_chapters, msg + + chapter_it = 0 + + ## Iterate the chapters + while chapter_it < num_chapters: + + ## click into a chapter + world.browser.find_by_css('#accordion > nav > div')[chapter_it].find_by_tag('h3').click() + + ## look for the "there was a server error" div + check_for_errors() + + ## count sections from xml and page and compare + sections = chapters[chapter_it]['sections'] + num_sections = len(sections) + + rendered_sections = world.browser.find_by_css('#accordion > nav > div')[chapter_it].find_by_tag('li') + num_rendered_sections = len(rendered_sections) + + msg = ('%d sections expected, %d sections found on page, %s - %d - %s' % + (num_sections, num_rendered_sections, course_id, chapter_it, chapters[chapter_it]['chapter_name'])) + #logger.debug(msg) + assert num_sections == num_rendered_sections, msg + + section_it = 0 + + ## Iterate the sections + while section_it < num_sections: + + ## click on a section + world.browser.find_by_css('#accordion > nav > div')[chapter_it].find_by_tag('li')[section_it].find_by_tag('a').click() + + ## sometimes 
the course-content takes a long time to load + assert world.browser.is_element_present_by_css('.course-content',wait_time=5) + + ## look for server error div + check_for_errors() + + ## count tabs from xml and page and compare + + ## count the number of tabs. If number of tabs is 0, there won't be anything rendered + ## so we explicitly set rendered_tabs because otherwise find_elements returns a None object with no length + num_tabs = sections[section_it]['clickable_tab_count'] + if num_tabs != 0: + rendered_tabs = world.browser.find_by_css('ol#sequence-list > li') + num_rendered_tabs = len(rendered_tabs) + else: + rendered_tabs = 0 + num_rendered_tabs = 0 + + msg = ('%d tabs expected, %d tabs found, %s - %d - %s' % + (num_tabs, num_rendered_tabs, course_id, section_it, sections[section_it]['section_name'])) + #logger.debug(msg) + + # Save the HTML to a file for later comparison + world.save_the_course_content('/tmp/%s' % course_id) + + assert num_tabs == num_rendered_tabs, msg + + tabs = sections[section_it]['tabs'] + tab_it = 0 + + ## Iterate the tabs + while tab_it < num_tabs: + + rendered_tabs[tab_it].find_by_tag('a').click() + + ## do something with the tab sections[section_it] + # e = world.browser.find_by_css('section.course-content section') + # process_section(e) + tab_children = tabs[tab_it]['children_count'] + tab_class = tabs[tab_it]['class'] + if tab_children != 0: + rendered_items = world.browser.find_by_css('div#seq_content > section > ol > li > section') + num_rendered_items = len(rendered_items) + msg = ('%d items expected, %d items found, %s - %d - %s - tab %d' % + (tab_children, num_rendered_items, course_id, section_it, sections[section_it]['section_name'], tab_it)) + #logger.debug(msg) + assert tab_children == num_rendered_items, msg + + tab_it += 1 + + section_it += 1 + + chapter_it += 1 + + +def validate_course(current_course, ids): + try: + ids.index(current_course) + except: + assert False, "invalid course id %s" % current_course diff --git 
a/lms/djangoapps/courseware/migrations/0005_auto__add_offlinecomputedgrade__add_unique_offlinecomputedgrade_user_c.py b/lms/djangoapps/courseware/migrations/0005_auto__add_offlinecomputedgrade__add_unique_offlinecomputedgrade_user_c.py new file mode 100644 index 0000000000..674f97cec8 --- /dev/null +++ b/lms/djangoapps/courseware/migrations/0005_auto__add_offlinecomputedgrade__add_unique_offlinecomputedgrade_user_c.py @@ -0,0 +1,117 @@ +# -*- coding: utf-8 -*- +import datetime +from south.db import db +from south.v2 import SchemaMigration +from django.db import models + + +class Migration(SchemaMigration): + + def forwards(self, orm): + # Adding model 'OfflineComputedGrade' + db.create_table('courseware_offlinecomputedgrade', ( + ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), + ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), + ('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)), + ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, db_index=True, blank=True)), + ('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)), + ('gradeset', self.gf('django.db.models.fields.TextField')(null=True, blank=True)), + )) + db.send_create_signal('courseware', ['OfflineComputedGrade']) + + # Adding unique constraint on 'OfflineComputedGrade', fields ['user', 'course_id'] + db.create_unique('courseware_offlinecomputedgrade', ['user_id', 'course_id']) + + # Adding model 'OfflineComputedGradeLog' + db.create_table('courseware_offlinecomputedgradelog', ( + ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), + ('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)), + ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, db_index=True, blank=True)), + ('seconds', 
self.gf('django.db.models.fields.IntegerField')(default=0)), + ('nstudents', self.gf('django.db.models.fields.IntegerField')(default=0)), + )) + db.send_create_signal('courseware', ['OfflineComputedGradeLog']) + + + def backwards(self, orm): + # Removing unique constraint on 'OfflineComputedGrade', fields ['user', 'course_id'] + db.delete_unique('courseware_offlinecomputedgrade', ['user_id', 'course_id']) + + # Deleting model 'OfflineComputedGrade' + db.delete_table('courseware_offlinecomputedgrade') + + # Deleting model 'OfflineComputedGradeLog' + db.delete_table('courseware_offlinecomputedgradelog') + + + models = { + 'auth.group': { + 'Meta': {'object_name': 'Group'}, + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), + 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) + }, + 'auth.permission': { + 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, + 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) + }, + 'auth.user': { + 'Meta': {'object_name': 'User'}, + 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), + 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), + 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), + 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 
'symmetrical': 'False', 'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), + 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), + 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), + 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), + 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), + 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), + 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), + 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) + }, + 'contenttypes.contenttype': { + 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, + 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) + }, + 'courseware.offlinecomputedgrade': { + 'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'OfflineComputedGrade'}, + 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), + 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), + 'gradeset': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'updated': ('django.db.models.fields.DateTimeField', 
[], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), + 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) + }, + 'courseware.offlinecomputedgradelog': { + 'Meta': {'object_name': 'OfflineComputedGradeLog'}, + 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), + 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'nstudents': ('django.db.models.fields.IntegerField', [], {'default': '0'}), + 'seconds': ('django.db.models.fields.IntegerField', [], {'default': '0'}) + }, + 'courseware.studentmodule': { + 'Meta': {'unique_together': "(('student', 'module_state_key', 'course_id'),)", 'object_name': 'StudentModule'}, + 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), + 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}), + 'done': ('django.db.models.fields.CharField', [], {'default': "'na'", 'max_length': '8', 'db_index': 'True'}), + 'grade': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'max_grade': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), + 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), + 'module_state_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_column': "'module_id'", 'db_index': 'True'}), + 'module_type': ('django.db.models.fields.CharField', [], {'default': "'problem'", 'max_length': '32', 'db_index': 'True'}), + 'state': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), + 'student': 
('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) + } + } + + complete_apps = ['courseware'] \ No newline at end of file diff --git a/lms/djangoapps/courseware/models.py b/lms/djangoapps/courseware/models.py index ffc7c929de..21ef8b3d66 100644 --- a/lms/djangoapps/courseware/models.py +++ b/lms/djangoapps/courseware/models.py @@ -177,3 +177,40 @@ class StudentModuleCache(object): def append(self, student_module): self.cache.append(student_module) + + +class OfflineComputedGrade(models.Model): + """ + Table of grades computed offline for a given user and course. + """ + user = models.ForeignKey(User, db_index=True) + course_id = models.CharField(max_length=255, db_index=True) + + created = models.DateTimeField(auto_now_add=True, null=True, db_index=True) + updated = models.DateTimeField(auto_now=True, db_index=True) + + gradeset = models.TextField(null=True, blank=True) # grades, stored as JSON + + class Meta: + unique_together = (('user', 'course_id'), ) + + def __unicode__(self): + return "[OfflineComputedGrade] %s: %s (%s) = %s" % (self.user, self.course_id, self.created, self.gradeset) + + +class OfflineComputedGradeLog(models.Model): + """ + Log of when offline grades are computed. + Use this to be able to show instructor when the last computed grades were done. 
+ """ + class Meta: + ordering = ["-created"] + get_latest_by = "created" + + course_id = models.CharField(max_length=255, db_index=True) + created = models.DateTimeField(auto_now_add=True, null=True, db_index=True) + seconds = models.IntegerField(default=0) # seconds elapsed for computation + nstudents = models.IntegerField(default=0) + + def __unicode__(self): + return "[OCGLog] %s: %s" % (self.course_id, self.created) diff --git a/lms/djangoapps/courseware/views.py b/lms/djangoapps/courseware/views.py index 276af80ca9..9e52e2b281 100644 --- a/lms/djangoapps/courseware/views.py +++ b/lms/djangoapps/courseware/views.py @@ -17,7 +17,7 @@ from django.views.decorators.cache import cache_control from courseware import grades from courseware.access import has_access -from courseware.courses import (get_course_with_access, get_courses_by_university) +from courseware.courses import (get_courses, get_course_with_access, get_courses_by_university) import courseware.tabs as tabs from courseware.models import StudentModuleCache from module_render import toc_for_course, get_module, get_instance_module @@ -61,16 +61,19 @@ def user_groups(user): return group_names - @ensure_csrf_cookie @cache_if_anonymous def courses(request): ''' Render "find courses" page. The course selection work is done in courseware.courses. 
''' - universities = get_courses_by_university(request.user, - domain=request.META.get('HTTP_HOST')) - return render_to_response("courseware/courses.html", {'universities': universities}) + courses = get_courses(request.user, domain=request.META.get('HTTP_HOST')) + + # Sort courses by how far are they from they start day + key = lambda course: course.days_until_start + courses = sorted(courses, key=key, reverse=True) + + return render_to_response("courseware/courses.html", {'courses': courses}) def render_accordion(request, course, chapter, section): @@ -317,7 +320,7 @@ def jump_to(request, course_id, location): except NoPathToItem: raise Http404("This location is not in any class: {0}".format(location)) - # choose the appropriate view (and provide the necessary args) based on the + # choose the appropriate view (and provide the necessary args) based on the # args provided by the redirect. # Rely on index to do all error handling and access control. if chapter is None: @@ -328,7 +331,7 @@ def jump_to(request, course_id, location): return redirect('courseware_section', course_id=course_id, chapter=chapter, section=section) else: return redirect('courseware_position', course_id=course_id, chapter=chapter, section=section, position=position) - + @ensure_csrf_cookie def course_info(request, course_id): """ @@ -435,6 +438,11 @@ def university_profile(request, org_id): # Only grab courses for this org... 
courses = get_courses_by_university(request.user, domain=request.META.get('HTTP_HOST'))[org_id] + + # Sort courses by how far are they from they start day + key = lambda course: course.days_until_start + courses = sorted(courses, key=key, reverse=True) + context = dict(courses=courses, org_id=org_id) template_file = "university_profile/{0}.html".format(org_id).lower() diff --git a/lms/djangoapps/django_comment_client/models.py b/lms/djangoapps/django_comment_client/models.py index 628ac21a4a..a6a2c23603 100644 --- a/lms/djangoapps/django_comment_client/models.py +++ b/lms/djangoapps/django_comment_client/models.py @@ -2,6 +2,10 @@ import logging from django.db import models from django.contrib.auth.models import User +from django.dispatch import receiver +from django.db.models.signals import post_save + +from student.models import CourseEnrollment from courseware.courses import get_course_by_id @@ -45,3 +49,14 @@ class Permission(models.Model): def __unicode__(self): return self.name + + +@receiver(post_save, sender=CourseEnrollment) +def assign_default_role(sender, instance, **kwargs): + if instance.user.is_staff: + role = Role.objects.get_or_create(course_id=instance.course_id, name="Moderator")[0] + else: + role = Role.objects.get_or_create(course_id=instance.course_id, name="Student")[0] + + logging.info("assign_default_role: adding %s as %s" % (instance.user, role)) + instance.user.roles.add(role) diff --git a/lms/djangoapps/instructor/management/commands/compute_grades.py b/lms/djangoapps/instructor/management/commands/compute_grades.py new file mode 100644 index 0000000000..462833ba3c --- /dev/null +++ b/lms/djangoapps/instructor/management/commands/compute_grades.py @@ -0,0 +1,52 @@ +#!/usr/bin/python +# +# django management command: dump grades to csv files +# for use by batch processes + +import os, sys, string +import datetime +import json + +#import student.models +from instructor.offline_gradecalc import * +from courseware.courses import 
get_course_by_id +from xmodule.modulestore.django import modulestore + +from django.conf import settings +from django.core.management.base import BaseCommand + +class Command(BaseCommand): + help = "Compute grades for all students in a course, and store result in DB.\n" + help += "Usage: compute_grades course_id_or_dir \n" + help += " course_id_or_dir: either course_id or course_dir\n" + help += 'Example course_id: MITx/8.01rq_MW/Classical_Mechanics_Reading_Questions_Fall_2012_MW_Section' + + def handle(self, *args, **options): + + print "args = ", args + + if len(args)>0: + course_id = args[0] + else: + print self.help + return + + try: + course = get_course_by_id(course_id) + except Exception as err: + if course_id in modulestore().courses: + course = modulestore().courses[course_id] + else: + print "-----------------------------------------------------------------------------" + print "Sorry, cannot find course %s" % course_id + print "Please provide a course ID or course data directory name, eg content-mit-801rq" + return + + print "-----------------------------------------------------------------------------" + print "Computing grades for %s" % (course.id) + + offline_grade_calculation(course.id) + + + + diff --git a/lms/djangoapps/instructor/offline_gradecalc.py b/lms/djangoapps/instructor/offline_gradecalc.py new file mode 100644 index 0000000000..7c102805b4 --- /dev/null +++ b/lms/djangoapps/instructor/offline_gradecalc.py @@ -0,0 +1,103 @@ +# ======== Offline calculation of grades ============================================================================= +# +# Computing grades of a large number of students can take a long time. These routines allow grades to +# be computed offline, by a batch process (eg cronjob). +# +# The grades are stored in the OfflineComputedGrade table of the courseware model. 
+ +import json +import logging +import time + +import courseware.models + +from collections import namedtuple +from json import JSONEncoder +from courseware import grades, models +from courseware.courses import get_course_by_id +from django.contrib.auth.models import User, Group + + +class MyEncoder(JSONEncoder): + + def _iterencode(self, obj, markers=None): + if isinstance(obj, tuple) and hasattr(obj, '_asdict'): + gen = self._iterencode_dict(obj._asdict(), markers) + else: + gen = JSONEncoder._iterencode(self, obj, markers) + for chunk in gen: + yield chunk + + +def offline_grade_calculation(course_id): + ''' + Compute grades for all students for a specified course, and save results to the DB. + ''' + + tstart = time.time() + enrolled_students = User.objects.filter(courseenrollment__course_id=course_id).prefetch_related("groups").order_by('username') + + enc = MyEncoder() + + class DummyRequest(object): + META = {} + def __init__(self): + return + def get_host(self): + return 'edx.mit.edu' + def is_secure(self): + return False + + request = DummyRequest() + + print "%d enrolled students" % len(enrolled_students) + course = get_course_by_id(course_id) + + for student in enrolled_students: + gradeset = grades.grade(student, request, course, keep_raw_scores=True) + gs = enc.encode(gradeset) + ocg, created = models.OfflineComputedGrade.objects.get_or_create(user=student, course_id=course_id) + ocg.gradeset = gs + ocg.save() + print "%s done" % student # print statement used because this is run by a management command + + tend = time.time() + dt = tend - tstart + + ocgl = models.OfflineComputedGradeLog(course_id=course_id, seconds=dt, nstudents=len(enrolled_students)) + ocgl.save() + print ocgl + print "All Done!" + + +def offline_grades_available(course_id): + ''' + Returns False if no offline grades available for specified course. + Otherwise returns latest log field entry about the available pre-computed grades. 
+ ''' + ocgl = models.OfflineComputedGradeLog.objects.filter(course_id=course_id) + if not ocgl: + return False + return ocgl.latest('created') + + +def student_grades(student, request, course, keep_raw_scores=False, use_offline=False): + ''' + This is the main interface to get grades. It has the same parameters as grades.grade, as well + as use_offline. If use_offline is True then this will look for an offline computed gradeset in the DB. + ''' + + if not use_offline: + return grades.grade(student, request, course, keep_raw_scores=keep_raw_scores) + + try: + ocg = models.OfflineComputedGrade.objects.get(user=student, course_id=course.id) + except models.OfflineComputedGrade.DoesNotExist: + return dict(raw_scores=[], section_breakdown=[], + msg='Error: no offline gradeset available for %s, %s' % (student, course.id)) + + return json.loads(ocg.gradeset) + + + + diff --git a/lms/djangoapps/instructor/tests.py b/lms/djangoapps/instructor/tests.py index 865a97951e..2d17cee47d 100644 --- a/lms/djangoapps/instructor/tests.py +++ b/lms/djangoapps/instructor/tests.py @@ -25,7 +25,6 @@ from django_comment_client.models import Role, FORUM_ROLE_ADMINISTRATOR, \ FORUM_ROLE_MODERATOR, FORUM_ROLE_COMMUNITY_TA, FORUM_ROLE_STUDENT from django_comment_client.utils import has_forum_access -from instructor import staff_grading_service from courseware.access import _course_staff_group_name import courseware.tests.tests as ct from xmodule.modulestore.django import modulestore @@ -100,7 +99,6 @@ def action_name(operation, rolename): return '{0} forum {1}'.format(operation, FORUM_ADMIN_ACTION_SUFFIX[rolename]) -_mock_service = staff_grading_service.MockStaffGradingService() @override_settings(MODULESTORE=ct.TEST_DATA_XML_MODULESTORE) class TestInstructorDashboardForumAdmin(ct.PageLoader): @@ -223,94 +221,3 @@ class TestInstructorDashboardForumAdmin(ct.PageLoader): self.assertTrue(response.content.find('{0}'.format(roles))>=0, 'not finding roles "{0}"'.format(roles)) 
-@override_settings(MODULESTORE=ct.TEST_DATA_XML_MODULESTORE) -class TestStaffGradingService(ct.PageLoader): - ''' - Check that staff grading service proxy works. Basically just checking the - access control and error handling logic -- all the actual work is on the - backend. - ''' - def setUp(self): - xmodule.modulestore.django._MODULESTORES = {} - - self.student = 'view@test.com' - self.instructor = 'view2@test.com' - self.password = 'foo' - self.location = 'TestLocation' - self.create_account('u1', self.student, self.password) - self.create_account('u2', self.instructor, self.password) - self.activate_user(self.student) - self.activate_user(self.instructor) - - self.course_id = "edX/toy/2012_Fall" - self.toy = modulestore().get_course(self.course_id) - def make_instructor(course): - group_name = _course_staff_group_name(course.location) - g = Group.objects.create(name=group_name) - g.user_set.add(ct.user(self.instructor)) - - make_instructor(self.toy) - - self.mock_service = staff_grading_service.grading_service() - - self.logout() - - def test_access(self): - """ - Make sure only staff have access. 
- """ - self.login(self.student, self.password) - - # both get and post should return 404 - for view_name in ('staff_grading_get_next', 'staff_grading_save_grade'): - url = reverse(view_name, kwargs={'course_id': self.course_id}) - self.check_for_get_code(404, url) - self.check_for_post_code(404, url) - - - def test_get_next(self): - self.login(self.instructor, self.password) - - url = reverse('staff_grading_get_next', kwargs={'course_id': self.course_id}) - data = {'location': self.location} - - r = self.check_for_post_code(200, url, data) - d = json.loads(r.content) - self.assertTrue(d['success']) - self.assertEquals(d['submission_id'], self.mock_service.cnt) - self.assertIsNotNone(d['submission']) - self.assertIsNotNone(d['num_graded']) - self.assertIsNotNone(d['min_for_ml']) - self.assertIsNotNone(d['num_pending']) - self.assertIsNotNone(d['prompt']) - self.assertIsNotNone(d['ml_error_info']) - self.assertIsNotNone(d['max_score']) - self.assertIsNotNone(d['rubric']) - - - def test_save_grade(self): - self.login(self.instructor, self.password) - - url = reverse('staff_grading_save_grade', kwargs={'course_id': self.course_id}) - - data = {'score': '12', - 'feedback': 'great!', - 'submission_id': '123', - 'location': self.location} - r = self.check_for_post_code(200, url, data) - d = json.loads(r.content) - self.assertTrue(d['success'], str(d)) - self.assertEquals(d['submission_id'], self.mock_service.cnt) - - def test_get_problem_list(self): - self.login(self.instructor, self.password) - - url = reverse('staff_grading_get_problem_list', kwargs={'course_id': self.course_id}) - data = {} - - r = self.check_for_post_code(200, url, data) - d = json.loads(r.content) - self.assertTrue(d['success'], str(d)) - self.assertIsNotNone(d['problem_list']) - - diff --git a/lms/djangoapps/instructor/views.py b/lms/djangoapps/instructor/views.py index 79cf0caaf3..2d58799efe 100644 --- a/lms/djangoapps/instructor/views.py +++ b/lms/djangoapps/instructor/views.py @@ -2,10 +2,14 @@ 
from collections import defaultdict import csv +import json import logging import os +import requests import urllib +from StringIO import StringIO + from django.conf import settings from django.contrib.auth.models import User, Group from django.http import HttpResponse @@ -20,7 +24,7 @@ from courseware.courses import get_course_with_access from django_comment_client.models import Role, FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_MODERATOR, FORUM_ROLE_COMMUNITY_TA from django_comment_client.utils import has_forum_access from psychometrics import psychoanalyze -from student.models import CourseEnrollment +from student.models import CourseEnrollment, CourseEnrollmentAllowed from xmodule.course_module import CourseDescriptor from xmodule.modulestore import Location from xmodule.modulestore.django import modulestore @@ -28,8 +32,7 @@ from xmodule.modulestore.exceptions import InvalidLocationError, ItemNotFoundErr from xmodule.modulestore.search import path_to_location import track.views -from .grading import StaffGrading - +from .offline_gradecalc import student_grades, offline_grades_available log = logging.getLogger(__name__) @@ -76,9 +79,12 @@ def instructor_dashboard(request, course_id): data.append(['metadata', escape(str(course.metadata))]) datatable['data'] = data - def return_csv(fn, datatable): - response = HttpResponse(mimetype='text/csv') - response['Content-Disposition'] = 'attachment; filename={0}'.format(fn) + def return_csv(fn, datatable, fp=None): + if fp is None: + response = HttpResponse(mimetype='text/csv') + response['Content-Disposition'] = 'attachment; filename={0}'.format(fn) + else: + response = fp writer = csv.writer(response, dialect='excel', quotechar='"', quoting=csv.QUOTE_ALL) writer.writerow(datatable['header']) for datarow in datatable['data']: @@ -87,16 +93,23 @@ def instructor_dashboard(request, course_id): return response def get_staff_group(course): - staffgrp = get_access_group_name(course, 'staff') + return get_group(course, 'staff') + + 
def get_instructor_group(course): + return get_group(course, 'instructor') + + def get_group(course, groupname): + grpname = get_access_group_name(course, groupname) try: - group = Group.objects.get(name=staffgrp) + group = Group.objects.get(name=grpname) except Group.DoesNotExist: - group = Group(name=staffgrp) # create the group + group = Group(name=grpname) # create the group group.save() return group # process actions from form POST action = request.POST.get('action', '') + use_offline = request.POST.get('use_offline_grades',False) if settings.MITX_FEATURES['ENABLE_MANUAL_GIT_RELOAD']: if 'GIT pull' in action: @@ -126,39 +139,98 @@ def instructor_dashboard(request, course_id): except Exception as err: msg += '

Error: {0}

'.format(escape(err)) - if action == 'Dump list of enrolled students': + if action == 'Dump list of enrolled students' or action=='List enrolled students': log.debug(action) - datatable = get_student_grade_summary_data(request, course, course_id, get_grades=False) + datatable = get_student_grade_summary_data(request, course, course_id, get_grades=False, use_offline=use_offline) datatable['title'] = 'List of students enrolled in {0}'.format(course_id) track.views.server_track(request, 'list-students', {}, page='idashboard') elif 'Dump Grades' in action: log.debug(action) - datatable = get_student_grade_summary_data(request, course, course_id, get_grades=True) + datatable = get_student_grade_summary_data(request, course, course_id, get_grades=True, use_offline=use_offline) datatable['title'] = 'Summary Grades of students enrolled in {0}'.format(course_id) track.views.server_track(request, 'dump-grades', {}, page='idashboard') elif 'Dump all RAW grades' in action: log.debug(action) datatable = get_student_grade_summary_data(request, course, course_id, get_grades=True, - get_raw_scores=True) + get_raw_scores=True, use_offline=use_offline) datatable['title'] = 'Raw Grades of students enrolled in {0}'.format(course_id) track.views.server_track(request, 'dump-grades-raw', {}, page='idashboard') elif 'Download CSV of all student grades' in action: track.views.server_track(request, 'dump-grades-csv', {}, page='idashboard') return return_csv('grades_{0}.csv'.format(course_id), - get_student_grade_summary_data(request, course, course_id)) + get_student_grade_summary_data(request, course, course_id, use_offline=use_offline)) elif 'Download CSV of all RAW grades' in action: track.views.server_track(request, 'dump-grades-csv-raw', {}, page='idashboard') return return_csv('grades_{0}_raw.csv'.format(course_id), - get_student_grade_summary_data(request, course, course_id, get_raw_scores=True)) + get_student_grade_summary_data(request, course, course_id, get_raw_scores=True, 
use_offline=use_offline)) elif 'Download CSV of answer distributions' in action: track.views.server_track(request, 'dump-answer-dist-csv', {}, page='idashboard') return return_csv('answer_dist_{0}.csv'.format(course_id), get_answers_distribution(request, course_id)) + #---------------------------------------- + # export grades to remote gradebook + + elif action=='List assignments available in remote gradebook': + msg2, datatable = _do_remote_gradebook(request.user, course, 'get-assignments') + msg += msg2 + + elif action=='List assignments available for this course': + log.debug(action) + allgrades = get_student_grade_summary_data(request, course, course_id, get_grades=True, use_offline=use_offline) + + assignments = [[x] for x in allgrades['assignments']] + datatable = {'header': ['Assignment Name']} + datatable['data'] = assignments + datatable['title'] = action + + msg += 'assignments=
%s
' % assignments + + elif action=='List enrolled students matching remote gradebook': + stud_data = get_student_grade_summary_data(request, course, course_id, get_grades=False, use_offline=use_offline) + msg2, rg_stud_data = _do_remote_gradebook(request.user, course, 'get-membership') + datatable = {'header': ['Student email', 'Match?']} + rg_students = [ x['email'] for x in rg_stud_data['retdata'] ] + def domatch(x): + return 'yes' if x.email in rg_students else 'No' + datatable['data'] = [[x.email, domatch(x)] for x in stud_data['students']] + datatable['title'] = action + + elif action in ['Display grades for assignment', 'Export grades for assignment to remote gradebook', + 'Export CSV file of grades for assignment']: + + log.debug(action) + datatable = {} + aname = request.POST.get('assignment_name','') + if not aname: + msg += "Please enter an assignment name" + else: + allgrades = get_student_grade_summary_data(request, course, course_id, get_grades=True, use_offline=use_offline) + if aname not in allgrades['assignments']: + msg += "Invalid assignment name '%s'" % aname + else: + aidx = allgrades['assignments'].index(aname) + datatable = {'header': ['External email', aname]} + datatable['data'] = [[x.email, x.grades[aidx]] for x in allgrades['students']] + datatable['title'] = 'Grades for assignment "%s"' % aname + + if 'Export CSV' in action: + # generate and return CSV file + return return_csv('grades %s.csv' % aname, datatable) + + elif 'remote gradebook' in action: + fp = StringIO() + return_csv('', datatable, fp=fp) + fp.seek(0) + files = {'datafile': fp} + msg2, dataset = _do_remote_gradebook(request.user, course, 'post-grades', files=files) + msg += msg2 + + #---------------------------------------- # Admin @@ -172,6 +244,16 @@ def instructor_dashboard(request, course_id): datatable['title'] = 'List of Staff in course {0}'.format(course_id) track.views.server_track(request, 'list-staff', {}, page='idashboard') + elif 'List course instructors' in action 
and request.user.is_staff: + group = get_instructor_group(course) + msg += 'Instructor group = {0}'.format(group.name) + log.debug('instructor grp={0}'.format(group.name)) + uset = group.user_set.all() + datatable = {'header': ['Username', 'Full name']} + datatable['data'] = [[x.username, x.profile.name] for x in uset] + datatable['title'] = 'List of Instructors in course {0}'.format(course_id) + track.views.server_track(request, 'list-instructors', {}, page='idashboard') + elif action == 'Add course staff': uname = request.POST['staffuser'] try: @@ -186,6 +268,20 @@ def instructor_dashboard(request, course_id): user.groups.add(group) track.views.server_track(request, 'add-staff {0}'.format(user), {}, page='idashboard') + elif action == 'Add instructor' and request.user.is_staff: + uname = request.POST['instructor'] + try: + user = User.objects.get(username=uname) + except User.DoesNotExist: + msg += 'Error: unknown username "{0}"'.format(uname) + user = None + if user is not None: + group = get_instructor_group(course) + msg += 'Added {0} to instructor group = {1}'.format(user, group.name) + log.debug('staffgrp={0}'.format(group.name)) + user.groups.add(group) + track.views.server_track(request, 'add-instructor {0}'.format(user), {}, page='idashboard') + elif action == 'Remove course staff': uname = request.POST['staffuser'] try: @@ -200,6 +296,20 @@ def instructor_dashboard(request, course_id): user.groups.remove(group) track.views.server_track(request, 'remove-staff {0}'.format(user), {}, page='idashboard') + elif action == 'Remove instructor' and request.user.is_staff: + uname = request.POST['instructor'] + try: + user = User.objects.get(username=uname) + except User.DoesNotExist: + msg += 'Error: unknown username "{0}"'.format(uname) + user = None + if user is not None: + group = get_instructor_group(course) + msg += 'Removed {0} from instructor group = {1}'.format(user, group.name) + log.debug('instructorgrp={0}'.format(group.name)) + 
user.groups.remove(group) + track.views.server_track(request, 'remove-instructor {0}'.format(user), {}, page='idashboard') + #---------------------------------------- # forum administration @@ -258,6 +368,71 @@ def instructor_dashboard(request, course_id): track.views.server_track(request, '{0} {1} as {2} for {3}'.format(FORUM_ROLE_ADD, uname, FORUM_ROLE_COMMUNITY_TA, course_id), {}, page='idashboard') + #---------------------------------------- + # enrollment + + elif action == 'List students who may enroll but may not have yet signed up': + ceaset = CourseEnrollmentAllowed.objects.filter(course_id=course_id) + datatable = {'header': ['StudentEmail']} + datatable['data'] = [[x.email] for x in ceaset] + datatable['title'] = action + + elif action == 'Enroll student': + + student = request.POST.get('enstudent','') + ret = _do_enroll_students(course, course_id, student) + datatable = ret['datatable'] + + elif action == 'Un-enroll student': + + student = request.POST.get('enstudent','') + datatable = {} + isok = False + cea = CourseEnrollmentAllowed.objects.filter(course_id=course_id, email=student) + if cea: + cea.delete() + msg += "Un-enrolled student with email '%s'" % student + isok = True + try: + nce = CourseEnrollment.objects.get(user=User.objects.get(email=student), course_id=course_id) + nce.delete() + msg += "Un-enrolled student with email '%s'" % student + except Exception as err: + if not isok: + msg += "Error! 
Failed to un-enroll student with email '%s'\n" % student + msg += str(err) + '\n' + + elif action == 'Un-enroll ALL students': + + ret = _do_enroll_students(course, course_id, '', overload=True) + datatable = ret['datatable'] + + elif action == 'Enroll multiple students': + + students = request.POST.get('enroll_multiple','') + ret = _do_enroll_students(course, course_id, students) + datatable = ret['datatable'] + + elif action == 'List sections available in remote gradebook': + + msg2, datatable = _do_remote_gradebook(request.user, course, 'get-sections') + msg += msg2 + + elif action in ['List students in section in remote gradebook', + 'Overload enrollment list using remote gradebook', + 'Merge enrollment list with remote gradebook']: + + section = request.POST.get('gradebook_section','') + msg2, datatable = _do_remote_gradebook(request.user, course, 'get-membership', dict(section=section) ) + msg += msg2 + + if not 'List' in action: + students = ','.join([x['email'] for x in datatable['retdata']]) + overload = 'Overload' in action + ret = _do_enroll_students(course, course_id, students, overload=overload) + datatable = ret['datatable'] + + #---------------------------------------- # psychometrics @@ -271,9 +446,15 @@ def instructor_dashboard(request, course_id): problems = psychoanalyze.problems_with_psychometric_data(course_id) + #---------------------------------------- + # offline grades? + + if use_offline: + msg += "
Grades from %s" % offline_grades_available(course_id) #---------------------------------------- # context for rendering + context = {'course': course, 'staff_access': True, 'admin_access': request.user.is_staff, @@ -286,16 +467,66 @@ def instructor_dashboard(request, course_id): 'plots': plots, # psychometrics 'course_errors': modulestore().get_item_errors(course.location), 'djangopid' : os.getpid(), + 'mitx_version' : getattr(settings,'MITX_VERSION_STRING',''), + 'offline_grade_log' : offline_grades_available(course_id), } return render_to_response('courseware/instructor_dashboard.html', context) + +def _do_remote_gradebook(user, course, action, args=None, files=None): + ''' + Perform remote gradebook action. Returns msg, datatable. + ''' + rg = course.metadata.get('remote_gradebook','') + if not rg: + msg = "No remote gradebook defined in course metadata" + return msg, {} + + rgurl = settings.MITX_FEATURES.get('REMOTE_GRADEBOOK_URL','') + if not rgurl: + msg = "No remote gradebook url defined in settings.MITX_FEATURES" + return msg, {} + + rgname = rg.get('name','') + if not rgname: + msg = "No gradebook name defined in course remote_gradebook metadata" + return msg, {} + + if args is None: + args = {} + data = dict(submit=action, gradebook=rgname, user=user.email) + data.update(args) + + try: + resp = requests.post(rgurl, data=data, verify=False, files=files) + retdict = json.loads(resp.content) + except Exception as err: + msg = "Failed to communicate with gradebook server at %s
" % rgurl + msg += "Error: %s" % err + msg += "
resp=%s" % resp.content + msg += "
data=%s" % data + return msg, {} + + msg = '
%s
' % retdict['msg'].replace('\n','
') + retdata = retdict['data'] # a list of dicts + + if retdata: + datatable = {'header': retdata[0].keys()} + datatable['data'] = [x.values() for x in retdata] + datatable['title'] = 'Remote gradebook response for %s' % action + datatable['retdata'] = retdata + else: + datatable = {} + + return msg, datatable + def _list_course_forum_members(course_id, rolename, datatable): ''' Fills in datatable with forum membership information, for a given role, so that it will be displayed on instructor dashboard. - course_ID = course's ID string + course_ID = the ID string for a course rolename = one of "Administrator", "Moderator", "Community TA" Returns message status string to append to displayed message, if role is unknown. @@ -360,7 +591,7 @@ def _update_forum_role_membership(uname, course, rolename, add_or_remove): return msg -def get_student_grade_summary_data(request, course, course_id, get_grades=True, get_raw_scores=False): +def get_student_grade_summary_data(request, course, course_id, get_grades=True, get_raw_scores=False, use_offline=False): ''' Return data arrays with student identity and grades for specified course. 
@@ -381,16 +612,18 @@ def get_student_grade_summary_data(request, course, course_id, get_grades=True, enrolled_students = User.objects.filter(courseenrollment__course_id=course_id).prefetch_related("groups").order_by('username') header = ['ID', 'Username', 'Full Name', 'edX email', 'External email'] + assignments = [] if get_grades and enrolled_students.count() > 0: # just to construct the header - gradeset = grades.grade(enrolled_students[0], request, course, keep_raw_scores=get_raw_scores) + gradeset = student_grades(enrolled_students[0], request, course, keep_raw_scores=get_raw_scores, use_offline=use_offline) # log.debug('student {0} gradeset {1}'.format(enrolled_students[0], gradeset)) if get_raw_scores: - header += [score.section for score in gradeset['raw_scores']] + assignments += [score.section for score in gradeset['raw_scores']] else: - header += [x['label'] for x in gradeset['section_breakdown']] + assignments += [x['label'] for x in gradeset['section_breakdown']] + header += assignments - datatable = {'header': header} + datatable = {'header': header, 'assignments': assignments, 'students': enrolled_students} data = [] for student in enrolled_students: @@ -401,40 +634,21 @@ def get_student_grade_summary_data(request, course, course_id, get_grades=True, datarow.append('') if get_grades: - gradeset = grades.grade(student, request, course, keep_raw_scores=get_raw_scores) - # log.debug('student={0}, gradeset={1}'.format(student,gradeset)) + gradeset = student_grades(student, request, course, keep_raw_scores=get_raw_scores, use_offline=use_offline) + log.debug('student={0}, gradeset={1}'.format(student,gradeset)) if get_raw_scores: - datarow += [score.earned for score in gradeset['raw_scores']] + # TODO (ichuang) encode Score as dict instead of as list, so score[0] -> score['earned'] + sgrades = [(getattr(score,'earned','') or score[0]) for score in gradeset['raw_scores']] else: - datarow += [x['percent'] for x in gradeset['section_breakdown']] + sgrades = 
[x['percent'] for x in gradeset['section_breakdown']] + datarow += sgrades + student.grades = sgrades # store in student object data.append(datarow) datatable['data'] = data return datatable - - -@cache_control(no_cache=True, no_store=True, must_revalidate=True) -def staff_grading(request, course_id): - """ - Show the instructor grading interface. - """ - course = get_course_with_access(request.user, course_id, 'staff') - - grading = StaffGrading(course) - - ajax_url = reverse('staff_grading', kwargs={'course_id': course_id}) - if not ajax_url.endswith('/'): - ajax_url += '/' - - return render_to_response('instructor/staff_grading.html', { - 'view_html': grading.get_html(), - 'course': course, - 'course_id': course_id, - 'ajax_url': ajax_url, - # Checked above - 'staff_access': True, }) - +#----------------------------------------------------------------------------- @cache_control(no_cache=True, no_store=True, must_revalidate=True) def gradebook(request, course_id): @@ -453,7 +667,7 @@ def gradebook(request, course_id): student_info = [{'username': student.username, 'id': student.id, 'email': student.email, - 'grade_summary': grades.grade(student, request, course), + 'grade_summary': student_grades(student, request, course), 'realname': student.profile.name, } for student in enrolled_students] @@ -476,6 +690,72 @@ def grade_summary(request, course_id): return render_to_response('courseware/grade_summary.html', context) +#----------------------------------------------------------------------------- +# enrollment + + +def _do_enroll_students(course, course_id, students, overload=False): + """Do the actual work of enrolling multiple students, presented as a string + of emails separated by commas or returns""" + + ns = [x.split('\n') for x in students.split(',')] + new_students = [item for sublist in ns for item in sublist] + new_students = [str(s.strip()) for s in new_students] + new_students_lc = [x.lower() for x in new_students] + + if '' in new_students: + 
new_students.remove('') + + status = dict([x,'unprocessed'] for x in new_students) + + if overload: # delete all but staff + todelete = CourseEnrollment.objects.filter(course_id=course_id) + for ce in todelete: + if not has_access(ce.user, course, 'staff') and ce.user.email.lower() not in new_students_lc: + status[ce.user.email] = 'deleted' + ce.delete() + else: + status[ce.user.email] = 'is staff' + ceaset = CourseEnrollmentAllowed.objects.filter(course_id=course_id) + for cea in ceaset: + status[cea.email] = 'removed from pending enrollment list' + ceaset.delete() + + for student in new_students: + try: + user=User.objects.get(email=student) + except User.DoesNotExist: + # user not signed up yet, put in pending enrollment allowed table + if CourseEnrollmentAllowed.objects.filter(email=student, course_id=course_id): + status[student] = 'user does not exist, enrollment already allowed, pending' + continue + cea = CourseEnrollmentAllowed(email=student, course_id=course_id) + cea.save() + status[student] = 'user does not exist, enrollment allowed, pending' + continue + + if CourseEnrollment.objects.filter(user=user, course_id=course_id): + status[student] = 'already enrolled' + continue + try: + nce = CourseEnrollment(user=user, course_id=course_id) + nce.save() + status[student] = 'added' + except: + status[student] = 'rejected' + + datatable = {'header': ['StudentEmail', 'action']} + datatable['data'] = [[x, status[x]] for x in status] + datatable['title'] = 'Enrollment of students' + + def sf(stat): return [x for x in status if status[x]==stat] + + data = dict(added=sf('added'), rejected=sf('rejected')+sf('exists'), + deleted=sf('deleted'), datatable=datatable) + + return data + + @ensure_csrf_cookie @cache_control(no_cache=True, no_store=True, must_revalidate=True) def enroll_students(request, course_id): @@ -494,22 +774,10 @@ def enroll_students(request, course_id): course = get_course_with_access(request.user, course_id, 'staff') existing_students = 
[ce.user.email for ce in CourseEnrollment.objects.filter(course_id=course_id)] - if 'new_students' in request.POST: - new_students = request.POST['new_students'].split('\n') - else: - new_students = [] - new_students = [s.strip() for s in new_students] - - added_students = [] - rejected_students = [] - - for student in new_students: - try: - nce = CourseEnrollment(user=User.objects.get(email=student), course_id=course_id) - nce.save() - added_students.append(student) - except: - rejected_students.append(student) + new_students = request.POST.get('new_students') + ret = _do_enroll_students(course, course_id, new_students) + added_students = ret['added'] + rejected_students = ret['rejected'] return render_to_response("enroll_students.html", {'course': course_id, 'existing_students': existing_students, @@ -518,6 +786,9 @@ def enroll_students(request, course_id): 'debug': new_students}) +#----------------------------------------------------------------------------- +# answer distribution + def get_answers_distribution(request, course_id): """ Get the distribution of answers for all graded problems in the course. 
diff --git a/lms/djangoapps/open_ended_grading/__init__.py b/lms/djangoapps/open_ended_grading/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/lms/djangoapps/open_ended_grading/grading_service.py b/lms/djangoapps/open_ended_grading/grading_service.py new file mode 100644 index 0000000000..7362411daa --- /dev/null +++ b/lms/djangoapps/open_ended_grading/grading_service.py @@ -0,0 +1,100 @@ +# This class gives a common interface for logging into the grading controller +import json +import logging +import requests +from requests.exceptions import RequestException, ConnectionError, HTTPError +import sys + +from django.conf import settings +from django.http import HttpResponse, Http404 + +from courseware.access import has_access +from util.json_request import expect_json +from xmodule.course_module import CourseDescriptor + +log = logging.getLogger(__name__) + +class GradingServiceError(Exception): + pass + +class GradingService(object): + """ + Interface to staff grading backend. + """ + def __init__(self, config): + self.username = config['username'] + self.password = config['password'] + self.url = config['url'] + self.login_url = self.url + '/login/' + self.session = requests.session() + + def _login(self): + """ + Log into the staff grading service. + + Raises requests.exceptions.HTTPError if something goes wrong. + + Returns the decoded json dict of the response. + """ + response = self.session.post(self.login_url, + {'username': self.username, + 'password': self.password,}) + + response.raise_for_status() + + return response.json + + def post(self, url, data, allow_redirects=False): + """ + Make a post request to the grading controller + """ + try: + op = lambda: self.session.post(url, data=data, + allow_redirects=allow_redirects) + r = self._try_with_login(op) + except (RequestException, ConnectionError, HTTPError) as err: + # reraise as promised GradingServiceError, but preserve stacktrace. 
+ raise GradingServiceError, str(err), sys.exc_info()[2] + + return r.text + + def get(self, url, params, allow_redirects=False): + """ + Make a get request to the grading controller + """ + log.debug(params) + op = lambda: self.session.get(url, + allow_redirects=allow_redirects, + params=params) + try: + r = self._try_with_login(op) + except (RequestException, ConnectionError, HTTPError) as err: + # reraise as promised GradingServiceError, but preserve stacktrace. + raise GradingServiceError, str(err), sys.exc_info()[2] + + return r.text + + + def _try_with_login(self, operation): + """ + Call operation(), which should return a requests response object. If + the request fails with a 'login_required' error, call _login() and try + the operation again. + + Returns the result of operation(). Does not catch exceptions. + """ + response = operation() + if (response.json + and response.json.get('success') == False + and response.json.get('error') == 'login_required'): + # apparrently we aren't logged in. Try to fix that. + r = self._login() + if r and not r.get('success'): + log.warning("Couldn't log into staff_grading backend. 
Response: %s", + r) + # try again + response = operation() + response.raise_for_status() + + return response + diff --git a/lms/djangoapps/open_ended_grading/peer_grading_service.py b/lms/djangoapps/open_ended_grading/peer_grading_service.py new file mode 100644 index 0000000000..9ef0383fb5 --- /dev/null +++ b/lms/djangoapps/open_ended_grading/peer_grading_service.py @@ -0,0 +1,355 @@ +""" +This module provides an interface on the grading-service backend +for peer grading + +Use peer_grading_service() to get the version specified +in settings.PEER_GRADING_INTERFACE + +""" +import json +import logging +import requests +from requests.exceptions import RequestException, ConnectionError, HTTPError +import sys + +from django.conf import settings +from django.http import HttpResponse, Http404 +from grading_service import GradingService +from grading_service import GradingServiceError + +from courseware.access import has_access +from util.json_request import expect_json +from xmodule.course_module import CourseDescriptor +from student.models import unique_id_for_user + +log = logging.getLogger(__name__) + +""" +This is a mock peer grading service that can be used for unit tests +without making actual service calls to the grading controller +""" +class MockPeerGradingService(object): + def get_next_submission(self, problem_location, grader_id): + return json.dumps({'success': True, + 'submission_id':1, + 'submission_key': "", + 'student_response': 'fake student response', + 'prompt': 'fake submission prompt', + 'rubric': 'fake rubric', + 'max_score': 4}) + + def save_grade(self, location, grader_id, submission_id, + score, feedback, submission_key): + return json.dumps({'success': True}) + + def is_student_calibrated(self, problem_location, grader_id): + return json.dumps({'success': True, 'calibrated': True}) + + def show_calibration_essay(self, problem_location, grader_id): + return json.dumps({'success': True, + 'submission_id':1, + 'submission_key': '', + 
'student_response': 'fake student response', + 'prompt': 'fake submission prompt', + 'rubric': 'fake rubric', + 'max_score': 4}) + + def save_calibration_essay(self, problem_location, grader_id, + calibration_essay_id, submission_key, score, feedback): + return {'success': True, 'actual_score': 2} + + def get_problem_list(self, course_id, grader_id): + return json.dumps({'success': True, + 'problem_list': [ + json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1', + 'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5}), + json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2', + 'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5}) + ]}) + +class PeerGradingService(GradingService): + """ + Interface with the grading controller for peer grading + """ + def __init__(self, config): + super(PeerGradingService, self).__init__(config) + self.get_next_submission_url = self.url + '/get_next_submission/' + self.save_grade_url = self.url + '/save_grade/' + self.is_student_calibrated_url = self.url + '/is_student_calibrated/' + self.show_calibration_essay_url = self.url + '/show_calibration_essay/' + self.save_calibration_essay_url = self.url + '/save_calibration_essay/' + self.get_problem_list_url = self.url + '/get_problem_list/' + + def get_next_submission(self, problem_location, grader_id): + response = self.get(self.get_next_submission_url, + {'location': problem_location, 'grader_id': grader_id}) + return response + + def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key): + data = {'grader_id' : grader_id, + 'submission_id' : submission_id, + 'score' : score, + 'feedback' : feedback, + 'submission_key': submission_key, + 'location': location} + return self.post(self.save_grade_url, data) + + def is_student_calibrated(self, problem_location, grader_id): + params = {'problem_id' : problem_location, 'student_id': grader_id} + return self.get(self.is_student_calibrated_url, params) + + def 
show_calibration_essay(self, problem_location, grader_id): + params = {'problem_id' : problem_location, 'student_id': grader_id} + return self.get(self.show_calibration_essay_url, params) + + def save_calibration_essay(self, problem_location, grader_id, calibration_essay_id, submission_key, score, feedback): + data = {'location': problem_location, + 'student_id': grader_id, + 'calibration_essay_id': calibration_essay_id, + 'submission_key': submission_key, + 'score': score, + 'feedback': feedback} + return self.post(self.save_calibration_essay_url, data) + + def get_problem_list(self, course_id, grader_id): + params = {'course_id': course_id, 'student_id': grader_id} + response = self.get(self.get_problem_list_url, params) + return response + + +_service = None +def peer_grading_service(): + """ + Return a peer grading service instance--if settings.MOCK_PEER_GRADING is True, + returns a mock one, otherwise a real one. + + Caches the result, so changing the setting after the first call to this + function will have no effect. + """ + global _service + if _service is not None: + return _service + + if settings.MOCK_PEER_GRADING: + _service = MockPeerGradingService() + else: + _service = PeerGradingService(settings.PEER_GRADING_INTERFACE) + + return _service + +def _err_response(msg): + """ + Return a HttpResponse with a json dump with success=False, and the given error message. 
+ """ + return HttpResponse(json.dumps({'success': False, 'error': msg}), + mimetype="application/json") + +def _check_required(request, required): + actual = set(request.POST.keys()) + missing = required - actual + if len(missing) > 0: + return False, "Missing required keys: {0}".format(', '.join(missing)) + else: + return True, "" + +def _check_post(request): + if request.method != 'POST': + raise Http404 + + +def get_next_submission(request, course_id): + """ + Makes a call to the grading controller for the next essay that should be graded + Returns a json dict with the following keys: + + 'success': bool + + 'submission_id': a unique identifier for the submission, to be passed back + with the grade. + + 'submission': the submission, rendered as read-only html for grading + + 'rubric': the rubric, also rendered as html. + + 'submission_key': a key associated with the submission for validation reasons + + 'error': if success is False, will have an error message with more info. + """ + _check_post(request) + required = set(['location']) + success, message = _check_required(request, required) + if not success: + return _err_response(message) + grader_id = unique_id_for_user(request.user) + p = request.POST + location = p['location'] + + try: + response = peer_grading_service().get_next_submission(location, grader_id) + return HttpResponse(response, + mimetype="application/json") + except GradingServiceError: + log.exception("Error getting next submission. server url: {0} location: {1}, grader_id: {2}" + .format(staff_grading_service().url, location, grader_id)) + return json.dumps({'success': False, + 'error': 'Could not connect to grading service'}) + +def save_grade(request, course_id): + """ + Saves the grade of a given submission. 
+ Input: + The request should have the following keys: + location - problem location + submission_id - id associated with this submission + submission_key - submission key given for validation purposes + score - the grade that was given to the submission + feedback - the feedback from the student + Returns + A json object with the following keys: + success: bool indicating whether the save was a success + error: if there was an error in the submission, this is the error message + """ + _check_post(request) + required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback']) + success, message = _check_required(request, required) + if not success: + return _err_response(message) + grader_id = unique_id_for_user(request.user) + p = request.POST + location = p['location'] + submission_id = p['submission_id'] + score = p['score'] + feedback = p['feedback'] + submission_key = p['submission_key'] + try: + response = peer_grading_service().save_grade(location, grader_id, submission_id, + score, feedback, submission_key) + return HttpResponse(response, mimetype="application/json") + except GradingServiceError: + log.exception("""Error saving grade. 
server url: {0}, location: {1}, submission_id:{2}, + submission_key: {3}, score: {4}""" + .format(peer_grading_service().url, + location, submission_id, submission_key, score) + ) + return json.dumps({'success': False, + 'error': 'Could not connect to grading service'}) + + + +def is_student_calibrated(request, course_id): + """ + Calls the grading controller to see if the given student is calibrated + on the given problem + + Input: + In the request, we need the following arguments: + location - problem location + + Returns: + Json object with the following keys + success - bool indicating whether or not the call was successful + calibrated - true if the grader has fully calibrated and can now move on to grading + - false if the grader is still working on calibration problems + total_calibrated_on_so_far - the number of calibration essays for this problem + that this grader has graded + """ + _check_post(request) + required = set(['location']) + success, message = _check_required(request, required) + if not success: + return _err_response(message) + grader_id = unique_id_for_user(request.user) + p = request.POST + location = p['location'] + + try: + response = peer_grading_service().is_student_calibrated(location, grader_id) + return HttpResponse(response, mimetype="application/json") + except GradingServiceError: + log.exception("Error from grading service. server url: {0}, grader_id: {1}, location: {2}" + .format(peer_grading_service().url, grader_id, location)) + return json.dumps({'success': False, + 'error': 'Could not connect to grading service'}) + + + +def show_calibration_essay(request, course_id): + """ + Fetch the next calibration essay from the grading controller and return it + Inputs: + In the request + location - problem location + + Returns: + A json dict with the following keys + 'success': bool + + 'submission_id': a unique identifier for the submission, to be passed back + with the grade. 
+ + 'submission': the submission, rendered as read-only html for grading + + 'rubric': the rubric, also rendered as html. + + 'submission_key': a key associated with the submission for validation reasons + + 'error': if success is False, will have an error message with more info. + + """ + _check_post(request) + + required = set(['location']) + success, message = _check_required(request, required) + if not success: + return _err_response(message) + + grader_id = unique_id_for_user(request.user) + p = request.POST + location = p['location'] + try: + response = peer_grading_service().show_calibration_essay(location, grader_id) + return HttpResponse(response, mimetype="application/json") + except GradingServiceError: + log.exception("Error from grading service. server url: {0}, location: {1}" + .format(peer_grading_service().url, location)) + return json.dumps({'success': False, + 'error': 'Could not connect to grading service'}) + + +def save_calibration_essay(request, course_id): + """ + Saves the grader's grade of a given calibration. 
+ Input: + The request should have the following keys: + location - problem location + submission_id - id associated with this submission + submission_key - submission key given for validation purposes + score - the grade that was given to the submission + feedback - the feedback from the student + Returns + A json object with the following keys: + success: bool indicating whether the save was a success + error: if there was an error in the submission, this is the error message + actual_score: the score that the instructor gave to this calibration essay + + """ + _check_post(request) + + required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback']) + success, message = _check_required(request, required) + if not success: + return _err_response(message) + grader_id = unique_id_for_user(request.user) + p = request.POST + location = p['location'] + calibration_essay_id = p['submission_id'] + submission_key = p['submission_key'] + score = p['score'] + feedback = p['feedback'] + + try: + response = peer_grading_service().save_calibration_essay(location, grader_id, calibration_essay_id, submission_key, score, feedback) + return HttpResponse(response, mimetype="application/json") + except GradingServiceError: + log.exception("Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format(location, calibration_essay_id, submission_key, grader_id)) + return _err_response('Could not connect to grading service') diff --git a/lms/djangoapps/instructor/grading.py b/lms/djangoapps/open_ended_grading/staff_grading.py similarity index 100% rename from lms/djangoapps/instructor/grading.py rename to lms/djangoapps/open_ended_grading/staff_grading.py diff --git a/lms/djangoapps/instructor/staff_grading_service.py b/lms/djangoapps/open_ended_grading/staff_grading_service.py similarity index 71% rename from lms/djangoapps/instructor/staff_grading_service.py rename to 
lms/djangoapps/open_ended_grading/staff_grading_service.py index ea8f0de074..5c6cec17eb 100644 --- a/lms/djangoapps/instructor/staff_grading_service.py +++ b/lms/djangoapps/open_ended_grading/staff_grading_service.py @@ -7,6 +7,8 @@ import logging import requests from requests.exceptions import RequestException, ConnectionError, HTTPError import sys +from grading_service import GradingService +from grading_service import GradingServiceError from django.conf import settings from django.http import HttpResponse, Http404 @@ -14,13 +16,11 @@ from django.http import HttpResponse, Http404 from courseware.access import has_access from util.json_request import expect_json from xmodule.course_module import CourseDescriptor +from student.models import unique_id_for_user log = logging.getLogger(__name__) -class GradingServiceError(Exception): - pass - class MockStaffGradingService(object): """ @@ -57,62 +57,16 @@ class MockStaffGradingService(object): return self.get_next(course_id, 'fake location', grader_id) -class StaffGradingService(object): +class StaffGradingService(GradingService): """ Interface to staff grading backend. """ def __init__(self, config): - self.username = config['username'] - self.password = config['password'] - self.url = config['url'] - - self.login_url = self.url + '/login/' + super(StaffGradingService, self).__init__(config) self.get_next_url = self.url + '/get_next_submission/' self.save_grade_url = self.url + '/save_grade/' self.get_problem_list_url = self.url + '/get_problem_list/' - self.session = requests.session() - - - def _login(self): - """ - Log into the staff grading service. - - Raises requests.exceptions.HTTPError if something goes wrong. - - Returns the decoded json dict of the response. 
- """ - response = self.session.post(self.login_url, - {'username': self.username, - 'password': self.password,}) - - response.raise_for_status() - - return response.json - - - def _try_with_login(self, operation): - """ - Call operation(), which should return a requests response object. If - the request fails with a 'login_required' error, call _login() and try - the operation again. - - Returns the result of operation(). Does not catch exceptions. - """ - response = operation() - if (response.json - and response.json.get('success') == False - and response.json.get('error') == 'login_required'): - # apparrently we aren't logged in. Try to fix that. - r = self._login() - if r and not r.get('success'): - log.warning("Couldn't log into staff_grading backend. Response: %s", - r) - # try again - response = operation() - response.raise_for_status() - - return response def get_problem_list(self, course_id, grader_id): """ @@ -130,17 +84,8 @@ class StaffGradingService(object): Raises: GradingServiceError: something went wrong with the connection. """ - op = lambda: self.session.get(self.get_problem_list_url, - allow_redirects = False, - params={'course_id': course_id, - 'grader_id': grader_id}) - try: - r = self._try_with_login(op) - except (RequestException, ConnectionError, HTTPError) as err: - # reraise as promised GradingServiceError, but preserve stacktrace. - raise GradingServiceError, str(err), sys.exc_info()[2] - - return r.text + params = {'course_id': course_id,'grader_id': grader_id} + return self.get(self.get_problem_list_url, params) def get_next(self, course_id, location, grader_id): @@ -161,17 +106,9 @@ class StaffGradingService(object): Raises: GradingServiceError: something went wrong with the connection. 
""" - op = lambda: self.session.get(self.get_next_url, - allow_redirects=False, + return self.get(self.get_next_url, params={'location': location, 'grader_id': grader_id}) - try: - r = self._try_with_login(op) - except (RequestException, ConnectionError, HTTPError) as err: - # reraise as promised GradingServiceError, but preserve stacktrace. - raise GradingServiceError, str(err), sys.exc_info()[2] - - return r.text def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped): @@ -186,28 +123,20 @@ class StaffGradingService(object): Raises: GradingServiceError if there's a problem connecting. """ - try: - data = {'course_id': course_id, - 'submission_id': submission_id, - 'score': score, - 'feedback': feedback, - 'grader_id': grader_id, - 'skipped': skipped} + data = {'course_id': course_id, + 'submission_id': submission_id, + 'score': score, + 'feedback': feedback, + 'grader_id': grader_id, + 'skipped': skipped} - op = lambda: self.session.post(self.save_grade_url, data=data, - allow_redirects=False) - r = self._try_with_login(op) - except (RequestException, ConnectionError, HTTPError) as err: - # reraise as promised GradingServiceError, but preserve stacktrace. - raise GradingServiceError, str(err), sys.exc_info()[2] + return self.post(self.save_grade_url, data=data) - return r.text - -# don't initialize until grading_service() is called--means that just +# don't initialize until staff_grading_service() is called--means that just # importing this file doesn't create objects that may not have the right config _service = None -def grading_service(): +def staff_grading_service(): """ Return a staff grading service instance--if settings.MOCK_STAFF_GRADING is True, returns a mock one, otherwise a real one. @@ -248,7 +177,7 @@ def _check_access(user, course_id): def get_next(request, course_id): """ Get the next thing to grade for course_id and with the location specified - in the . + in the request. 
Returns a json dict with the following keys: @@ -276,11 +205,11 @@ def get_next(request, course_id): if len(missing) > 0: return _err_response('Missing required keys {0}'.format( ', '.join(missing))) - grader_id = request.user.id + grader_id = unique_id_for_user(request.user) p = request.POST location = p['location'] - return HttpResponse(_get_next(course_id, request.user.id, location), + return HttpResponse(_get_next(course_id, grader_id, location), mimetype="application/json") @@ -308,12 +237,12 @@ def get_problem_list(request, course_id): """ _check_access(request.user, course_id) try: - response = grading_service().get_problem_list(course_id, request.user.id) + response = staff_grading_service().get_problem_list(course_id, unique_id_for_user(request.user)) return HttpResponse(response, mimetype="application/json") except GradingServiceError: log.exception("Error from grading service. server url: {0}" - .format(grading_service().url)) + .format(staff_grading_service().url)) return HttpResponse(json.dumps({'success': False, 'error': 'Could not connect to grading service'})) @@ -323,10 +252,10 @@ def _get_next(course_id, grader_id, location): Implementation of get_next (also called from save_grade) -- returns a json string """ try: - return grading_service().get_next(course_id, location, grader_id) + return staff_grading_service().get_next(course_id, location, grader_id) except GradingServiceError: log.exception("Error from grading service. 
server url: {0}" - .format(grading_service().url)) + .format(staff_grading_service().url)) return json.dumps({'success': False, 'error': 'Could not connect to grading service'}) @@ -357,14 +286,14 @@ def save_grade(request, course_id): return _err_response('Missing required keys {0}'.format( ', '.join(missing))) - grader_id = request.user.id + grader_id = unique_id_for_user(request.user) p = request.POST location = p['location'] skipped = 'skipped' in p try: - result_json = grading_service().save_grade(course_id, + result_json = staff_grading_service().save_grade(course_id, grader_id, p['submission_id'], p['score'], diff --git a/lms/djangoapps/open_ended_grading/tests.py b/lms/djangoapps/open_ended_grading/tests.py new file mode 100644 index 0000000000..0c4376a44b --- /dev/null +++ b/lms/djangoapps/open_ended_grading/tests.py @@ -0,0 +1,112 @@ +""" +Tests for open ended grading interfaces + +django-admin.py test --settings=lms.envs.test --pythonpath=. lms/djangoapps/open_ended_grading +""" + +from django.test import TestCase +from open_ended_grading import staff_grading_service +from django.core.urlresolvers import reverse +from django.contrib.auth.models import Group + +from courseware.access import _course_staff_group_name +import courseware.tests.tests as ct +from xmodule.modulestore.django import modulestore +import xmodule.modulestore.django +from nose import SkipTest +from mock import patch, Mock +import json + +from override_settings import override_settings + +_mock_service = staff_grading_service.MockStaffGradingService() + +@override_settings(MODULESTORE=ct.TEST_DATA_XML_MODULESTORE) +class TestStaffGradingService(ct.PageLoader): + ''' + Check that staff grading service proxy works. Basically just checking the + access control and error handling logic -- all the actual work is on the + backend. 
+ ''' + def setUp(self): + xmodule.modulestore.django._MODULESTORES = {} + + self.student = 'view@test.com' + self.instructor = 'view2@test.com' + self.password = 'foo' + self.location = 'TestLocation' + self.create_account('u1', self.student, self.password) + self.create_account('u2', self.instructor, self.password) + self.activate_user(self.student) + self.activate_user(self.instructor) + + self.course_id = "edX/toy/2012_Fall" + self.toy = modulestore().get_course(self.course_id) + def make_instructor(course): + group_name = _course_staff_group_name(course.location) + g = Group.objects.create(name=group_name) + g.user_set.add(ct.user(self.instructor)) + + make_instructor(self.toy) + + self.mock_service = staff_grading_service.staff_grading_service() + + self.logout() + + def test_access(self): + """ + Make sure only staff have access. + """ + self.login(self.student, self.password) + + # both get and post should return 404 + for view_name in ('staff_grading_get_next', 'staff_grading_save_grade'): + url = reverse(view_name, kwargs={'course_id': self.course_id}) + self.check_for_get_code(404, url) + self.check_for_post_code(404, url) + + + def test_get_next(self): + self.login(self.instructor, self.password) + + url = reverse('staff_grading_get_next', kwargs={'course_id': self.course_id}) + data = {'location': self.location} + + r = self.check_for_post_code(200, url, data) + d = json.loads(r.content) + self.assertTrue(d['success']) + self.assertEquals(d['submission_id'], self.mock_service.cnt) + self.assertIsNotNone(d['submission']) + self.assertIsNotNone(d['num_graded']) + self.assertIsNotNone(d['min_for_ml']) + self.assertIsNotNone(d['num_pending']) + self.assertIsNotNone(d['prompt']) + self.assertIsNotNone(d['ml_error_info']) + self.assertIsNotNone(d['max_score']) + self.assertIsNotNone(d['rubric']) + + + def test_save_grade(self): + self.login(self.instructor, self.password) + + url = reverse('staff_grading_save_grade', kwargs={'course_id': self.course_id}) + + 
data = {'score': '12', + 'feedback': 'great!', + 'submission_id': '123', + 'location': self.location} + r = self.check_for_post_code(200, url, data) + d = json.loads(r.content) + self.assertTrue(d['success'], str(d)) + self.assertEquals(d['submission_id'], self.mock_service.cnt) + + def test_get_problem_list(self): + self.login(self.instructor, self.password) + + url = reverse('staff_grading_get_problem_list', kwargs={'course_id': self.course_id}) + data = {} + + r = self.check_for_post_code(200, url, data) + d = json.loads(r.content) + self.assertTrue(d['success'], str(d)) + self.assertIsNotNone(d['problem_list']) diff --git a/lms/djangoapps/open_ended_grading/views.py b/lms/djangoapps/open_ended_grading/views.py new file mode 100644 index 0000000000..858c9a4fd5 --- /dev/null +++ b/lms/djangoapps/open_ended_grading/views.py @@ -0,0 +1,118 @@ +# Grading Views + +import logging +import urllib + +from django.conf import settings +from django.views.decorators.cache import cache_control +from mitxmako.shortcuts import render_to_response +from django.core.urlresolvers import reverse + +from student.models import unique_id_for_user +from courseware.courses import get_course_with_access + +from peer_grading_service import PeerGradingService +from peer_grading_service import MockPeerGradingService +from grading_service import GradingServiceError +import json +from .staff_grading import StaffGrading + + +log = logging.getLogger(__name__) + +template_imports = {'urllib': urllib} +if settings.MOCK_PEER_GRADING: + peer_gs = MockPeerGradingService() +else: + peer_gs = PeerGradingService(settings.PEER_GRADING_INTERFACE) + +""" +Reverses the URL from the name and the course id, and then adds a trailing slash if +it does not exist yet + +""" +def _reverse_with_slash(url_name, course_id): + ajax_url = reverse(url_name, kwargs={'course_id': course_id}) + if not ajax_url.endswith('/'): + ajax_url += '/' + return ajax_url + + +@cache_control(no_cache=True, no_store=True, 
must_revalidate=True) +def staff_grading(request, course_id): + """ + Show the instructor grading interface. + """ + course = get_course_with_access(request.user, course_id, 'staff') + + ajax_url = _reverse_with_slash('staff_grading', course_id) + + return render_to_response('instructor/staff_grading.html', { + 'course': course, + 'course_id': course_id, + 'ajax_url': ajax_url, + # Checked above + 'staff_access': True, }) + + +@cache_control(no_cache=True, no_store=True, must_revalidate=True) +def peer_grading(request, course_id): + ''' + Show a peer grading interface + ''' + course = get_course_with_access(request.user, course_id, 'load') + + # call problem list service + success = False + error_text = "" + problem_list = [] + try: + problem_list_json = peer_gs.get_problem_list(course_id, unique_id_for_user(request.user)) + problem_list_dict = json.loads(problem_list_json) + success = problem_list_dict['success'] + if 'error' in problem_list_dict: + error_text = problem_list_dict['error'] + + problem_list = problem_list_dict['problem_list'] + + except GradingServiceError: + error_text = "Error occured while contacting the grading service" + success = False + # catch error if if the json loads fails + except ValueError: + error_text = "Could not get problem list" + success = False + + ajax_url = _reverse_with_slash('peer_grading', course_id) + + return render_to_response('peer_grading/peer_grading.html', { + 'course': course, + 'course_id': course_id, + 'ajax_url': ajax_url, + 'success': success, + 'problem_list': problem_list, + 'error_text': error_text, + # Checked above + 'staff_access': False, }) + + +@cache_control(no_cache=True, no_store=True, must_revalidate=True) +def peer_grading_problem(request, course_id): + ''' + Show individual problem interface + ''' + course = get_course_with_access(request.user, course_id, 'load') + problem_location = request.GET.get("location") + + ajax_url = _reverse_with_slash('peer_grading', course_id) + + return 
render_to_response('peer_grading/peer_grading_problem.html', { + 'view_html': '', + 'course': course, + 'problem_location': problem_location, + 'course_id': course_id, + 'ajax_url': ajax_url, + # Checked above + 'staff_access': False, }) + + diff --git a/lms/djangoapps/portal/README.md b/lms/djangoapps/portal/README.md new file mode 100644 index 0000000000..09930ec8fb --- /dev/null +++ b/lms/djangoapps/portal/README.md @@ -0,0 +1,15 @@ +## acceptance_testing + +This fake django app is here to support acceptance testing using lettuce + +splinter (which wraps selenium). + +First you need to make sure that you've installed the requirements. +This includes lettuce, selenium, splinter, etc. +Do this with: +```pip install -r test-requirements.txt``` + +The settings.py environment file used is named acceptance.py. +It uses a test SQLite database defined as ../db/test_mitx.db. +You need to first start up the server separately, then run the lettuce scenarios. + +Full documentation can be found on the wiki at this link. 
diff --git a/lms/djangoapps/portal/__init__.py b/lms/djangoapps/portal/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/lms/djangoapps/portal/features/common.py b/lms/djangoapps/portal/features/common.py new file mode 100644 index 0000000000..20c2ab56b8 --- /dev/null +++ b/lms/djangoapps/portal/features/common.py @@ -0,0 +1,84 @@ +from lettuce import world, step#, before, after +from factories import * +from django.core.management import call_command +from nose.tools import assert_equals, assert_in +from lettuce.django import django_url +from django.conf import settings +from django.contrib.auth.models import User +from student.models import CourseEnrollment +import time + +from logging import getLogger +logger = getLogger(__name__) + +@step(u'I wait (?:for )?"(\d+)" seconds?$') +def wait(step, seconds): + time.sleep(float(seconds)) + +@step('I (?:visit|access|open) the homepage$') +def i_visit_the_homepage(step): + world.browser.visit(django_url('/')) + assert world.browser.is_element_present_by_css('header.global', 10) + +@step(u'I (?:visit|access|open) the dashboard$') +def i_visit_the_dashboard(step): + world.browser.visit(django_url('/dashboard')) + assert world.browser.is_element_present_by_css('section.container.dashboard', 5) + +@step(r'click (?:the|a) link (?:called|with the text) "([^"]*)"$') +def click_the_link_called(step, text): + world.browser.find_link_by_text(text).click() + +@step('I should be on the dashboard page$') +def i_should_be_on_the_dashboard(step): + assert world.browser.is_element_present_by_css('section.container.dashboard', 5) + assert world.browser.title == 'Dashboard' + +@step(u'I (?:visit|access|open) the courses page$') +def i_am_on_the_courses_page(step): + world.browser.visit(django_url('/courses')) + assert world.browser.is_element_present_by_css('section.courses') + +@step('I should see that the path is "([^"]*)"$') +def i_should_see_that_the_path_is(step, path): + assert world.browser.url == 
django_url(path) + +@step(u'the page title should be "([^"]*)"$') +def the_page_title_should_be(step, title): + assert world.browser.title == title + +@step(r'should see that the url is "([^"]*)"$') +def should_have_the_url(step, url): + assert_equals(world.browser.url, url) + +@step(r'should see (?:the|a) link (?:called|with the text) "([^"]*)"$') +def should_see_a_link_called(step, text): + assert len(world.browser.find_link_by_text(text)) > 0 + +@step(r'should see "(.*)" (?:somewhere|anywhere) in (?:the|this) page') +def should_see_in_the_page(step, text): + assert_in(text, world.browser.html) + +@step('I am logged in$') +def i_am_logged_in(step): + world.create_user('robot') + world.log_in('robot@edx.org', 'test') + +@step('I am not logged in$') +def i_am_not_logged_in(step): + world.browser.cookies.delete() + +@step(u'I am registered for a course$') +def i_am_registered_for_a_course(step): + world.create_user('robot') + u = User.objects.get(username='robot') + CourseEnrollment.objects.create(user=u, course_id='MITx/6.002x/2012_Fall') + world.log_in('robot@edx.org', 'test') + +@step(u'I am an edX user$') +def i_am_an_edx_user(step): + world.create_user('robot') + +@step(u'User "([^"]*)" is an edX user$') +def registered_edx_user(step, uname): + world.create_user(uname) diff --git a/lms/djangoapps/portal/features/factories.py b/lms/djangoapps/portal/features/factories.py new file mode 100644 index 0000000000..07b615f468 --- /dev/null +++ b/lms/djangoapps/portal/features/factories.py @@ -0,0 +1,34 @@ +import factory +from student.models import User, UserProfile, Registration +from datetime import datetime +import uuid + +class UserProfileFactory(factory.Factory): + FACTORY_FOR = UserProfile + + user = None + name = 'Jack Foo' + level_of_education = None + gender = 'm' + mailing_address = None + goals = 'World domination' + +class RegistrationFactory(factory.Factory): + FACTORY_FOR = Registration + + user = None + activation_key = uuid.uuid4().hex + +class 
UserFactory(factory.Factory): + FACTORY_FOR = User + + username = 'robot' + email = 'robot+test@edx.org' + password = 'test' + first_name = 'Robot' + last_name = 'Test' + is_staff = False + is_active = True + is_superuser = False + last_login = datetime(2012, 1, 1) + date_joined = datetime(2011, 1, 1) diff --git a/lms/djangoapps/portal/features/homepage.feature b/lms/djangoapps/portal/features/homepage.feature new file mode 100644 index 0000000000..06a45c4bfa --- /dev/null +++ b/lms/djangoapps/portal/features/homepage.feature @@ -0,0 +1,47 @@ +Feature: Homepage for web users + In order to get an idea what edX is about + As an anonymous web user + I want to check the information on the home page + + Scenario: User can see the "Login" button + Given I visit the homepage + Then I should see a link called "Log In" + + Scenario: User can see the "Sign up" button + Given I visit the homepage + Then I should see a link called "Sign Up" + + Scenario Outline: User can see main parts of the page + Given I visit the homepage + Then I should see a link called "<Link>" + When I click the link with the text "<Link>" + Then I should see that the path is "<Path>" + + Examples: + | Link | Path | + | Find Courses | /courses | + | About | /about | + | Jobs | /jobs | + | Contact | /contact | + + Scenario: User can visit the blog + Given I visit the homepage + When I click the link with the text "Blog" + Then I should see that the url is "http://blog.edx.org/" + + # TODO: test according to domain or policy + Scenario: User can see the partner institutions + Given I visit the homepage + Then I should see "<Partner>" in the Partners section + + Examples: + | Partner | + | MITx | + | HarvardX | + | BerkeleyX | + | UTx | + | WellesleyX | + | GeorgetownX | + + # # TODO: Add scenario that tests the courses available + # # using a policy or a configuration file diff --git a/lms/djangoapps/portal/features/homepage.py b/lms/djangoapps/portal/features/homepage.py new file mode 100644 index 0000000000..638d65077c --- 
/dev/null +++ b/lms/djangoapps/portal/features/homepage.py @@ -0,0 +1,8 @@ +from lettuce import world, step +from nose.tools import assert_in + +@step('I should see "([^"]*)" in the Partners section$') +def i_should_see_partner(step, partner): + partners = world.browser.find_by_css(".partner .name span") + names = set(span.text for span in partners) + assert_in(partner, names) diff --git a/lms/djangoapps/portal/features/login.feature b/lms/djangoapps/portal/features/login.feature new file mode 100644 index 0000000000..23317b4876 --- /dev/null +++ b/lms/djangoapps/portal/features/login.feature @@ -0,0 +1,27 @@ +Feature: Login in as a registered user + As a registered user + In order to access my content + I want to be able to login in to edX + + Scenario: Login to an unactivated account + Given I am an edX user + And I am an unactivated user + And I visit the homepage + When I click the link with the text "Log In" + And I submit my credentials on the login form + Then I should see the login error message "This account has not been activated" + + Scenario: Login to an activated account + Given I am an edX user + And I am an activated user + And I visit the homepage + When I click the link with the text "Log In" + And I submit my credentials on the login form + Then I should be on the dashboard page + + Scenario: Logout of a signed in account + Given I am logged in + When I click the dropdown arrow + And I click the link with the text "Log Out" + Then I should see a link with the text "Log In" + And I should see that the path is "/" diff --git a/lms/djangoapps/portal/features/login.py b/lms/djangoapps/portal/features/login.py new file mode 100644 index 0000000000..5f200eb259 --- /dev/null +++ b/lms/djangoapps/portal/features/login.py @@ -0,0 +1,45 @@ +from lettuce import step, world +from django.contrib.auth.models import User + +@step('I am an unactivated user$') +def i_am_an_unactivated_user(step): + user_is_an_unactivated_user('robot') + +@step('I am an activated 
user$') +def i_am_an_activated_user(step): + user_is_an_activated_user('robot') + +@step('I submit my credentials on the login form') +def i_submit_my_credentials_on_the_login_form(step): + fill_in_the_login_form('email', 'robot@edx.org') + fill_in_the_login_form('password', 'test') + login_form = world.browser.find_by_css('form#login_form') + login_form.find_by_value('Access My Courses').click() + +@step(u'I should see the login error message "([^"]*)"$') +def i_should_see_the_login_error_message(step, msg): + login_error_div = world.browser.find_by_css('form#login_form #login_error') + assert (msg in login_error_div.text) + +@step(u'click the dropdown arrow$') +def click_the_dropdown(step): + css = ".dropdown" + e = world.browser.find_by_css(css) + e.click() + +#### helper functions + +def user_is_an_unactivated_user(uname): + u = User.objects.get(username=uname) + u.is_active = False + u.save() + +def user_is_an_activated_user(uname): + u = User.objects.get(username=uname) + u.is_active = True + u.save() + +def fill_in_the_login_form(field, value): + login_form = world.browser.find_by_css('form#login_form') + form_field = login_form.find_by_name(field) + form_field.fill(value) diff --git a/lms/djangoapps/portal/features/registration.feature b/lms/djangoapps/portal/features/registration.feature new file mode 100644 index 0000000000..d8a6796ee3 --- /dev/null +++ b/lms/djangoapps/portal/features/registration.feature @@ -0,0 +1,17 @@ +Feature: Register for a course + As a registered user + In order to access my class content + I want to register for a class on the edX website + + Scenario: I can register for a course + Given I am logged in + And I visit the courses page + When I register for the course numbered "6.002x" + Then I should see the course numbered "6.002x" in my dashboard + + Scenario: I can unregister for a course + Given I am registered for a course + And I visit the dashboard + When I click the link with the text "Unregister" + And I press the 
"Unregister" button in the Unenroll dialog + Then I should see "Looks like you haven't registered for any courses yet." somewhere in the page \ No newline at end of file diff --git a/lms/djangoapps/portal/features/registration.py b/lms/djangoapps/portal/features/registration.py new file mode 100644 index 0000000000..124bed4923 --- /dev/null +++ b/lms/djangoapps/portal/features/registration.py @@ -0,0 +1,24 @@ +from lettuce import world, step + +@step('I register for the course numbered "([^"]*)"$') +def i_register_for_the_course(step, course): + courses_section = world.browser.find_by_css('section.courses') + course_link_css = 'article[id*="%s"] a' % course + course_link = courses_section.find_by_css(course_link_css).first + course_link.click() + + intro_section = world.browser.find_by_css('section.intro') + register_link = intro_section.find_by_css('a.register') + register_link.click() + + assert world.browser.is_element_present_by_css('section.container.dashboard') + +@step(u'I should see the course numbered "([^"]*)" in my dashboard$') +def i_should_see_that_course_in_my_dashboard(step, course): + course_link_css = 'section.my-courses a[href*="%s"]' % course + assert world.browser.is_element_present_by_css(course_link_css) + +@step(u'I press the "([^"]*)" button in the Unenroll dialog') +def i_press_the_button_in_the_unenroll_dialog(step, value): + button_css = 'section#unenroll-modal input[value="%s"]' % value + world.browser.find_by_css(button_css).click() diff --git a/lms/djangoapps/portal/features/signup.feature b/lms/djangoapps/portal/features/signup.feature new file mode 100644 index 0000000000..b28a6819a1 --- /dev/null +++ b/lms/djangoapps/portal/features/signup.feature @@ -0,0 +1,16 @@ +Feature: Sign in + In order to use the edX content + As a new user + I want to signup for a student account + + Scenario: Sign up from the homepage + Given I visit the homepage + When I click the link with the text "Sign Up" + And I fill in "email" on the registration 
form with "robot2@edx.org" + And I fill in "password" on the registration form with "test" + And I fill in "username" on the registration form with "robot2" + And I fill in "name" on the registration form with "Robot Two" + And I check the checkbox named "terms_of_service" + And I check the checkbox named "honor_code" + And I press the "Create My Account" button on the registration form + Then I should see "THANKS FOR REGISTERING!" in the dashboard banner diff --git a/lms/djangoapps/portal/features/signup.py b/lms/djangoapps/portal/features/signup.py new file mode 100644 index 0000000000..afde72b589 --- /dev/null +++ b/lms/djangoapps/portal/features/signup.py @@ -0,0 +1,22 @@ +from lettuce import world, step + +@step('I fill in "([^"]*)" on the registration form with "([^"]*)"$') +def when_i_fill_in_field_on_the_registration_form_with_value(step, field, value): + register_form = world.browser.find_by_css('form#register_form') + form_field = register_form.find_by_name(field) + form_field.fill(value) + +@step('I press the "([^"]*)" button on the registration form$') +def i_press_the_button_on_the_registration_form(step, button): + register_form = world.browser.find_by_css('form#register_form') + register_form.find_by_value(button).click() + +@step('I check the checkbox named "([^"]*)"$') +def i_check_checkbox(step, checkbox): + world.browser.find_by_name(checkbox).check() + +@step('I should see "([^"]*)" in the dashboard banner$') +def i_should_see_text_in_the_dashboard_banner_section(step, text): + css_selector = "section.dashboard-banner h2" + assert (text in world.browser.find_by_css(css_selector).text) + \ No newline at end of file diff --git a/lms/djangoapps/terrain/__init__.py b/lms/djangoapps/terrain/__init__.py new file mode 100644 index 0000000000..dd6869e7fd --- /dev/null +++ b/lms/djangoapps/terrain/__init__.py @@ -0,0 +1,6 @@ +# Use this as your lettuce terrain file so that the common steps +# across all lms apps can be put in terrain/common +# See 
https://groups.google.com/forum/?fromgroups=#!msg/lettuce-users/5VyU9B4HcX8/USgbGIJdS5QJ +from terrain.browser import * +from terrain.steps import * +from terrain.factories import * \ No newline at end of file diff --git a/lms/djangoapps/terrain/browser.py b/lms/djangoapps/terrain/browser.py new file mode 100644 index 0000000000..7fe684e153 --- /dev/null +++ b/lms/djangoapps/terrain/browser.py @@ -0,0 +1,26 @@ +from lettuce import before, after, world +from splinter.browser import Browser +from logging import getLogger +import time + +logger = getLogger(__name__) +logger.info("Loading the lettuce acceptance testing terrain file...") + +from django.core.management import call_command + +@before.harvest +def initial_setup(server): + # Launch firefox + world.browser = Browser('firefox') + +@before.each_scenario +def reset_data(scenario): + # Clean out the django test database defined in the + # envs/acceptance.py file: mitx_all/db/test_mitx.db + logger.debug("Flushing the test database...") + call_command('flush', interactive=False) + +@after.all +def teardown_browser(total): + # Quit firefox + world.browser.quit() diff --git a/lms/djangoapps/terrain/factories.py b/lms/djangoapps/terrain/factories.py new file mode 100644 index 0000000000..ddab9e2b06 --- /dev/null +++ b/lms/djangoapps/terrain/factories.py @@ -0,0 +1,34 @@ +import factory +from student.models import User, UserProfile, Registration +from datetime import datetime +import uuid + +class UserProfileFactory(factory.Factory): + FACTORY_FOR = UserProfile + + user = None + name = 'Robot Test' + level_of_education = None + gender = 'm' + mailing_address = None + goals = 'World domination' + +class RegistrationFactory(factory.Factory): + FACTORY_FOR = Registration + + user = None + activation_key = uuid.uuid4().hex + +class UserFactory(factory.Factory): + FACTORY_FOR = User + + username = 'robot' + email = 'robot+test@edx.org' + password = 'test' + first_name = 'Robot' + last_name = 'Test' + is_staff = False + 
is_active = True + is_superuser = False + last_login = datetime(2012, 1, 1) + date_joined = datetime(2011, 1, 1) diff --git a/lms/djangoapps/terrain/steps.py b/lms/djangoapps/terrain/steps.py new file mode 100644 index 0000000000..ce82a0a044 --- /dev/null +++ b/lms/djangoapps/terrain/steps.py @@ -0,0 +1,171 @@ +from lettuce import world, step +from factories import * +from django.core.management import call_command +from lettuce.django import django_url +from django.conf import settings +from django.contrib.auth.models import User +from student.models import CourseEnrollment +from urllib import quote_plus +from nose.tools import assert_equals +from bs4 import BeautifulSoup +import time +import re +import os.path + +from logging import getLogger +logger = getLogger(__name__) + +@step(u'I wait (?:for )?"(\d+)" seconds?$') +def wait(step, seconds): + time.sleep(float(seconds)) + +@step('I (?:visit|access|open) the homepage$') +def i_visit_the_homepage(step): + world.browser.visit(django_url('/')) + assert world.browser.is_element_present_by_css('header.global', 10) + +@step(u'I (?:visit|access|open) the dashboard$') +def i_visit_the_dashboard(step): + world.browser.visit(django_url('/dashboard')) + assert world.browser.is_element_present_by_css('section.container.dashboard', 5) + +@step('I should be on the dashboard page$') +def i_should_be_on_the_dashboard(step): + assert world.browser.is_element_present_by_css('section.container.dashboard', 5) + assert world.browser.title == 'Dashboard' + +@step(u'I (?:visit|access|open) the courses page$') +def i_am_on_the_courses_page(step): + world.browser.visit(django_url('/courses')) + assert world.browser.is_element_present_by_css('section.courses') + +@step(u'I press the "([^"]*)" button$') +def and_i_press_the_button(step, value): + button_css = 'input[value="%s"]' % value + world.browser.find_by_css(button_css).first.click() + +@step('I should see that the path is "([^"]*)"$') +def i_should_see_that_the_path_is(step, path): 
+ assert world.browser.url == django_url(path) + +@step(u'the page title should be "([^"]*)"$') +def the_page_title_should_be(step, title): + assert_equals(world.browser.title, title) + +@step('I am a logged in user$') +def i_am_logged_in_user(step): + create_user('robot') + log_in('robot@edx.org','test') + +@step('I am not logged in$') +def i_am_not_logged_in(step): + world.browser.cookies.delete() + +@step('I am registered for a course$') +def i_am_registered_for_a_course(step): + create_user('robot') + u = User.objects.get(username='robot') + CourseEnrollment.objects.get_or_create(user=u, course_id='MITx/6.002x/2012_Fall') + +@step('I am registered for course "([^"]*)"$') +def i_am_registered_for_course_by_id(step, course_id): + register_by_course_id(course_id) + +@step('I am staff for course "([^"]*)"$') +def i_am_staff_for_course_by_id(step, course_id): + register_by_course_id(course_id, True) + +@step('I log in$') +def i_log_in(step): + log_in('robot@edx.org','test') + +@step(u'I am an edX user$') +def i_am_an_edx_user(step): + create_user('robot') + +#### helper functions +@world.absorb +def create_user(uname): + portal_user = UserFactory.build(username=uname, email=uname + '@edx.org') + portal_user.set_password('test') + portal_user.save() + + registration = RegistrationFactory(user=portal_user) + registration.register(portal_user) + registration.activate() + + user_profile = UserProfileFactory(user=portal_user) + +@world.absorb +def log_in(email, password): + world.browser.cookies.delete() + world.browser.visit(django_url('/')) + world.browser.is_element_present_by_css('header.global', 10) + world.browser.click_link_by_href('#login-modal') + login_form = world.browser.find_by_css('form#login_form') + login_form.find_by_name('email').fill(email) + login_form.find_by_name('password').fill(password) + login_form.find_by_name('submit').click() + + # wait for the page to redraw + assert world.browser.is_element_present_by_css('.content-wrapper', 10) + 
+@world.absorb +def register_by_course_id(course_id, is_staff=False): + create_user('robot') + u = User.objects.get(username='robot') + if is_staff: + u.is_staff=True + u.save() + CourseEnrollment.objects.get_or_create(user=u, course_id=course_id) + +@world.absorb +def save_the_html(path='/tmp'): + u = world.browser.url + html = world.browser.html.encode('ascii', 'ignore') + filename = '%s.html' % quote_plus(u) + f = open('%s/%s' % (path, filename), 'w') + f.write(html) + f.close + +@world.absorb +def save_the_course_content(path='/tmp'): + html = world.browser.html.encode('ascii', 'ignore') + soup = BeautifulSoup(html) + + # get rid of the header, we only want to compare the body + soup.head.decompose() + + # for now, remove the data-id attributes, because they are + # causing mismatches between cms-master and master + for item in soup.find_all(attrs={'data-id': re.compile('.*')}): + del item['data-id'] + + # we also need to remove them from unrendered problems, + # where they are contained in the text of divs instead of + # in attributes of tags + # Be careful of whether or not it was the last attribute + # and needs a trailing space + for item in soup.find_all(text=re.compile(' data-id=".*?" ')): + s = unicode(item.string) + item.string.replace_with(re.sub(' data-id=".*?" 
', ' ', s)) + + for item in soup.find_all(text=re.compile(' data-id=".*?"')): + s = unicode(item.string) + item.string.replace_with(re.sub(' data-id=".*?"', ' ', s)) + + # prettify the html so it will compare better, with + # each HTML tag on its own line + output = soup.prettify() + + # use string slicing to grab everything after 'courseware/' in the URL + u = world.browser.url + section_url = u[u.find('courseware/')+11:] + + if not os.path.exists(path): + os.makedirs(path) + + filename = '%s.html' % (quote_plus(section_url)) + f = open('%s/%s' % (path, filename), 'w') + f.write(output) + f.close diff --git a/lms/envs/acceptance.py b/lms/envs/acceptance.py new file mode 100644 index 0000000000..e0857a4392 --- /dev/null +++ b/lms/envs/acceptance.py @@ -0,0 +1,41 @@ +""" +This config file extends the test environment configuration +so that we can run the lettuce acceptance tests. +""" +from .test import * + +# You need to start the server in debug mode, +# otherwise the browser will not render the pages correctly +DEBUG = True + +# Show the courses that are in the data directory +COURSES_ROOT = ENV_ROOT / "data" +DATA_DIR = COURSES_ROOT +MODULESTORE = { + 'default': { + 'ENGINE': 'xmodule.modulestore.xml.XMLModuleStore', + 'OPTIONS': { + 'data_dir': DATA_DIR, + 'default_class': 'xmodule.hidden_module.HiddenDescriptor', + } + } +} + +# Set this up so that rake lms[acceptance] and running the +# harvest command both use the same (test) database +# which they can flush without messing up your dev db +DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.sqlite3', + 'NAME': ENV_ROOT / "db" / "test_mitx.db", + 'TEST_NAME': ENV_ROOT / "db" / "test_mitx.db", + } +} + +# Do not display the YouTube videos in the browser while running the +# acceptance tests. 
This makes them faster and more reliable +MITX_FEATURES['STUB_VIDEO_FOR_TESTING'] = True + +# Include the lettuce app for acceptance testing, including the 'harvest' django-admin command +INSTALLED_APPS += ('lettuce.django',) +LETTUCE_APPS = ('portal',) # dummy app covers the home page, login, registration, and course enrollment diff --git a/lms/envs/aws.py b/lms/envs/aws.py index 0516bddc56..7b8c48f4af 100644 --- a/lms/envs/aws.py +++ b/lms/envs/aws.py @@ -76,8 +76,8 @@ DATABASES = AUTH_TOKENS['DATABASES'] XQUEUE_INTERFACE = AUTH_TOKENS['XQUEUE_INTERFACE'] -STAFF_GRADING_INTERFACE = AUTH_TOKENS.get('STAFF_GRADING_INTERFACE') - +STAFF_GRADING_INTERFACE = AUTH_TOKENS.get('STAFF_GRADING_INTERFACE', STAFF_GRADING_INTERFACE) +PEER_GRADING_INTERFACE = AUTH_TOKENS.get('PEER_GRADING_INTERFACE', PEER_GRADING_INTERFACE) PEARSON_TEST_USER = "pearsontest" PEARSON_TEST_PASSWORD = AUTH_TOKENS.get("PEARSON_TEST_PASSWORD") diff --git a/lms/envs/cms/acceptance.py b/lms/envs/cms/acceptance.py new file mode 100644 index 0000000000..e5ee2937f4 --- /dev/null +++ b/lms/envs/cms/acceptance.py @@ -0,0 +1,23 @@ +""" +This config file is a copy of the dev environment without the Debug +Toolbar. It is suitable to run against acceptance tests. 
+ +""" +from .dev import * + +# REMOVE DEBUG TOOLBAR + +INSTALLED_APPS = tuple(e for e in INSTALLED_APPS if e != 'debug_toolbar') +INSTALLED_APPS = tuple(e for e in INSTALLED_APPS if e != 'debug_toolbar_mongo') + +MIDDLEWARE_CLASSES = tuple(e for e in MIDDLEWARE_CLASSES \ + if e != 'debug_toolbar.middleware.DebugToolbarMiddleware') + + +########################### LETTUCE TESTING ########################## +MITX_FEATURES['DISPLAY_TOY_COURSES'] = True + +INSTALLED_APPS += ('lettuce.django',) +# INSTALLED_APPS += ('portal',) + +LETTUCE_APPS = ('portal',) # dummy app covers the home page, login, registration, and course enrollment diff --git a/lms/envs/common.py b/lms/envs/common.py index 48badbd325..4fe4a5a3dd 100644 --- a/lms/envs/common.py +++ b/lms/envs/common.py @@ -76,6 +76,8 @@ MITX_FEATURES = { 'DISABLE_LOGIN_BUTTON': False, # used in systems where login is automatic, eg MIT SSL + 'STUB_VIDEO_FOR_TESTING': False, # do not display video when running automated acceptance tests + # extrernal access methods 'ACCESS_REQUIRE_STAFF_FOR_COURSE': False, 'AUTH_USE_OPENID': False, @@ -187,6 +189,9 @@ DEBUG_TRACK_LOG = False MITX_ROOT_URL = '' +LOGIN_REDIRECT_URL = MITX_ROOT_URL + '/accounts/login' +LOGIN_URL = MITX_ROOT_URL + '/accounts/login' + COURSE_NAME = "6.002_Spring_2012" COURSE_NUMBER = "6.002x" COURSE_TITLE = "Circuits and Electronics" @@ -324,7 +329,14 @@ WIKI_LINK_DEFAULT_LEVEL = 2 ################################# Staff grading config ##################### -STAFF_GRADING_INTERFACE = None +#By setting up the default settings with an incorrect user name and password, +# will get an error when attempting to connect +STAFF_GRADING_INTERFACE = { + 'url': 'http://sandbox-grader-001.m.edx.org/staff_grading', + 'username': 'incorrect_user', + 'password': 'incorrect_pass', + } + # Used for testing, debugging MOCK_STAFF_GRADING = False @@ -332,6 +344,19 @@ MOCK_STAFF_GRADING = False PEARSONVUE_SIGNINPAGE_URL = 
"https://www1.pearsonvue.com/testtaker/signin/SignInPage/EDX" +################################# Peer grading config ##################### + +#By setting up the default settings with an incorrect user name and password, +# will get an error when attempting to connect +PEER_GRADING_INTERFACE = { + 'url': 'http://sandbox-grader-001.m.edx.org/peer_grading', + 'username': 'incorrect_user', + 'password': 'incorrect_pass', + } + +# Used for testing, debugging +MOCK_PEER_GRADING = False + ################################# Jasmine ################################### JASMINE_TEST_DIRECTORY = PROJECT_ROOT + '/static/coffee' @@ -407,7 +432,9 @@ courseware_only_js += [ ] main_vendor_js = [ + 'js/vendor/RequireJS.js', 'js/vendor/json2.js', + 'js/vendor/RequireJS.js', 'js/vendor/jquery.min.js', 'js/vendor/jquery-ui.min.js', 'js/vendor/jquery.cookie.js', @@ -418,6 +445,7 @@ main_vendor_js = [ discussion_js = sorted(glob2.glob(PROJECT_ROOT / 'static/coffee/src/discussion/**/*.coffee')) staff_grading_js = sorted(glob2.glob(PROJECT_ROOT / 'static/coffee/src/staff_grading/**/*.coffee')) +peer_grading_js = sorted(glob2.glob(PROJECT_ROOT / 'static/coffee/src/peer_grading/**/*.coffee')) # Load javascript from all of the available xmodules, and @@ -493,6 +521,7 @@ PIPELINE_JS = { for pth in sorted(glob2.glob(PROJECT_ROOT / 'static/coffee/src/**/*.coffee'))\ if (pth not in courseware_only_js and pth not in discussion_js and + pth not in peer_grading_js and pth not in staff_grading_js) ] + [ 'js/form.ext.js', @@ -526,8 +555,11 @@ PIPELINE_JS = { 'staff_grading' : { 'source_filenames': [pth.replace(PROJECT_ROOT / 'static/', '') for pth in staff_grading_js], 'output_filename': 'js/staff_grading.js' + }, + 'peer_grading' : { + 'source_filenames': [pth.replace(PROJECT_ROOT / 'static/', '') for pth in peer_grading_js], + 'output_filename': 'js/peer_grading.js' } - } PIPELINE_DISABLE_WRAPPER = True @@ -600,6 +632,7 @@ INSTALLED_APPS = ( 'util', 'certificates', 'instructor', + 
'open_ended_grading', 'psychometrics', 'licenses', diff --git a/lms/envs/dev.py b/lms/envs/dev.py index 0ad42f67d3..f5999bf52e 100644 --- a/lms/envs/dev.py +++ b/lms/envs/dev.py @@ -102,6 +102,10 @@ SUBDOMAIN_BRANDING = { COMMENTS_SERVICE_KEY = "PUT_YOUR_API_KEY_HERE" +################################# mitx revision string ##################### + +MITX_VERSION_STRING = os.popen('cd %s; git describe' % REPO_ROOT).read().strip() + ################################# Staff grading config ##################### STAFF_GRADING_INTERFACE = { @@ -110,6 +114,13 @@ STAFF_GRADING_INTERFACE = { 'password': 'abcd', } +################################# Peer grading config ##################### + +PEER_GRADING_INTERFACE = { + 'url': 'http://127.0.0.1:3033/peer_grading', + 'username': 'lms', + 'password': 'abcd', + } ################################ LMS Migration ################################# MITX_FEATURES['ENABLE_LMS_MIGRATION'] = True MITX_FEATURES['ACCESS_REQUIRE_STAFF_FOR_COURSE'] = False # require that user be in the staff_* group to be able to enroll diff --git a/lms/envs/test.py b/lms/envs/test.py index ef2a343db4..e9e4a43c6f 100644 --- a/lms/envs/test.py +++ b/lms/envs/test.py @@ -44,12 +44,6 @@ STATUS_MESSAGE_PATH = TEST_ROOT / "status_message.json" COURSES_ROOT = TEST_ROOT / "data" DATA_DIR = COURSES_ROOT -LOGGING = get_logger_config(TEST_ROOT / "log", - logging_env="dev", - tracking_filename="tracking.log", - dev_env=True, - debug=True) - COMMON_TEST_DATA_ROOT = COMMON_ROOT / "test" / "data" # Where the content data is checked out. This may not exist on jenkins. 
GITHUB_REPO_ROOT = ENV_ROOT / "data" @@ -68,6 +62,7 @@ XQUEUE_WAITTIME_BETWEEN_REQUESTS = 5 # seconds # Don't rely on a real staff grading backend MOCK_STAFF_GRADING = True +MOCK_PEER_GRADING = True # TODO (cpennington): We need to figure out how envs/test.py can inject things # into common.py so that we don't have to repeat this sort of thing diff --git a/lms/lib/symmath/formula.py b/lms/lib/symmath/formula.py index 1698b004d9..bab0ab3691 100644 --- a/lms/lib/symmath/formula.py +++ b/lms/lib/symmath/formula.py @@ -422,7 +422,8 @@ class formula(object): def GetContentMathML(self, asciimath, mathml): # URL = 'http://192.168.1.2:8080/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo' - URL = 'http://127.0.0.1:8080/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo' + # URL = 'http://127.0.0.1:8080/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo' + URL = 'https://math-xserver.mitx.mit.edu/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo' if 1: payload = {'asciiMathInput': asciimath, @@ -430,7 +431,7 @@ class formula(object): #'asciiMathML':unicode(mathml).encode('utf-8'), } headers = {'User-Agent': "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.13) Gecko/20080311 Firefox/2.0.0.13"} - r = requests.post(URL, data=payload, headers=headers) + r = requests.post(URL, data=payload, headers=headers, verify=False) r.encoding = 'utf-8' ret = r.text #print "encoding: ",r.encoding diff --git a/lms/static/coffee/files.json b/lms/static/coffee/files.json index 4721ef58bb..5dc03613b9 100644 --- a/lms/static/coffee/files.json +++ b/lms/static/coffee/files.json @@ -1,5 +1,6 @@ { "js_files": [ + "/static/js/vendor/RequireJS.js", "/static/js/vendor/jquery.min.js", "/static/js/vendor/jquery-ui.min.js", "/static/js/vendor/jquery.leanModal.min.js", diff --git a/lms/static/coffee/spec/requirejs_spec.coffee b/lms/static/coffee/spec/requirejs_spec.coffee new file mode 100644 index 0000000000..10d34a2f75 --- /dev/null +++ b/lms/static/coffee/spec/requirejs_spec.coffee @@ 
-0,0 +1,89 @@ +describe "RequireJS namespacing", -> + beforeEach -> + + # Jasmine does not provide a way to use the typeof operator. We need + # to create our own custom matchers so that a TypeError is not thrown. + @addMatchers + requirejsTobeUndefined: -> + typeof requirejs is "undefined" + + requireTobeUndefined: -> + typeof require is "undefined" + + defineTobeUndefined: -> + typeof define is "undefined" + + + it "check that the RequireJS object is present in the global namespace", -> + expect(RequireJS).toEqual jasmine.any(Object) + expect(window.RequireJS).toEqual jasmine.any(Object) + + it "check that requirejs(), require(), and define() are not in the global namespace", -> + + # The custom matchers that we defined in the beforeEach() function do + # not operate on an object. We pass a dummy empty object {} not to + # confuse Jasmine. + expect({}).requirejsTobeUndefined() + expect({}).requireTobeUndefined() + expect({}).defineTobeUndefined() + expect(window.requirejs).not.toBeDefined() + expect(window.require).not.toBeDefined() + expect(window.define).not.toBeDefined() + + +describe "RequireJS module creation", -> + inDefineCallback = undefined + inRequireCallback = undefined + it "check that we can use RequireJS to define() and require() a module", -> + + # Because Require JS works asynchronously when defining and requiring + # modules, we need to use the special Jasmine functions runs(), and + # waitsFor() to set up this test. + runs -> + + # Initialize the variable that we will test for. They will be set + # to true in the appropriate callback functions called by Require + # JS. If their values do not change, this will mean that something + # is not working as is intended. + inDefineCallback = false + inRequireCallback = false + + # Define our test module. + RequireJS.define "test_module", [], -> + inDefineCallback = true + + # This module returns an object. It can be accessed via the + # Require JS require() function. 
+ module_status: "OK" + + + # Require our defined test module. + RequireJS.require ["test_module"], (test_module) -> + inRequireCallback = true + + # If our test module was defined properly, then we should + # be able to get the object it returned, and query some + # property. + expect(test_module.module_status).toBe "OK" + + + + # We will wait for a specified amount of time (1 second), before + # checking if our module was defined and that we were able to + # require() the module. + waitsFor (-> + + # If at least one of the callback functions was not reached, we + # fail this test. + return false if (inDefineCallback isnt true) or (inRequireCallback isnt true) + + # Both of the callbacks were reached. + true + ), "We should eventually end up in the defined callback", 1000 + + # The final test behavior, after waitsFor() finishes waiting. + runs -> + expect(inDefineCallback).toBeTruthy() + expect(inRequireCallback).toBeTruthy() + + diff --git a/lms/static/coffee/src/main.coffee b/lms/static/coffee/src/main.coffee index ec5cbdec5b..df4c8861f6 100644 --- a/lms/static/coffee/src/main.coffee +++ b/lms/static/coffee/src/main.coffee @@ -32,8 +32,18 @@ $ -> $('#login').click -> $('#login_form input[name="email"]').focus() + _gaq.push(['_trackPageview', '/login']) false $('#signup').click -> $('#signup-modal input[name="email"]').focus() + _gaq.push(['_trackPageview', '/signup']) false + + # fix for ie + if !Array::indexOf + Array::indexOf = (obj, start = 0) -> + for ele, i in this[start..] 
+ if ele is obj + return i + start + return -1 diff --git a/lms/static/coffee/src/peer_grading/peer_grading.coffee b/lms/static/coffee/src/peer_grading/peer_grading.coffee new file mode 100644 index 0000000000..0736057df8 --- /dev/null +++ b/lms/static/coffee/src/peer_grading/peer_grading.coffee @@ -0,0 +1,13 @@ +# This is a simple class that just hides the error container +# and message container when they are empty +# Can (and should be) expanded upon when our problem list +# becomes more sophisticated +class PeerGrading + constructor: () -> + @error_container = $('.error-container') + @error_container.toggle(not @error_container.is(':empty')) + + @message_container = $('.message-container') + @message_container.toggle(not @message_container.is(':empty')) + +$(document).ready(() -> new PeerGrading()) diff --git a/lms/static/coffee/src/peer_grading/peer_grading_problem.coffee b/lms/static/coffee/src/peer_grading/peer_grading_problem.coffee new file mode 100644 index 0000000000..e294c50f7c --- /dev/null +++ b/lms/static/coffee/src/peer_grading/peer_grading_problem.coffee @@ -0,0 +1,390 @@ +################################## +# +# This is the JS that renders the peer grading problem page. 
+# Fetches the correct problem and/or calibration essay +# and sends back the grades +# +# Should not be run when we don't have a location to send back +# to the server +# +# PeerGradingProblemBackend - +# makes all the ajax requests and provides a mock interface +# for testing purposes +# +# PeerGradingProblem - +# handles the rendering and user interactions with the interface +# +################################## +class PeerGradingProblemBackend + constructor: (ajax_url, mock_backend) -> + @mock_backend = mock_backend + @ajax_url = ajax_url + @mock_cnt = 0 + + post: (cmd, data, callback) -> + if @mock_backend + callback(@mock(cmd, data)) + else + # if this post request fails, the error callback will catch it + $.post(@ajax_url + cmd, data, callback) + .error => callback({success: false, error: "Error occured while performing this operation"}) + + mock: (cmd, data) -> + if cmd == 'is_student_calibrated' + # change to test each version + response = + success: true + calibrated: @mock_cnt >= 2 + else if cmd == 'show_calibration_essay' + #response = + # success: false + # error: "There was an error" + @mock_cnt++ + response = + success: true + submission_id: 1 + submission_key: 'abcd' + student_response: ''' + Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum et Malorum" (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum, "Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32. 
+ +The standard chunk of Lorem Ipsum used since the 1500s is reproduced below for those interested. Sections 1.10.32 and 1.10.33 from "de Finibus Bonorum et Malorum" by Cicero are also reproduced in their exact original form, accompanied by English versions from the 1914 translation by H. Rackham. + ''' + prompt: ''' +

S11E3: Metal Bands

+

Shown below are schematic band diagrams for two different metals. Both diagrams appear different, yet both of the elements are undisputably metallic in nature.

+

* Why is it that both sodium and magnesium behave as metals, even though the s-band of magnesium is filled?

+

This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.

+ ''' + rubric: ''' +
    +
  • Metals tend to be good electronic conductors, meaning that they have a large number of electrons which are able to access empty (mobile) energy states within the material.
  • +
  • Sodium has a half-filled s-band, so there are a number of empty states immediately above the highest occupied energy levels within the band.
  • +
  • Magnesium has a full s-band, but the the s-band and p-band overlap in magnesium. Thus are still a large number of available energy states immediately above the s-band highest occupied energy level.
  • +
+ +

Please score your response according to how many of the above components you identified:

+ ''' + max_score: 4 + else if cmd == 'get_next_submission' + response = + success: true + submission_id: 1 + submission_key: 'abcd' + student_response: '''Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed nec tristique ante. Proin at mauris sapien, quis varius leo. Morbi laoreet leo nisi. Morbi aliquam lacus ante. Cras iaculis velit sed diam mattis a fermentum urna luctus. Duis consectetur nunc vitae felis facilisis eget vulputate risus viverra. Cras consectetur ullamcorper lobortis. Nam eu gravida lorem. Nulla facilisi. Nullam quis felis enim. Mauris orci lectus, dictum id cursus in, vulputate in massa. + +Phasellus non varius sem. Nullam commodo lacinia odio sit amet egestas. Donec ullamcorper sapien sagittis arcu volutpat placerat. Phasellus ut pretium ante. Nam dictum pulvinar nibh dapibus tristique. Sed at tellus mi, fringilla convallis justo. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus tristique rutrum nulla sed eleifend. Praesent at nunc arcu. Mauris condimentum faucibus nibh, eget commodo quam viverra sed. Morbi in tincidunt dolor. Morbi sed augue et augue interdum fermentum. + +Curabitur tristique purus ac arcu consequat cursus. Cras diam felis, dignissim quis placerat at, aliquet ac metus. Mauris vulputate est eu nibh imperdiet varius. Cras aliquet rhoncus elit a laoreet. Mauris consectetur erat et erat scelerisque eu faucibus dolor consequat. Nam adipiscing sagittis nisl, eu mollis massa tempor ac. Nulla scelerisque tempus blandit. Phasellus ac ipsum eros, id posuere arcu. Nullam non sapien arcu. Vivamus sit amet lorem justo, ac tempus turpis. Suspendisse pharetra gravida imperdiet. Pellentesque lacinia mi eu elit luctus pellentesque. Sed accumsan libero a magna elementum varius. Nunc eget pellentesque metus. ''' + prompt: ''' +

S11E3: Metal Bands

+

Shown below are schematic band diagrams for two different metals. Both diagrams appear different, yet both of the elements are undisputably metallic in nature.

+

* Why is it that both sodium and magnesium behave as metals, even though the s-band of magnesium is filled?

+

This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.

+ ''' + rubric: ''' +
    +
  • Metals tend to be good electronic conductors, meaning that they have a large number of electrons which are able to access empty (mobile) energy states within the material.
  • +
  • Sodium has a half-filled s-band, so there are a number of empty states immediately above the highest occupied energy levels within the band.
  • +
  • Magnesium has a full s-band, but the the s-band and p-band overlap in magnesium. Thus are still a large number of available energy states immediately above the s-band highest occupied energy level.
  • +
+ +

Please score your response according to how many of the above components you identified:

+ ''' + max_score: 4 + else if cmd == 'save_calibration_essay' + response = + success: true + actual_score: 2 + else if cmd == 'save_grade' + response = + success: true + + return response + + +class PeerGradingProblem + constructor: (backend) -> + @prompt_wrapper = $('.prompt-wrapper') + @backend = backend + + + # get the location of the problem + @location = $('.peer-grading').data('location') + # prevent this code from trying to run + # when we don't have a location + if(!@location) + return + + # get the other elements we want to fill in + @submission_container = $('.submission-container') + @prompt_container = $('.prompt-container') + @rubric_container = $('.rubric-container') + @calibration_panel = $('.calibration-panel') + @grading_panel = $('.grading-panel') + @content_panel = $('.content-panel') + @grading_message = $('.grading-message') + @grading_message.hide() + + @grading_wrapper =$('.grading-wrapper') + @calibration_feedback_panel = $('.calibration-feedback') + @interstitial_page = $('.interstitial-page') + @interstitial_page.hide() + + @error_container = $('.error-container') + + @submission_key_input = $("input[name='submission-key']") + @essay_id_input = $("input[name='essay-id']") + @feedback_area = $('.feedback-area') + + @score_selection_container = $('.score-selection-container') + @score = null + @calibration = null + + @submit_button = $('.submit-button') + @action_button = $('.action-button') + @calibration_feedback_button = $('.calibration-feedback-button') + @interstitial_page_button = $('.interstitial-page-button') + + Collapsible.setCollapsibles(@content_panel) + + # Set up the click event handlers + @action_button.click -> history.back() + @calibration_feedback_button.click => + @calibration_feedback_panel.hide() + @grading_wrapper.show() + @is_calibrated_check() + + @interstitial_page_button.click => + @interstitial_page.hide() + @is_calibrated_check() + + @is_calibrated_check() + + + ########## + # + # Ajax calls to the backend + # + 
########## + is_calibrated_check: () => + @backend.post('is_student_calibrated', {location: @location}, @calibration_check_callback) + + fetch_calibration_essay: () => + @backend.post('show_calibration_essay', {location: @location}, @render_calibration) + + fetch_submission_essay: () => + @backend.post('get_next_submission', {location: @location}, @render_submission) + + construct_data: () -> + data = + score: @score + location: @location + submission_id: @essay_id_input.val() + submission_key: @submission_key_input.val() + feedback: @feedback_area.val() + return data + + + submit_calibration_essay: ()=> + data = @construct_data() + @backend.post('save_calibration_essay', data, @calibration_callback) + + submit_grade: () => + data = @construct_data() + @backend.post('save_grade', data, @submission_callback) + + + ########## + # + # Callbacks for various events + # + ########## + + # called after we perform an is_student_calibrated check + calibration_check_callback: (response) => + if response.success + # if we haven't been calibrating before + if response.calibrated and (@calibration == null or @calibration == false) + @calibration = false + @fetch_submission_essay() + # If we were calibrating before and no longer need to, + # show the interstitial page + else if response.calibrated and @calibration == true + @calibration = false + @render_interstitial_page() + else + @calibration = true + @fetch_calibration_essay() + else if response.error + @render_error(response.error) + else + @render_error("Error contacting the grading service") + + + # called after we submit a calibration score + calibration_callback: (response) => + if response.success + @render_calibration_feedback(response) + else if response.error + @render_error(response.error) + else + @render_error("Error saving calibration score") + + # called after we submit a submission score + submission_callback: (response) => + if response.success + @is_calibrated_check() + @grading_message.fadeIn() + 
@grading_message.html("

Grade sent successfully.

") + else + if response.error + @render_error(response.error) + else + @render_error("Error occurred while submitting grade") + + # called after a grade is selected on the interface + graded_callback: (event) => + @grading_message.hide() + @score = event.target.value + @show_submit_button() + + + + ########## + # + # Rendering methods and helpers + # + ########## + # renders a calibration essay + render_calibration: (response) => + if response.success + + # load in all the data + @submission_container.html("

Training Essay

") + @render_submission_data(response) + # TODO: indicate that we're in calibration mode + @calibration_panel.addClass('current-state') + @grading_panel.removeClass('current-state') + + # Display the right text + # both versions of the text are written into the template itself + # we only need to show/hide the correct ones at the correct time + @calibration_panel.find('.calibration-text').show() + @grading_panel.find('.calibration-text').show() + @calibration_panel.find('.grading-text').hide() + @grading_panel.find('.grading-text').hide() + + + @submit_button.unbind('click') + @submit_button.click @submit_calibration_essay + + else if response.error + @render_error(response.error) + else + @render_error("An error occurred while retrieving the next calibration essay") + + # Renders a student submission to be graded + render_submission: (response) => + if response.success + @submit_button.hide() + @submission_container.html("

Submitted Essay

") + @render_submission_data(response) + + @calibration_panel.removeClass('current-state') + @grading_panel.addClass('current-state') + + # Display the correct text + # both versions of the text are written into the template itself + # we only need to show/hide the correct ones at the correct time + @calibration_panel.find('.calibration-text').hide() + @grading_panel.find('.calibration-text').hide() + @calibration_panel.find('.grading-text').show() + @grading_panel.find('.grading-text').show() + + @submit_button.unbind('click') + @submit_button.click @submit_grade + else if response.error + @render_error(response.error) + else + @render_error("An error occured when retrieving the next submission.") + + + make_paragraphs: (text) -> + paragraph_split = text.split(/\n\s*\n/) + new_text = '' + for paragraph in paragraph_split + new_text += "

#{paragraph}

" + return new_text + + # render common information between calibration and grading + render_submission_data: (response) => + @content_panel.show() + + @submission_container.append(@make_paragraphs(response.student_response)) + @prompt_container.html(response.prompt) + @rubric_container.html(response.rubric) + @submission_key_input.val(response.submission_key) + @essay_id_input.val(response.submission_id) + @setup_score_selection(response.max_score) + + @submit_button.hide() + @action_button.hide() + @calibration_feedback_panel.hide() + + + render_calibration_feedback: (response) => + # display correct grade + @calibration_feedback_panel.slideDown() + calibration_wrapper = $('.calibration-feedback-wrapper') + calibration_wrapper.html("

The score you gave was: #{@score}. The actual score is: #{response.actual_score}

") + + + score = parseInt(@score) + actual_score = parseInt(response.actual_score) + + if score == actual_score + calibration_wrapper.append("

Congratulations! Your score matches the actual score!

") + else + calibration_wrapper.append("

Please try to understand the grading critera better to be more accurate next time.

") + + # disable score selection and submission from the grading interface + $("input[name='score-selection']").attr('disabled', true) + @submit_button.hide() + + render_interstitial_page: () => + @content_panel.hide() + @interstitial_page.show() + + render_error: (error_message) => + @error_container.show() + @calibration_feedback_panel.hide() + @error_container.html(error_message) + @content_panel.hide() + @action_button.show() + + show_submit_button: () => + @submit_button.show() + + setup_score_selection: (max_score) => + # first, get rid of all the old inputs, if any. + @score_selection_container.html('Choose score: ') + + # Now create new labels and inputs for each possible score. + for score in [0..max_score] + id = 'score-' + score + label = """""" + + input = """ + + """ # " fix broken parsing in emacs + @score_selection_container.append(input + label) + + # And now hook up an event handler again + $("input[name='score-selection']").change @graded_callback + + + +mock_backend = false +ajax_url = $('.peer-grading').data('ajax_url') +backend = new PeerGradingProblemBackend(ajax_url, mock_backend) +$(document).ready(() -> new PeerGradingProblem(backend)) diff --git a/lms/static/files/edx-identity.zip b/lms/static/files/edx-identity.zip new file mode 100644 index 0000000000..a37d2b4e20 Binary files /dev/null and b/lms/static/files/edx-identity.zip differ diff --git a/lms/static/images/logo-edx-support.png b/lms/static/images/logo-edx-support.png new file mode 100644 index 0000000000..054647c99f Binary files /dev/null and b/lms/static/images/logo-edx-support.png differ diff --git a/lms/static/images/press-kit/3.091x_high-res.png b/lms/static/images/press-kit/3.091x_high-res.png new file mode 100644 index 0000000000..4bd1950734 Binary files /dev/null and b/lms/static/images/press-kit/3.091x_high-res.png differ diff --git a/lms/static/images/press-kit/3.091x_x200.jpg b/lms/static/images/press-kit/3.091x_x200.jpg new file mode 100644 index 0000000000..9dff993e6b 
Binary files /dev/null and b/lms/static/images/press-kit/3.091x_x200.jpg differ diff --git a/lms/static/images/press-kit/6.002x_high-res.png b/lms/static/images/press-kit/6.002x_high-res.png new file mode 100755 index 0000000000..b785b825fd Binary files /dev/null and b/lms/static/images/press-kit/6.002x_high-res.png differ diff --git a/lms/static/images/press-kit/6.002x_x200.jpg b/lms/static/images/press-kit/6.002x_x200.jpg new file mode 100644 index 0000000000..c12588797c Binary files /dev/null and b/lms/static/images/press-kit/6.002x_x200.jpg differ diff --git a/lms/static/images/press-kit/anant-agarwal_high-res.jpg.REMOVED.git-id b/lms/static/images/press-kit/anant-agarwal_high-res.jpg.REMOVED.git-id new file mode 100644 index 0000000000..414d117127 --- /dev/null +++ b/lms/static/images/press-kit/anant-agarwal_high-res.jpg.REMOVED.git-id @@ -0,0 +1 @@ +b154ce99fb5c8d413ba769e8cc0df94ed674c3f4 \ No newline at end of file diff --git a/lms/static/images/press-kit/anant-agarwal_x200.jpg b/lms/static/images/press-kit/anant-agarwal_x200.jpg new file mode 100644 index 0000000000..a004b7ecdb Binary files /dev/null and b/lms/static/images/press-kit/anant-agarwal_x200.jpg differ diff --git a/lms/static/images/press-kit/anant-tablet_high-res.jpg.REMOVED.git-id b/lms/static/images/press-kit/anant-tablet_high-res.jpg.REMOVED.git-id new file mode 100644 index 0000000000..c3ba812427 --- /dev/null +++ b/lms/static/images/press-kit/anant-tablet_high-res.jpg.REMOVED.git-id @@ -0,0 +1 @@ +2b8c58b098bdb17f9ddcbb2098f94c50fdcedf60 \ No newline at end of file diff --git a/lms/static/images/press-kit/anant-tablet_x200.jpg b/lms/static/images/press-kit/anant-tablet_x200.jpg new file mode 100644 index 0000000000..1d35360745 Binary files /dev/null and b/lms/static/images/press-kit/anant-tablet_x200.jpg differ diff --git a/lms/static/images/press-kit/edx-video-editing_high-res.jpg.REMOVED.git-id b/lms/static/images/press-kit/edx-video-editing_high-res.jpg.REMOVED.git-id new file mode 
100644 index 0000000000..2b3ee55249 --- /dev/null +++ b/lms/static/images/press-kit/edx-video-editing_high-res.jpg.REMOVED.git-id @@ -0,0 +1 @@ +7d8b9879f7e5b859910edba7249661eedd3fcf37 \ No newline at end of file diff --git a/lms/static/images/press-kit/edx-video-editing_x200.jpg b/lms/static/images/press-kit/edx-video-editing_x200.jpg new file mode 100644 index 0000000000..c4ed87a503 Binary files /dev/null and b/lms/static/images/press-kit/edx-video-editing_x200.jpg differ diff --git a/lms/static/images/press-kit/piotr-mitros_high-res.jpg.REMOVED.git-id b/lms/static/images/press-kit/piotr-mitros_high-res.jpg.REMOVED.git-id new file mode 100644 index 0000000000..23fb583f92 --- /dev/null +++ b/lms/static/images/press-kit/piotr-mitros_high-res.jpg.REMOVED.git-id @@ -0,0 +1 @@ +caf8b43337faa75cef5da5cd090010215a67b1bd \ No newline at end of file diff --git a/lms/static/images/press-kit/piotr-mitros_x200.jpg b/lms/static/images/press-kit/piotr-mitros_x200.jpg new file mode 100644 index 0000000000..36c21becd6 Binary files /dev/null and b/lms/static/images/press-kit/piotr-mitros_x200.jpg differ diff --git a/lms/static/sass/application.scss b/lms/static/sass/application.scss index 4e532cf30e..d1dd3d1d4e 100644 --- a/lms/static/sass/application.scss +++ b/lms/static/sass/application.scss @@ -23,6 +23,7 @@ @import 'multicourse/courses'; @import 'multicourse/course_about'; @import 'multicourse/jobs'; +@import 'multicourse/media-kit'; @import 'multicourse/about_pages'; @import 'multicourse/press_release'; @import 'multicourse/password_reset'; diff --git a/lms/static/sass/bourbon/css3/_box-sizing.scss b/lms/static/sass/bourbon/css3/_box-sizing.scss index 42aaaf6fc7..d61523b5f1 100644 --- a/lms/static/sass/bourbon/css3/_box-sizing.scss +++ b/lms/static/sass/bourbon/css3/_box-sizing.scss @@ -2,6 +2,5 @@ // content-box | border-box | inherit -webkit-box-sizing: $box; -moz-box-sizing: $box; - box-sizing: $box; box-sizing: $box; *behavior: url(/static/scripts/boxsizing.htc); } 
diff --git a/lms/static/sass/course/_staff_grading.scss b/lms/static/sass/course/_staff_grading.scss index f1b6c5845d..92fa760d4a 100644 --- a/lms/static/sass/course/_staff_grading.scss +++ b/lms/static/sass/course/_staff_grading.scss @@ -1,4 +1,5 @@ -div.staff-grading { +div.staff-grading, +div.peer-grading{ textarea.feedback-area { height: 75px; margin: 20px; @@ -36,8 +37,8 @@ div.staff-grading { } .prompt-information-container, - .submission-wrapper, .rubric-wrapper, + .calibration-feedback-wrapper, .grading-container { border: 1px solid gray; @@ -49,6 +50,18 @@ div.staff-grading { padding: 15px; margin-left: 0px; } + .submission-wrapper + { + h3 + { + margin-bottom: 15px; + } + p + { + margin-left:10px; + } + padding: 15px; + } .meta-info-wrapper { background-color: #eee; @@ -67,7 +80,8 @@ div.staff-grading { } } } - .message-container + .message-container, + .grading-message { background-color: $yellow; padding: 10px; @@ -81,6 +95,69 @@ div.staff-grading { margin-bottom:5px; font-size: .8em; } + + .instructions-panel + { + + margin-right:20px; + > div + { + padding: 10px; + margin: 0px; + background: #eee; + height: 10em; + h3 + { + text-align:center; + text-transform:uppercase; + color: #777; + } + p + { + color: #777; + } + } + .calibration-panel + { + float:left; + width:48%; + } + .grading-panel + { + float:right; + width: 48%; + } + .current-state + { + background: #1D9DD9; + h3, p + { + color: white; + } + } + @include clearfix; + } + + + .collapsible + { + margin-left: 0px; + header + { + margin-top:20px; + margin-bottom:20px; + font-size: 1.2em; + } + } + .interstitial-page + { + text-align: center; + input[type=button] + { + margin-top: 20px; + } + } padding: 40px; } + diff --git a/lms/static/sass/multicourse/_home.scss b/lms/static/sass/multicourse/_home.scss index 3d04165484..669bd889b0 100644 --- a/lms/static/sass/multicourse/_home.scss +++ b/lms/static/sass/multicourse/_home.scss @@ -336,6 +336,7 @@ border-bottom: 1px solid rgb(200,200,200); 
@include clearfix; padding: 10px 20px 8px; + position: relative; h2 { float: left; @@ -343,16 +344,27 @@ text-shadow: 0 1px rgba(255,255,255, 0.6); } - a { - color: $lighter-base-font-color; + .action.action-mediakit { float: right; - font-style: italic; - font-family: $serif; - padding-top: 3px; + position: relative; + top: 1px; + font-family: $sans-serif; + font-size: 14px; text-shadow: 0 1px rgba(255,255,255, 0.6); - &:hover { - color: $base-font-color; + &:after { + position: relative; + top: -1px; + display: inline-block; + margin: 0 0 0 5px; + content: "➤"; + font-size: 11px; + } + + .org-name { + color: $blue; + font-family: $sans-serif; + text-transform: none; } } } diff --git a/lms/static/sass/multicourse/_media-kit.scss b/lms/static/sass/multicourse/_media-kit.scss new file mode 100644 index 0000000000..db73029fd3 --- /dev/null +++ b/lms/static/sass/multicourse/_media-kit.scss @@ -0,0 +1,260 @@ +// vars +$baseline: 20px; +$white: rgb(255,255,255); + +.mediakit { + @include box-sizing(border-box); + margin: 0 auto; + padding: ($baseline*3) 0; + width: 980px; + + .wrapper-mediakit { + @include border-radius(4px); + @include box-sizing(border-box); + @include box-shadow(0 1px 10px 0 rgba(0,0,0, 0.1)); + margin: ($baseline*3) 0 0 0; + border: 1px solid $border-color; + padding: ($baseline*2) ($baseline*3); + + > section { + margin: 0 0 ($baseline*2) 0; + + &:last-child { + margin-bottom: 0; + } + + header { + + } + } + } + + h1 { + margin: 0 0 $baseline 0; + position: relative; + font-size: 36px; + } + + hr { + @extend .faded-hr-divider-light; + border: none; + margin: 0px; + position: relative; + z-index: 2; + + &::after { + @extend .faded-hr-divider; + bottom: 0px; + content: ""; + display: block; + position: absolute; + top: -1px; + } + } + + // general + a.action-download { + position: relative; + color: $blue; + font-family: $sans-serif; + text-decoration: none; + @include transition(all, 0.1s, linear); + + .note { + position: relative; + color: $blue; + 
font-family: $sans-serif; + font-size: 13px; + text-decoration: none; + @include transition(all, 0.1s, linear); + + &:before { + position: relative; + top: -1px; + margin: 0 5px 0 0; + content: "➤"; + font-size: 11px; + } + } + + &:hover { + + .note { + color: shade($blue, 25%); + } + } + } + + // introduction section + .introduction { + @include clearfix(); + + header { + margin: 0 0 ($baseline*1.5) 0; + + h2 { + margin: 0; + color: rgb(178, 181, 185); + font-size: 32px; + + .org-name { + color: rgb(178, 181, 185); + font-family: $serif; + text-transform: none; + } + } + } + + article { + @include box-sizing(border-box); + width: 500px; + margin-right: $baseline; + float: left; + } + + aside { + @include border-radius(2px); + @include box-sizing(border-box); + @include box-shadow(0 1px 4px 0 rgba(0,0,0, 0.2)); + width: 330px; + float: left; + border: 3px solid tint(rgb(96, 155, 216), 35%); + background: tint(rgb(96, 155, 216), 35%); + + h3 { + padding: ($baseline/2) ($baseline*0.75); + font-family: $sans-serif; + font-weight: bold; + font-size: 16px; + letter-spacing: 0; + color: $white; + text-transform: uppercase; + + .org-name { + color: $white !important; + font-weight: bold; + text-transform: none; + } + } + + a.action-download { + + .note { + width: 100%; + display: inline-block; + text-align: center; + } + } + + figure { + @include box-sizing(border-box); + background: $white; + width: 100%; + + figcaption { + display: none; + } + + a { + display: block; + padding: ($baseline/2); + } + + img { + display: block; + margin: 0 auto; + width: 60%; + } + } + } + } + + // library section + .library { + @include border-radius(2px); + @include box-sizing(border-box); + @include box-shadow(0 1px 4px 0 rgba(0,0,0, 0.2)); + border: 3px solid tint($light-gray,50%); + padding: 0; + background: tint($light-gray,50%); + + header { + padding: ($baseline*0.75) $baseline; + + h2 { + margin: 0; + padding: 0; + color: $dark-gray; + font-size: 16px; + font-family: $sans-serif; + 
font-weight: bold; + letter-spacing: 0; + + .org-name { + color: $dark-gray !important; + font-weight: bold; + text-transform: none; + } + } + } + + .listing { + @include clearfix(); + background: $white; + margin: 0; + padding: ($baseline*2); + list-style: none; + + li { + @include box-sizing(border-box); + overflow-y: auto; + float: left; + width: 350px; + margin: 0 0 $baseline 0; + + &:nth-child(odd) { + margin-right: ($baseline*3.5); + } + } + + figure { + + a { + @include border-radius(2px); + @include box-sizing(border-box); + @include box-shadow(0 1px 2px 0 rgba(0,0,0, 0.1)); + display: block; + min-height: 380px; + border: 2px solid tint($light-gray,75%); + padding: $baseline; + + &:hover { + border-color: $blue; + } + } + + img { + display: block; + border: 2px solid tint($light-gray,80%); + margin: 0 auto ($baseline*0.75) auto; + } + + figcaption { + font-size: 13px; + line-height: 18px; + color: $text-color; + } + + .note { + display: inline-block; + margin-top: ($baseline/2); + } + } + } + } + + // share + .share { + + } +} \ No newline at end of file diff --git a/lms/static/sass/multicourse/_press_release.scss b/lms/static/sass/multicourse/_press_release.scss index f0a2302f4e..7ee362617d 100644 --- a/lms/static/sass/multicourse/_press_release.scss +++ b/lms/static/sass/multicourse/_press_release.scss @@ -23,6 +23,10 @@ color: $base-font-color; font: normal 1em/1.6em $serif; margin: 0px; + + a { + font: 1em $serif; + } } li + li { diff --git a/lms/static/sass/shared/_course_object.scss b/lms/static/sass/shared/_course_object.scss index 374caf4898..2c638ed158 100644 --- a/lms/static/sass/shared/_course_object.scss +++ b/lms/static/sass/shared/_course_object.scss @@ -13,6 +13,23 @@ } } + .courses-listing { + @include clearfix(); + margin: 0; + padding: 0; + list-style: none; + + .courses-listing-item { + width: flex-grid(4); + margin-right: flex-gutter(); + float: left; + + &:nth-child(3n+3) { + margin-right: 0; + } + } + } + .course { background: 
rgb(250,250,250); border: 1px solid rgb(180,180,180); @@ -24,6 +41,31 @@ width: 100%; @include transition(all, 0.15s, linear); + .status { + background: $blue; + color: white; + font-size: 10px; + left: 10px; + padding: 2px 10px; + @include border-radius(2px); + position: absolute; + text-transform: uppercase; + top: -6px; + z-index: 100; + } + + .status:after { + border-bottom: 6px solid shade($blue, 50%); + border-right: 6px solid transparent; + content: ""; + display: block; + height: 0; + position: absolute; + right: -6px; + top: 0; + width: 0; + } + a:hover { text-decoration: none; } diff --git a/lms/static/scripts/boxsizing.htc b/lms/static/scripts/boxsizing.htc index 43bd86fae1..40f5ab4e12 100644 --- a/lms/static/scripts/boxsizing.htc +++ b/lms/static/scripts/boxsizing.htc @@ -85,7 +85,10 @@ function update(){ } resizetimeout = window.setTimeout(function(){ restore(); - init(); + try { + init(); + } + catch (err) {} resizetimeout = null; },100); } diff --git a/lms/templates/accounts_login.html b/lms/templates/accounts_login.html new file mode 100644 index 0000000000..011ca643c6 --- /dev/null +++ b/lms/templates/accounts_login.html @@ -0,0 +1,92 @@ +<%! from django.core.urlresolvers import reverse %> +<%inherit file="main.html" /> +<%namespace name='static' file='static_content.html'/> + +<%block name="headextra"> + + + + + + diff --git a/lms/templates/combined_open_ended.html b/lms/templates/combined_open_ended.html new file mode 100644 index 0000000000..71c22085e3 --- /dev/null +++ b/lms/templates/combined_open_ended.html @@ -0,0 +1,22 @@ +
+ +
+

Status


+ ${status | n} +
+ +
+

Problem


+ % for item in items: +
${item['content'] | n}
+ % endfor + + + + +
+ +
+
+
+ diff --git a/lms/templates/combined_open_ended_results.html b/lms/templates/combined_open_ended_results.html new file mode 100644 index 0000000000..db86e95016 --- /dev/null +++ b/lms/templates/combined_open_ended_results.html @@ -0,0 +1,4 @@ +
+

Results from Step ${task_number}


+ ${results | n} +
\ No newline at end of file diff --git a/lms/templates/combined_open_ended_status.html b/lms/templates/combined_open_ended_status.html new file mode 100644 index 0000000000..34a5dd0d79 --- /dev/null +++ b/lms/templates/combined_open_ended_status.html @@ -0,0 +1,28 @@ +
+ %for i in xrange(0,len(status_list)): + <%status=status_list[i]%> + %if i==len(status_list)-1: +
+ %else: +
+ %endif + + Step ${status['task_number']} (${status['human_state']}) : ${status['score']} / ${status['max_score']} + % if status['state'] == 'initial': + + % elif status['state'] in ['done', 'post_assessment'] and status['correct'] == 'correct': + + % elif status['state'] in ['done', 'post_assessment'] and status['correct'] == 'incorrect': + + % elif status['state'] == 'assessing': + + % endif + + %if status['type']=="openended" and status['state'] in ['done', 'post_assessment']: + + %endif +
+ %endfor +
\ No newline at end of file diff --git a/lms/templates/course.html b/lms/templates/course.html index 50a00f9d31..a2eff572e1 100644 --- a/lms/templates/course.html +++ b/lms/templates/course.html @@ -5,6 +5,9 @@ %> <%page args="course" />
+ %if course.is_new: + New + %endif
diff --git a/lms/templates/courseware/courses.html b/lms/templates/courseware/courses.html index 0c45faa923..a8fe851d19 100644 --- a/lms/templates/courseware/courses.html +++ b/lms/templates/courseware/courses.html @@ -20,21 +20,13 @@ ## I'm removing this for now since we aren't using it for the fall. ## <%include file="course_filter.html" />
-
- %for course in universities['MITx']: +
    + %for course in courses: +
  • <%include file="../course.html" args="course=course" /> +
  • %endfor -
-
- %for course in universities['HarvardX']: - <%include file="../course.html" args="course=course" /> - %endfor -
-
- %for course in universities['BerkeleyX']: - <%include file="../course.html" args="course=course" /> - %endfor -
+
diff --git a/lms/templates/courseware/instructor_dashboard.html b/lms/templates/courseware/instructor_dashboard.html index 74bc25fcbe..7f1912cd45 100644 --- a/lms/templates/courseware/instructor_dashboard.html +++ b/lms/templates/courseware/instructor_dashboard.html @@ -57,10 +57,13 @@ function goto( mode) Psychometrics | %endif Admin | - Forum Admin ] + Forum Admin | + Enrollment + ] -
${djangopid}
+
${djangopid} + | ${mitx_version}
@@ -68,6 +71,12 @@ function goto( mode) ##----------------------------------------------------------------------------- %if modeflag.get('Grades'): + + %if offline_grade_log: +

Pre-computed grades ${offline_grade_log} available: Use? +

+ %endif +

Gradebook

@@ -93,6 +102,42 @@ function goto( mode)

+
+ + %if settings.MITX_FEATURES.get('REMOTE_GRADEBOOK_URL','') and instructor_access: + + <% + rg = course.metadata.get('remote_gradebook',{}) + %> + +

Export grades to remote gradebook

+

The assignments defined for this course should match the ones + stored in the gradebook, for this to work properly!

+ +
    +
  • Gradebook name: ${rg.get('name','None defined!')} +
    +
    + + +
    +
    +
  • +
  • +
    +
    +
  • +
  • Assignment name: +
    +
    + + + +
  • +
+ + %endif + %endif ##----------------------------------------------------------------------------- @@ -128,6 +173,16 @@ function goto( mode)
%endif + %if admin_access: +
+

+ +

+ + +


+ %endif + %if settings.MITX_FEATURES['ENABLE_MANUAL_GIT_RELOAD'] and admin_access:

@@ -163,10 +218,52 @@ function goto( mode) %endif %endif +##----------------------------------------------------------------------------- +%if modeflag.get('Enrollment'): + +


+

+ + +

+ Student Email: + + +


+ + %if settings.MITX_FEATURES.get('REMOTE_GRADEBOOK_URL','') and instructor_access: + + <% + rg = course.metadata.get('remote_gradebook',{}) + %> + +

Pull enrollment from remote gradebook

+
    +
  • Gradebook name: ${rg.get('name','None defined!')} +
  • Section:
  • +
+ + + + +
+ + %endif + +

Add students: enter emails, separated by returns or commas;

+ + + +%endif + +##----------------------------------------------------------------------------- +
##----------------------------------------------------------------------------- -%if modeflag.get('Psychometrics') is None: +##----------------------------------------------------------------------------- + +%if datatable and modeflag.get('Psychometrics') is None:

diff --git a/lms/templates/courseware/progress.html b/lms/templates/courseware/progress.html index 81268ff081..fb163d112d 100644 --- a/lms/templates/courseware/progress.html +++ b/lms/templates/courseware/progress.html @@ -18,7 +18,7 @@ diff --git a/lms/templates/courseware/progress_graph.js b/lms/templates/courseware/progress_graph.js index 189137ada3..449cad766f 100644 --- a/lms/templates/courseware/progress_graph.js +++ b/lms/templates/courseware/progress_graph.js @@ -1,4 +1,4 @@ -<%page args="grade_summary, grade_cutoffs, graph_div_id, **kwargs"/> +<%page args="grade_summary, grade_cutoffs, graph_div_id, show_grade_breakdown = True, show_grade_cutoffs = True, **kwargs"/> <%! import json import math @@ -70,25 +70,26 @@ $(function () { series = categories.values() overviewBarX = tickIndex extraColorIndex = len(categories) #Keeping track of the next color to use for categories not in categories[] - - for section in grade_summary['grade_breakdown']: - if section['percent'] > 0: - if section['category'] in categories: - color = categories[ section['category'] ]['color'] - else: - color = colors[ extraColorIndex % len(colors) ] - extraColorIndex += 1 - - series.append({ - 'label' : section['category'] + "-grade_breakdown", - 'data' : [ [overviewBarX, section['percent']] ], - 'color' : color - }) - - detail_tooltips[section['category'] + "-grade_breakdown"] = [ section['detail'] ] - ticks += [ [overviewBarX, "Total"] ] - tickIndex += 1 + sectionSpacer + if show_grade_breakdown: + for section in grade_summary['grade_breakdown']: + if section['percent'] > 0: + if section['category'] in categories: + color = categories[ section['category'] ]['color'] + else: + color = colors[ extraColorIndex % len(colors) ] + extraColorIndex += 1 + + series.append({ + 'label' : section['category'] + "-grade_breakdown", + 'data' : [ [overviewBarX, section['percent']] ], + 'color' : color + }) + + detail_tooltips[section['category'] + "-grade_breakdown"] = [ section['detail'] ] + + ticks 
+= [ [overviewBarX, "Total"] ] + tickIndex += 1 + sectionSpacer totalScore = grade_summary['percent'] detail_tooltips['Dropped Scores'] = dropped_score_tooltips @@ -97,10 +98,14 @@ $(function () { ## ----------------------------- Grade cutoffs ------------------------- ## grade_cutoff_ticks = [ [1, "100%"], [0, "0%"] ] - descending_grades = sorted(grade_cutoffs, key=lambda x: grade_cutoffs[x], reverse=True) - for grade in descending_grades: - percent = grade_cutoffs[grade] - grade_cutoff_ticks.append( [ percent, "{0} {1:.0%}".format(grade, percent) ] ) + if show_grade_cutoffs: + grade_cutoff_ticks = [ [1, "100%"], [0, "0%"] ] + descending_grades = sorted(grade_cutoffs, key=lambda x: grade_cutoffs[x], reverse=True) + for grade in descending_grades: + percent = grade_cutoffs[grade] + grade_cutoff_ticks.append( [ percent, "{0} {1:.0%}".format(grade, percent) ] ) + else: + grade_cutoff_ticks = [ ] %> var series = ${ json.dumps( series ) }; @@ -135,9 +140,11 @@ $(function () { var $grade_detail_graph = $("#${graph_div_id}"); if ($grade_detail_graph.length > 0) { var plot = $.plot($grade_detail_graph, series, options); - //We need to put back the plotting of the percent here - var o = plot.pointOffset({x: ${overviewBarX} , y: ${totalScore}}); - $grade_detail_graph.append('
${"{totalscore:.0%}".format(totalscore=totalScore)}
'); + + %if show_grade_breakdown: + var o = plot.pointOffset({x: ${overviewBarX} , y: ${totalScore}}); + $grade_detail_graph.append('
${"{totalscore:.0%}".format(totalscore=totalScore)}
'); + %endif } var previousPoint = null; diff --git a/lms/templates/feed.rss b/lms/templates/feed.rss index 872ed46ff1..415199141d 100644 --- a/lms/templates/feed.rss +++ b/lms/templates/feed.rss @@ -6,7 +6,16 @@ ## EdX Blog - 2012-12-10T14:00:12-07:00 + 2012-12-19T14:00:12-07:00 + + tag:www.edx.org,2012:Post/10 + 2012-12-19T14:00:00-07:00 + 2012-12-19T14:00:00-07:00 + + edX announces first wave of new courses for Spring 2013 + <img src="${static.url('images/press/releases/edx-logo_240x180.png')}" /> + <p></p> + tag:www.edx.org,2012:Post/9 2012-12-10T14:00:00-07:00 diff --git a/lms/templates/footer.html b/lms/templates/footer.html index 96c80d151d..7fe7c18ccc 100644 --- a/lms/templates/footer.html +++ b/lms/templates/footer.html @@ -6,7 +6,7 @@
+ +

Instructions

@@ -35,6 +37,8 @@
+ +

diff --git a/lms/templates/open_ended.html b/lms/templates/open_ended.html new file mode 100644 index 0000000000..cda3282a45 --- /dev/null +++ b/lms/templates/open_ended.html @@ -0,0 +1,31 @@ +
+
+
+ ${prompt|n} +
+ + +
+
+ % if state == 'initial': + Unanswered + % elif state in ['done', 'post_assessment'] and correct == 'correct': + Correct + % elif state in ['done', 'post_assessment'] and correct == 'incorrect': + Incorrect + % elif state == 'assessing': + Submitted for grading + % endif + + % if hidden: +
+ % endif +
+ + + + +
+ + +
diff --git a/lms/templates/open_ended_evaluation.html b/lms/templates/open_ended_evaluation.html new file mode 100644 index 0000000000..da3f38b6a9 --- /dev/null +++ b/lms/templates/open_ended_evaluation.html @@ -0,0 +1,23 @@ +
+ ${msg|n} +
+
+ Respond to Feedback +
+
+

How accurate do you find this feedback?

+
+
    +
  • +
  • +
  • +
  • +
  • +
+
+

Additional comments:

+ + +
+
+
\ No newline at end of file diff --git a/lms/templates/open_ended_feedback.html b/lms/templates/open_ended_feedback.html index cb90006456..d8aa3d1a9e 100644 --- a/lms/templates/open_ended_feedback.html +++ b/lms/templates/open_ended_feedback.html @@ -12,5 +12,6 @@
${ feedback | n}
+ ${rubric_feedback | n}
\ No newline at end of file diff --git a/lms/templates/open_ended_rubric.html b/lms/templates/open_ended_rubric.html new file mode 100644 index 0000000000..9f8a2ece4e --- /dev/null +++ b/lms/templates/open_ended_rubric.html @@ -0,0 +1,30 @@ + + % for i in range(len(rubric_categories)): + <% category = rubric_categories[i] %> + + + % for j in range(len(category['options'])): + <% option = category['options'][j] %> + + % endfor + + % endfor +
+ ${category['description']} + % if category['has_score'] == True: + (Your score: ${category['score']}) + % endif + +
+ ${option['text']} + % if option.has_key('selected'): + % if option['selected'] == True: +
[${option['points']} points]
+ %else: +
[${option['points']} points]
+ % endif + % else: +
[${option['points']} points]
+ %endif +
+
\ No newline at end of file diff --git a/lms/templates/peer_grading/peer_grading.html b/lms/templates/peer_grading/peer_grading.html new file mode 100644 index 0000000000..484bb94182 --- /dev/null +++ b/lms/templates/peer_grading/peer_grading.html @@ -0,0 +1,39 @@ +<%inherit file="/main.html" /> +<%block name="bodyclass">${course.css_class} +<%namespace name='static' file='/static_content.html'/> + +<%block name="headextra"> + <%static:css group='course'/> + + +<%block name="title">${course.number} Peer Grading + +<%include file="/courseware/course_navigation.html" args="active_page='staff_grading'" /> + +<%block name="js_extra"> + <%static:js group='peer_grading'/> + + +
+
+
${error_text}
+

Peer Grading

+

Instructions

+

Here are a list of problems that need to be peer graded for this course.

+ % if success: + % if len(problem_list) == 0: +
+ Nothing to grade! +
+ %else: + + %endif + %endif +
+
diff --git a/lms/templates/peer_grading/peer_grading_problem.html b/lms/templates/peer_grading/peer_grading_problem.html new file mode 100644 index 0000000000..d493e84ace --- /dev/null +++ b/lms/templates/peer_grading/peer_grading_problem.html @@ -0,0 +1,112 @@ + +<%inherit file="/main.html" /> +<%block name="bodyclass">${course.css_class} +<%namespace name='static' file='/static_content.html'/> + +<%block name="headextra"> + <%static:css group='course'/> + + +<%block name="title">${course.number} Peer Grading. + +<%include file="/courseware/course_navigation.html" args="active_page='staff_grading'" /> + +<%block name="js_extra"> + <%static:js group='peer_grading'/> + + + +
+
+
+ +
+

Peer Grading

+
+
+

Learning to Grade

+
+

Before you can do any proper peer grading, you first need to understand how your own grading compares to that of the instrutor. Once your grades begin to match the instructor's, you will move on to grading your peers!

+
+
+

You have successfully managed to calibrate your answers to that of the instructors and have moved onto the next step in the peer grading process.

+
+
+
+

Grading

+
+

You cannot start grading until you have graded a sufficient number of training problems and have been able to demonstrate that your scores closely match that of the instructor.

+
+
+

Now that you have finished your training, you are now allowed to grade your peers. Please keep in mind that students are allowed to respond to the grades and feedback they receive.

+
+
+
+ +
+
+
Question
+
+
+
+
+
+
+
Rubric
+
+
+
+
+
+ +
+ + +
+

Grading

+ +
+
+

+
+
+ + +
+
+

+

+ +
+ + +
+ +
+ +
+
+
+ +
+
+ + +
+

How did I do?

+
+
+ +
+ + +
+

Congratulations!

+

You have now completed the calibration step. You are now ready to start grading.

+ +
+ + +
+
diff --git a/lms/templates/self_assessment_hint.html b/lms/templates/self_assessment_hint.html index 64c45b809e..1adfc69e39 100644 --- a/lms/templates/self_assessment_hint.html +++ b/lms/templates/self_assessment_hint.html @@ -2,6 +2,6 @@
${hint_prompt}
-
diff --git a/lms/templates/self_assessment_prompt.html b/lms/templates/self_assessment_prompt.html index 91472cbdaf..2ec83ef2a7 100644 --- a/lms/templates/self_assessment_prompt.html +++ b/lms/templates/self_assessment_prompt.html @@ -1,5 +1,5 @@ -
+
${prompt} @@ -9,6 +9,8 @@
+
+
${initial_rubric}
${initial_hint}
@@ -16,5 +18,4 @@
${initial_message}
-
diff --git a/lms/templates/self_assessment_rubric.html b/lms/templates/self_assessment_rubric.html index 5bcb3bba93..2d32ffe8d3 100644 --- a/lms/templates/self_assessment_rubric.html +++ b/lms/templates/self_assessment_rubric.html @@ -1,7 +1,7 @@

Self-assess your answer with this rubric:

- ${rubric} + ${rubric | n }
% if not read_only: diff --git a/lms/templates/static_templates/faq.html b/lms/templates/static_templates/faq.html index 96e781e817..b0543df264 100644 --- a/lms/templates/static_templates/faq.html +++ b/lms/templates/static_templates/faq.html @@ -21,19 +21,9 @@

What is edX?

edX is a not-for-profit enterprise of its founding partners, the Massachusetts Institute of Technology (MIT) and Harvard University that offers online learning to on-campus students and to millions of people around the world. To do so, edX is building an open-source online learning platform and hosts an online web portal at www.edx.org for online education.

-

EdX currently offers HarvardX, MITx and BerkeleyX classes online for free. Beginning in fall 2013, edX will offer WellesleyX and GeorgetownX classes online for free. The University of Texas System includes nine universities and six health institutions. The edX institutions aim to extend their collective reach to build a global community of online students. Along with offering online courses, the three universities undertake research on how students learn and how technology can transform learning – both on-campus and online throughout the world.

+

EdX currently offers HarvardX, MITx and BerkeleyX classes online for free. Beginning in fall 2013, edX will offer WellesleyX and GeorgetownX classes online for free. The University of Texas System includes nine universities and six health institutions. The edX institutions aim to extend their collective reach to build a global community of online students. Along with offering online courses, the three universities undertake research on how students learn and how technology can transform learning both on-campus and online throughout the world.

-
-

Why is Georgetown University joining edX?

-

Georgetown University, the oldest Catholic and Jesuit university in America, has a long history of providing courses of the highest quality through its schools of foreign service, law, medicine, nursing, business, as well as the arts and sciences. GeorgetownX courses, and the mission-driven Georgetown faculty, will provide a new perspective from which the hundreds of thousands of edX learners can benefit.

-

Georgetown offers a world-class learning experience focused on educating the whole person through exposure to different faiths, cultures and beliefs. Georgetown's global perspective with presences in Qatar, Shanghai, Santiago, Buenos Aires and London aligns with edX's mission to extend access to education around the world and to perform research into how students learn and how technology can transform learning both on-campus and online.

-

As with all consortium members, the values of Georgetown are aligned with those of edX. Georgetown and edX are both committed to expanding access to education to learners of all ages, means, and backgrounds. Both institutions are also committed to the non-profit model. We value principle not profit.

-
-
-

How many GeorgetownX courses will be offered initially? When?

-

Initially, GeorgetownX will begin offering edX courses in the fall of 2013. The courses, which will offer students the opportunity to explore a variety of subjects, will be of the same high quality and rigor as those offered on the Georgetown University campus.

-

Will edX be adding additional X Universities?

More than 200 institutions from around the world have expressed interest in collaborating with edX since Harvard and MIT announced its creation in May. EdX is focused above all on quality and developing the best not-for-profit model for online education. In addition to providing online courses on the edX platform, the "X University" Consortium will be a forum in which members can share experiences around online learning. Harvard, MIT, UC Berkeley, the University of Texas system and the other consortium members will work collaboratively to establish the "X University" Consortium, whose membership will expand to include additional "X Universities". Each member of the consortium will offer courses on the edX platform as an "X University." The gathering of many universities' educational content together on one site will enable learners worldwide to access the offered course content of any participating university from a single website, and to use a set of online educational tools shared by all participating universities.

diff --git a/lms/templates/static_templates/jobs.html b/lms/templates/static_templates/jobs.html index 2d921eb76f..f2752a0939 100644 --- a/lms/templates/static_templates/jobs.html +++ b/lms/templates/static_templates/jobs.html @@ -38,7 +38,7 @@

EdX is looking to add new talent to our team!

Our mission is to give a world-class education to everyone, everywhere, regardless of gender, income or social status

-

Today, EdX.org, a not-for-profit provides hundreds of thousands of people from around the globe with access free education.  We offer amazing quality classes by the best professors from the best schools. We enable our members to uncover a new passion that will transform their lives and their communities.

+

Today, EdX.org, a not-for-profit provides hundreds of thousands of people from around the globe with access to free education.  We offer amazing quality classes by the best professors from the best schools. We enable our members to uncover a new passion that will transform their lives and their communities.

Around the world-from coast to coast, in over 192 countries, people are making the decision to take one or several of our courses. As we continue to grow our operations, we are looking for talented, passionate people with great ideas to join the edX team. We aim to create an environment that is supportive, diverse, and as fun as our brand. If you're results-oriented, dedicated, and ready to contribute to an unparalleled member experience for our community, we really want you to apply.

As part of the edX team, you’ll receive:

    @@ -51,8 +51,40 @@
+
+
+

INSTRUCTIONAL DESIGNER — CONTRACT OPPORTUNITY

+

The Instructional Designer will work collaboratively with the edX content and engineering teams to plan, develop and deliver highly engaging and media rich online courses. The Instructional Designer will be a flexible thinker, able to determine and apply sound pedagogical strategies to unique situations and a diverse set of academic disciplines.

+

Responsibilities:

+
    +
  • Work with the video production team, product managers and course staff on the implementation of instructional design approaches in the development of media and other course materials.
  • +
  • Based on course staff and faculty input, articulate learning objectives and align them to design strategies and assessments.
  • +
  • Develop flipped classroom instructional strategies in coordination with community college faculty.
  • +
  • Produce clear and instructionally effective copy, instructional text, and audio and video scripts
  • +
  • Identify and deploy instructional design best practices for edX course staff and faculty as needed.
  • +
  • Create course communication style guides. Train and coach teaching staff on best practices for communication and discussion management.
  • +
  • Serve as a liaison to instructional design teams based at X universities.
  • +
  • Consult on peer review processes to be used by learners in selected courses.
  • +
  • Ability to apply game-based learning theory and design into selected courses as appropriate.
  • +
  • Use learning analytics and metrics to inform course design and revision process.
  • +
  • Collaborate with key research and learning sciences stakeholders at edX and partner institutions for the development of best practices for MOOC teaching and learning and course design.
  • +
  • Support the development of pilot courses and modules used for sponsored research initiatives.
  • +
+

Qualifications:

+
    +
  • Master's Degree in Educational Technology, Instructional Design or related field. Experience in higher education with additional experience in a start-up or research environment preferable.
  • +
  • Excellent interpersonal and communication (written and verbal), project management, problem-solving and time management skills. The ability to be flexible with projects and to work on multiple courses essential. Ability to meet deadlines and manage expectations of constituents.
  • +
  • Capacity to develop new and relevant technology skills. Experience using game theory design and learning analytics to inform instructional design decisions and strategy.
  • +
  • Technical Skills: Video and screencasting experience. LMS Platform experience, xml, HTML, CSS, Adobe Design Suite, Camtasia or Captivate experience. Experience with web 2.0 collaboration tools.
  • +
+

Eligible candidates will be invited to respond to an Instructional Design task based on current or future edX course development needs.

+

If you are interested in this position, please send an email to jobs@edx.org.

+
+
+
-

MEMBER SERVICES MANAGER

+
+

MEMBER SERVICES MANAGER

The edX Member Services Manager is responsible for both defining support best practices and directly supporting edX members by handling or routing issues that come in from our websites, email and social media tools.  We are looking for a passionate person to help us define and own this experience. While this is a Manager level position, we see this candidate quickly moving through the ranks, leading a larger team of employees over time. This staff member will be running our fast growth support organization.

Responsibilities:

    @@ -69,7 +101,7 @@

Qualifications:

    -
  • 5-8 years in a call center or support team management
  • +
  • 5-8 years in a call center or support team management
  • Exemplary customer service skills
  • Experience in creating and rolling out support/service best practices
  • Solid computer skills – must be fluent with desktop applications and have a basic understanding of web technologies (i.e. basic HTML)
  • @@ -85,43 +117,43 @@
-
+
-

MEMBER SERVICES ASSOCIATE

-

The edX Member Services Associate is responsible for providing customer service and support to edX customers by handling or routing issues that come in from our websites, email and social media tools.

+

DIRECTOR OF PR AND COMMUNICATIONS

+

The edX Director of PR & Communications is responsible for creating and executing all PR strategy and providing company-wide leadership to help create and refine the edX core messages and identity as the revolutionary global leader in both on-campus and worldwide education. The Director will design and direct a communications program that conveys cohesive and compelling information about edX's mission, activities, personnel and products while establishing a distinct identity for edX as the leader in online education for both students and learning institutions.

Responsibilities:

    -
  • Resolves issues according to edX policies; escalates non-routine issues.
  • -
  • Educates members on edX policies and getting started
  • -
  • May assist new members with edX procedures and processing registration issues.
  • -
  • Provides timely follow-up and resolution to issues.
  • -
  • May specialize in specific area of operation or handle more complex issues.
  • -
  • A passion for doing the right thing - at edX the member is always our top priority.
    -
  • +
  • Develop and execute goals and strategy for a comprehensive external and internal communications program focused on driving student engagement around courses and institutional adoption of the edX learning platform.
  • +
  • Work with media, either directly or through our agency of record, to establish edX as the industry leader in global learning.
  • +
  • Work with key influencers including government officials on a global scale to ensure the edX mission, content and tools are embraced and supported worldwide.
  • +
  • Work with marketing colleagues to co-develop and/or monitor and evaluate the content and delivery of all communications messages and collateral.
  • +
  • Initiate and/or plan thought leadership events developed to heighten target-audience awareness; participate in meetings and trade shows
  • +
  • Conduct periodic research to determine communications benchmarks
  • +
  • Inform employees about edX's vision, values, policies, and strategies to enable them to perform their jobs efficiently and drive morale.
  • +
  • Work with and manage existing communications team to effectively meet strategic goals.

Qualifications:

    -
  • 1-3 years prior experience in customer service role.
  • -
  • Excellent customer service skills.
  • -
  • Problem solving-the individual identifies and resolves problems in a timely manner, gathers and analyzes information skillfully and maintains confidentiality.
  • -
  • Interpersonal skills-the individual maintains confidentiality, remains open to others' ideas and exhibits willingness to try new things.
  • -
  • Oral communication-the individual speaks clearly and persuasively in positive or negative situations and demonstrates group presentation skills.
  • -
  • Written communication-the individual edits work for spelling and grammar, presents numerical data effectively and is able to read and interpret written information.
  • -
  • Adaptability-the individual adapts to changes in the work environment, manages competing demands and is able to deal with frequent change, delays or unexpected events.
  • -
  • Dependability-the individual is consistently at work and on time, follows instructions, responds to management direction and solicits feedback to improve performance.
  • -
  • College degree preferred.
  • -
  • Basic computer skills.
  • +
  • Ten years of experience in PR and communications
  • +
  • Ability to work creatively and provide company-wide leadership in a fast-paced, dynamic start-up environment required
  • +
  • Adaptability - the individual adapts to changes in the work environment, manages competing demands and is able to deal with frequent change, delays or unexpected events.
  • +
  • Experience in working in successful consumer-focused startups preferred
  • +
  • PR agency experience in setting strategy for complex multichannel, multinational organizations a plus.
  • +
  • Extensive writing experience and simply amazing oral, written, and interpersonal communications skills
  • +
  • B.A./B.S. in communications or related field

If you are interested in this position, please send an email to jobs@edx.org.

+

Positions

How to Apply

E-mail your resume, cover letter and any other materials to jobs@edx.org

diff --git a/lms/templates/static_templates/media-kit.html b/lms/templates/static_templates/media-kit.html new file mode 100644 index 0000000000..458cfb8e15 --- /dev/null +++ b/lms/templates/static_templates/media-kit.html @@ -0,0 +1,111 @@ +<%namespace name='static' file='../static_content.html'/> +<%inherit file="../main.html" /> + +<%block name="title">edX Media Kit + +
+

edX Media Kit

+ +
+ +
+
+
+

Welcome to the edX Media Kit

+
+ +
+

Need images for a news story? Feel free to download high-resolution versions of the photos below by clicking on the thumbnail. Please credit edX in your use.

+

We’ve included visual guidelines on how to use the edX logo within the download zip, which also includes Adobe Illustrator and eps versions of the logo.

+

For more information about edX, please contact Dan O'Connell, Associate Director of Communications, via oconnell@edx.org.

+
+ + + +
+ +
+
+

The edX Media Library

+
+ +
+ +
+
+
+
+ +<%block name="js_extra"> + + \ No newline at end of file diff --git a/lms/templates/static_templates/press_releases/Spring_2013_course_announcements.html b/lms/templates/static_templates/press_releases/Spring_2013_course_announcements.html new file mode 100644 index 0000000000..77e7beb5f7 --- /dev/null +++ b/lms/templates/static_templates/press_releases/Spring_2013_course_announcements.html @@ -0,0 +1,75 @@ +<%! from django.core.urlresolvers import reverse %> +<%inherit file="../../main.html" /> + +<%namespace name='static' file='../../static_content.html'/> + +<%block name="title">EdX expands platform, announces first wave of courses for spring 2013 +
+ + +
+
+

EdX expands platform, announces first wave of courses for spring 2013

+
+ +
+

Leading minds from top universities to offer world-wide MOOC courses on statistics, history, justice, and poverty

+ +

CAMBRIDGE, MA – December 19, 2012 — EdX, the not-for-profit online learning initiative founded by Harvard University and the Massachusetts Institute of Technology (MIT), announced today its initial spring 2013 schedule including its first set of courses in the humanities and social sciences – introductory courses with wide, global appeal. In its second semester, edX expands its online courses to a variety of subjects ranging from the ancient Greek hero to the riddle of world poverty, all taught by experts at some of the world’s leading universities. EdX is also bringing back several courses from its popular offerings in the fall semester.

+ +

“EdX is both revolutionizing and democratizing education,” said Anant Agarwal, President of edX. “In just eight months we’ve attracted more than half a million unique users from around the world to our learning portal. Now, with these spring courses we are entering a new era – and are poised to touch millions of lives with the best courses from the best faculty at the best institutions in the world.”

+ +

Building on the success of its initial offerings, edX is broadening the courses on its innovative educational platform. In its second semester – now open for registration – edX continues with courses from some of the world’s most esteemed faculty from UC Berkeley, Harvard and MIT. Spring 2013 courses include:

+ + + +

“I'm delighted to have my Justice course on edX,” said Michael Sandel, Ann T. and Robert M. Bass Professor of Government at Harvard University, “where students everywhere will be able to engage in a global dialogue about the big moral and civic questions of our time.”

+ +

In addition to these new courses, edX is bringing back several courses from the popular fall 2012 semester: Introduction to Computer Science and Programming; Introduction to Solid State Chemistry; Introduction to Artificial Intelligence; Software as a Service I; Software as a Service II; Foundations of Computer Graphics.

+ +

This spring also features Harvard's Copyright, taught by Harvard Law School professor William Fisher III, former law clerk to Justice Thurgood Marshall and expert on the hotly debated U.S. copyright system, which will explore the current law of copyright and the ongoing debates concerning how that law should be reformed. Copyright will be offered as an experimental course, taking advantage of different combinations and uses of teaching materials, educational technologies, and the edX platform. 500 learners will be selected through an open application process that will run through January 3rd 2013.

+ +

These new courses would not be possible without the contributions of key edX institutions, including UC Berkeley, which is the inaugural chair of the “X University” consortium and major contributor to the platform. All of the courses will be hosted on edX’s innovative platform at www.edx.org and are open for registration as of today. EdX expects to announce a second set of spring 2013 courses in the future.

+ +

About edX

+ +

EdX is a not-for-profit enterprise of its founding partners Harvard University and the Massachusetts Institute of Technology focused on transforming online and on-campus learning through groundbreaking methodologies, game-like experiences and cutting-edge research. EdX provides inspirational and transformative knowledge to students of all ages, social status, and income who form worldwide communities of learners. EdX uses its open source technology to transcend physical and social borders. We’re focused on people, not profit. EdX is based in Cambridge, Massachusetts in the USA.

+ +
+

Contact: Brad Baker

+

BBaker@webershandwick.com

+

617-520-7260

+
+ + +
+
+
diff --git a/lms/templates/video.html b/lms/templates/video.html index 5c041d5c70..4d4df8c3c7 100644 --- a/lms/templates/video.html +++ b/lms/templates/video.html @@ -2,17 +2,21 @@

${display_name}

% endif -
-
-
-
-
-
- -
-
+%if settings.MITX_FEATURES['STUB_VIDEO_FOR_TESTING']: +
+%else: +
+
+
+
+
+
+
+
+
-
+%endif + % if source:

Download video here.

diff --git a/lms/urls.py b/lms/urls.py index 3b476f4821..cab0533f89 100644 --- a/lms/urls.py +++ b/lms/urls.py @@ -37,6 +37,8 @@ urlpatterns = ('', url(r'^event$', 'track.views.user_track'), url(r'^t/(?P