diff --git a/apt-packages.txt b/apt-packages.txt new file mode 100644 index 0000000000..b783ccb67e --- /dev/null +++ b/apt-packages.txt @@ -0,0 +1,25 @@ +python-software-properties +pkg-config +curl +git +python-virtualenv +build-essential +python-dev +gfortran +liblapack-dev +libfreetype6-dev +libpng12-dev +libxml2-dev +libxslt-dev +yui-compressor +graphviz +graphviz-dev +mysql-server +libmysqlclient-dev +libgeos-dev +libreadline6 +libreadline6-dev +mongodb +nodejs +npm +coffeescript diff --git a/apt-repos.txt b/apt-repos.txt new file mode 100644 index 0000000000..6ce9f2c34b --- /dev/null +++ b/apt-repos.txt @@ -0,0 +1,3 @@ +ppa:chris-lea/node.js +ppa:chris-lea/node.js-libs +ppa:chris-lea/libjs-underscore diff --git a/brew-formulas.txt b/brew-formulas.txt index b5b555e2a0..061297edc5 100644 --- a/brew-formulas.txt +++ b/brew-formulas.txt @@ -1,10 +1,12 @@ -readline -sqlite -gdbm -pkg-config -gfortran -python -yuicompressor +readline +sqlite +gdbm +pkg-config +gfortran +python +yuicompressor node graphviz mysql +geos +mongodb diff --git a/cms/.coveragerc b/cms/.coveragerc index 42638feb8f..9b1e59d670 100644 --- a/cms/.coveragerc +++ b/cms/.coveragerc @@ -2,11 +2,13 @@ [run] data_file = reports/cms/.coverage source = cms +omit = cms/envs/*, cms/manage.py [report] ignore_errors = True [html] +title = CMS Python Test Coverage Report directory = reports/cms/cover [xml] diff --git a/common/djangoapps/mitxmako/shortcuts.py b/common/djangoapps/mitxmako/shortcuts.py index ba22f2db20..ebeb0fc180 100644 --- a/common/djangoapps/mitxmako/shortcuts.py +++ b/common/djangoapps/mitxmako/shortcuts.py @@ -12,10 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging - -log = logging.getLogger("mitx." 
+ __name__) - from django.template import Context from django.http import HttpResponse @@ -42,7 +38,7 @@ def render_to_string(template_name, dictionary, context=None, namespace='main'): context_dictionary.update(context) # fetch and render template template = middleware.lookup[namespace].get_template(template_name) - return template.render(**context_dictionary) + return template.render_unicode(**context_dictionary) def render_to_response(template_name, dictionary, context_instance=None, namespace='main', **kwargs): diff --git a/common/djangoapps/mitxmako/template.py b/common/djangoapps/mitxmako/template.py index 56096fe173..947dc8c1a4 100644 --- a/common/djangoapps/mitxmako/template.py +++ b/common/djangoapps/mitxmako/template.py @@ -54,5 +54,4 @@ class Template(MakoTemplate): context_dictionary['MITX_ROOT_URL'] = settings.MITX_ROOT_URL context_dictionary['django_context'] = context_instance - return super(Template, self).render(**context_dictionary) - + return super(Template, self).render_unicode(**context_dictionary) diff --git a/common/djangoapps/student/admin.py b/common/djangoapps/student/admin.py index ec3b708ca7..64fe844801 100644 --- a/common/djangoapps/student/admin.py +++ b/common/djangoapps/student/admin.py @@ -12,6 +12,8 @@ admin.site.register(UserTestGroup) admin.site.register(CourseEnrollment) +admin.site.register(CourseEnrollmentAllowed) + admin.site.register(Registration) admin.site.register(PendingNameChange) diff --git a/common/djangoapps/student/management/commands/pearson_export_cdd.py b/common/djangoapps/student/management/commands/pearson_export_cdd.py index b10e92d92d..67230c7f74 100644 --- a/common/djangoapps/student/management/commands/pearson_export_cdd.py +++ b/common/djangoapps/student/management/commands/pearson_export_cdd.py @@ -1,14 +1,17 @@ import csv -import uuid -from collections import defaultdict, OrderedDict +from collections import OrderedDict from datetime import datetime +from os.path import isdir +from optparse import 
make_option -from django.core.management.base import BaseCommand, CommandError +from django.core.management.base import BaseCommand from student.models import TestCenterUser class Command(BaseCommand): + CSV_TO_MODEL_FIELDS = OrderedDict([ + # Skipping optional field CandidateID ("ClientCandidateID", "client_candidate_id"), ("FirstName", "first_name"), ("LastName", "last_name"), @@ -34,9 +37,17 @@ class Command(BaseCommand): ("LastUpdate", "user_updated_at"), # in UTC, so same as what we store ]) - args = '' + option_list = BaseCommand.option_list + ( + make_option( + '--dump_all', + action='store_true', + dest='dump_all', + ), + ) + + args = '' help = """ - Export user information from TestCenterUser model into a tab delimited + Export user demographic information from TestCenterUser model into a tab delimited text file with a format that Pearson expects. """ def handle(self, *args, **kwargs): @@ -44,9 +55,33 @@ class Command(BaseCommand): print Command.help return - self.reset_sample_data() + # update time should use UTC in order to be comparable to the user_updated_at + # field + uploaded_at = datetime.utcnow() - with open(args[0], "wb") as outfile: + # if specified destination is an existing directory, then + # create a filename for it automatically. If it doesn't exist, + # or exists as a file, then we will just write to it. + # Name will use timestamp -- this is UTC, so it will look funny, + # but it should at least be consistent with the other timestamps + # used in the system. + dest = args[0] + if isdir(dest): + destfile = os.path.join(dest, uploaded_at.strftime("cdd-%Y%m%d-%H%M%S.dat")) + else: + destfile = dest + + # strings must be in latin-1 format. CSV parser will + # otherwise convert unicode objects to ascii. 
+ def ensure_encoding(value): + if isinstance(value, unicode): + return value.encode('iso-8859-1') + else: + return value + + dump_all = kwargs['dump_all'] + + with open(destfile, "wb") as outfile: writer = csv.DictWriter(outfile, Command.CSV_TO_MODEL_FIELDS, delimiter="\t", @@ -54,103 +89,14 @@ class Command(BaseCommand): extrasaction='ignore') writer.writeheader() for tcu in TestCenterUser.objects.order_by('id'): - record = dict((csv_field, getattr(tcu, model_field)) - for csv_field, model_field - in Command.CSV_TO_MODEL_FIELDS.items()) - record["LastUpdate"] = record["LastUpdate"].strftime("%Y/%m/%d %H:%M:%S") - writer.writerow(record) + if dump_all or tcu.needs_uploading: + record = dict((csv_field, ensure_encoding(getattr(tcu, model_field))) + for csv_field, model_field + in Command.CSV_TO_MODEL_FIELDS.items()) + record["LastUpdate"] = record["LastUpdate"].strftime("%Y/%m/%d %H:%M:%S") + writer.writerow(record) + tcu.uploaded_at = uploaded_at + tcu.save() - def reset_sample_data(self): - def make_sample(**kwargs): - data = dict((model_field, kwargs.get(model_field, "")) - for model_field in Command.CSV_TO_MODEL_FIELDS.values()) - return TestCenterUser(**data) - - def generate_id(): - return "edX{:012}".format(uuid.uuid4().int % (10**12)) - - # TestCenterUser.objects.all().delete() - - samples = [ - make_sample( - client_candidate_id=generate_id(), - first_name="Jack", - last_name="Doe", - middle_name="C", - address_1="11 Cambridge Center", - address_2="Suite 101", - city="Cambridge", - state="MA", - postal_code="02140", - country="USA", - phone="(617)555-5555", - phone_country_code="1", - user_updated_at=datetime.utcnow() - ), - make_sample( - client_candidate_id=generate_id(), - first_name="Clyde", - last_name="Smith", - middle_name="J", - suffix="Jr.", - salutation="Mr.", - address_1="1 Penny Lane", - city="Honolulu", - state="HI", - postal_code="96792", - country="USA", - phone="555-555-5555", - phone_country_code="1", - user_updated_at=datetime.utcnow() - 
), - make_sample( - client_candidate_id=generate_id(), - first_name="Patty", - last_name="Lee", - salutation="Dr.", - address_1="P.O. Box 555", - city="Honolulu", - state="HI", - postal_code="96792", - country="USA", - phone="808-555-5555", - phone_country_code="1", - user_updated_at=datetime.utcnow() - ), - make_sample( - client_candidate_id=generate_id(), - first_name="Jimmy", - last_name="James", - address_1="2020 Palmer Blvd.", - city="Springfield", - state="MA", - postal_code="96792", - country="USA", - phone="917-555-5555", - phone_country_code="1", - extension="2039", - fax="917-555-5556", - fax_country_code="1", - company_name="ACME Traps", - user_updated_at=datetime.utcnow() - ), - make_sample( - client_candidate_id=generate_id(), - first_name="Yeong-Un", - last_name="Seo", - address_1="Duryu, Lotte 101", - address_2="Apt 55", - city="Daegu", - country="KOR", - phone="917-555-5555", - phone_country_code="011", - user_updated_at=datetime.utcnow() - ), - ] - for tcu in samples: - tcu.save() - - - \ No newline at end of file diff --git a/common/djangoapps/student/management/commands/pearson_export_ead.py b/common/djangoapps/student/management/commands/pearson_export_ead.py index 415f0812ae..de3bfc04ee 100644 --- a/common/djangoapps/student/management/commands/pearson_export_ead.py +++ b/common/djangoapps/student/management/commands/pearson_export_ead.py @@ -1,150 +1,93 @@ import csv -import uuid -from collections import defaultdict, OrderedDict +from collections import OrderedDict from datetime import datetime +from os.path import isdir, join +from optparse import make_option -from django.core.management.base import BaseCommand, CommandError +from django.core.management.base import BaseCommand -from student.models import TestCenterUser - -def generate_id(): - return "{:012}".format(uuid.uuid4().int % (10**12)) +from student.models import TestCenterRegistration class Command(BaseCommand): - args = '' + + CSV_TO_MODEL_FIELDS = OrderedDict([ + 
('AuthorizationTransactionType', 'authorization_transaction_type'), + ('AuthorizationID', 'authorization_id'), + ('ClientAuthorizationID', 'client_authorization_id'), + ('ClientCandidateID', 'client_candidate_id'), + ('ExamAuthorizationCount', 'exam_authorization_count'), + ('ExamSeriesCode', 'exam_series_code'), + ('Accommodations', 'accommodation_code'), + ('EligibilityApptDateFirst', 'eligibility_appointment_date_first'), + ('EligibilityApptDateLast', 'eligibility_appointment_date_last'), + ("LastUpdate", "user_updated_at"), # in UTC, so same as what we store + ]) + + args = '' help = """ - Export user information from TestCenterUser model into a tab delimited + Export user registration information from TestCenterRegistration model into a tab delimited text file with a format that Pearson expects. """ - FIELDS = [ - 'AuthorizationTransactionType', - 'AuthorizationID', - 'ClientAuthorizationID', - 'ClientCandidateID', - 'ExamAuthorizationCount', - 'ExamSeriesCode', - 'EligibilityApptDateFirst', - 'EligibilityApptDateLast', - 'LastUpdate', - ] + + option_list = BaseCommand.option_list + ( + make_option( + '--dump_all', + action='store_true', + dest='dump_all', + ), + make_option( + '--force_add', + action='store_true', + dest='force_add', + ), + ) + def handle(self, *args, **kwargs): if len(args) < 1: print Command.help return - # self.reset_sample_data() + # update time should use UTC in order to be comparable to the user_updated_at + # field + uploaded_at = datetime.utcnow() - with open(args[0], "wb") as outfile: + # if specified destination is an existing directory, then + # create a filename for it automatically. If it doesn't exist, + # or exists as a file, then we will just write to it. + # Name will use timestamp -- this is UTC, so it will look funny, + # but it should at least be consistent with the other timestamps + # used in the system. 
+ dest = args[0] + if isdir(dest): + destfile = join(dest, uploaded_at.strftime("ead-%Y%m%d-%H%M%S.dat")) + else: + destfile = dest + + dump_all = kwargs['dump_all'] + + with open(destfile, "wb") as outfile: writer = csv.DictWriter(outfile, - Command.FIELDS, + Command.CSV_TO_MODEL_FIELDS, delimiter="\t", quoting=csv.QUOTE_MINIMAL, extrasaction='ignore') writer.writeheader() - for tcu in TestCenterUser.objects.order_by('id')[:5]: - record = defaultdict( - lambda: "", - AuthorizationTransactionType="Add", - ClientAuthorizationID=generate_id(), - ClientCandidateID=tcu.client_candidate_id, - ExamAuthorizationCount="1", - ExamSeriesCode="6002x001", - EligibilityApptDateFirst="2012/12/15", - EligibilityApptDateLast="2012/12/30", - LastUpdate=datetime.utcnow().strftime("%Y/%m/%d %H:%M:%S") - ) - writer.writerow(record) + for tcr in TestCenterRegistration.objects.order_by('id'): + if dump_all or tcr.needs_uploading: + record = dict((csv_field, getattr(tcr, model_field)) + for csv_field, model_field + in Command.CSV_TO_MODEL_FIELDS.items()) + record["LastUpdate"] = record["LastUpdate"].strftime("%Y/%m/%d %H:%M:%S") + record["EligibilityApptDateFirst"] = record["EligibilityApptDateFirst"].strftime("%Y/%m/%d") + record["EligibilityApptDateLast"] = record["EligibilityApptDateLast"].strftime("%Y/%m/%d") + if kwargs['force_add']: + record['AuthorizationTransactionType'] = 'Add' + + writer.writerow(record) + tcr.uploaded_at = uploaded_at + tcr.save() - def reset_sample_data(self): - def make_sample(**kwargs): - data = dict((model_field, kwargs.get(model_field, "")) - for model_field in Command.CSV_TO_MODEL_FIELDS.values()) - return TestCenterUser(**data) - - # TestCenterUser.objects.all().delete() - - samples = [ - make_sample( - client_candidate_id=generate_id(), - first_name="Jack", - last_name="Doe", - middle_name="C", - address_1="11 Cambridge Center", - address_2="Suite 101", - city="Cambridge", - state="MA", - postal_code="02140", - country="USA", - phone="(617)555-5555", - 
phone_country_code="1", - user_updated_at=datetime.utcnow() - ), - make_sample( - client_candidate_id=generate_id(), - first_name="Clyde", - last_name="Smith", - middle_name="J", - suffix="Jr.", - salutation="Mr.", - address_1="1 Penny Lane", - city="Honolulu", - state="HI", - postal_code="96792", - country="USA", - phone="555-555-5555", - phone_country_code="1", - user_updated_at=datetime.utcnow() - ), - make_sample( - client_candidate_id=generate_id(), - first_name="Patty", - last_name="Lee", - salutation="Dr.", - address_1="P.O. Box 555", - city="Honolulu", - state="HI", - postal_code="96792", - country="USA", - phone="808-555-5555", - phone_country_code="1", - user_updated_at=datetime.utcnow() - ), - make_sample( - client_candidate_id=generate_id(), - first_name="Jimmy", - last_name="James", - address_1="2020 Palmer Blvd.", - city="Springfield", - state="MA", - postal_code="96792", - country="USA", - phone="917-555-5555", - phone_country_code="1", - extension="2039", - fax="917-555-5556", - fax_country_code="1", - company_name="ACME Traps", - user_updated_at=datetime.utcnow() - ), - make_sample( - client_candidate_id=generate_id(), - first_name="Yeong-Un", - last_name="Seo", - address_1="Duryu, Lotte 101", - address_2="Apt 55", - city="Daegu", - country="KOR", - phone="917-555-5555", - phone_country_code="011", - user_updated_at=datetime.utcnow() - ), - ] - for tcu in samples: - tcu.save() - - - \ No newline at end of file diff --git a/common/djangoapps/student/management/commands/pearson_make_tc_registration.py b/common/djangoapps/student/management/commands/pearson_make_tc_registration.py new file mode 100644 index 0000000000..81a478d19d --- /dev/null +++ b/common/djangoapps/student/management/commands/pearson_make_tc_registration.py @@ -0,0 +1,196 @@ +from optparse import make_option +from time import strftime + +from django.contrib.auth.models import User +from django.core.management.base import BaseCommand, CommandError + +from student.models import 
TestCenterUser, TestCenterRegistration, TestCenterRegistrationForm, get_testcenter_registration +from student.views import course_from_id +from xmodule.course_module import CourseDescriptor +from xmodule.modulestore.exceptions import ItemNotFoundError + +class Command(BaseCommand): + option_list = BaseCommand.option_list + ( + # registration info: + make_option( + '--accommodation_request', + action='store', + dest='accommodation_request', + ), + make_option( + '--accommodation_code', + action='store', + dest='accommodation_code', + ), + make_option( + '--client_authorization_id', + action='store', + dest='client_authorization_id', + ), + # exam info: + make_option( + '--exam_series_code', + action='store', + dest='exam_series_code', + ), + make_option( + '--eligibility_appointment_date_first', + action='store', + dest='eligibility_appointment_date_first', + help='use YYYY-MM-DD format if overriding existing course values, or YYYY-MM-DDTHH:MM if not using an existing course.' + ), + make_option( + '--eligibility_appointment_date_last', + action='store', + dest='eligibility_appointment_date_last', + help='use YYYY-MM-DD format if overriding existing course values, or YYYY-MM-DDTHH:MM if not using an existing course.' + ), + # internal values: + make_option( + '--authorization_id', + action='store', + dest='authorization_id', + help='ID we receive from Pearson for a particular authorization' + ), + make_option( + '--upload_status', + action='store', + dest='upload_status', + help='status value assigned by Pearson' + ), + make_option( + '--upload_error_message', + action='store', + dest='upload_error_message', + help='error message provided by Pearson on a failure.' + ), + # control values: + make_option( + '--ignore_registration_dates', + action='store_true', + dest='ignore_registration_dates', + help='find exam info for course based on exam_series_code, even if the exam is not active.' 
+ ), + ) + args = "" + help = "Create or modify a TestCenterRegistration entry for a given Student" + + @staticmethod + def is_valid_option(option_name): + base_options = set(option.dest for option in BaseCommand.option_list) + return option_name not in base_options + + + def handle(self, *args, **options): + username = args[0] + course_id = args[1] + print username, course_id + + our_options = dict((k, v) for k, v in options.items() + if Command.is_valid_option(k) and v is not None) + try: + student = User.objects.get(username=username) + except User.DoesNotExist: + raise CommandError("User \"{}\" does not exist".format(username)) + + try: + testcenter_user = TestCenterUser.objects.get(user=student) + except TestCenterUser.DoesNotExist: + raise CommandError("User \"{}\" does not have an existing demographics record".format(username)) + + # check to see if a course_id was specified, and use information from that: + try: + course = course_from_id(course_id) + if 'ignore_registration_dates' in our_options: + examlist = [exam for exam in course.test_center_exams if exam.exam_series_code == our_options.get('exam_series_code')] + exam = examlist[0] if len(examlist) > 0 else None + else: + exam = course.current_test_center_exam + except ItemNotFoundError: + # otherwise use explicit values (so we don't have to define a course): + exam_name = "Dummy Placeholder Name" + exam_info = { 'Exam_Series_Code': our_options['exam_series_code'], + 'First_Eligible_Appointment_Date' : our_options['eligibility_appointment_date_first'], + 'Last_Eligible_Appointment_Date' : our_options['eligibility_appointment_date_last'], + } + exam = CourseDescriptor.TestCenterExam(course_id, exam_name, exam_info) + # update option values for date_first and date_last to use YYYY-MM-DD format + # instead of YYYY-MM-DDTHH:MM + our_options['eligibility_appointment_date_first'] = strftime("%Y-%m-%d", exam.first_eligible_appointment_date) + our_options['eligibility_appointment_date_last'] = 
strftime("%Y-%m-%d", exam.last_eligible_appointment_date) + + if exam is None: + raise CommandError("Exam for course_id {%s} does not exist".format(course_id)) + + exam_code = exam.exam_series_code + + UPDATE_FIELDS = ( 'accommodation_request', + 'accommodation_code', + 'client_authorization_id', + 'exam_series_code', + 'eligibility_appointment_date_first', + 'eligibility_appointment_date_last', + ) + + # create and save the registration: + needs_updating = False + registrations = get_testcenter_registration(student, course_id, exam_code) + if len(registrations) > 0: + registration = registrations[0] + for fieldname in UPDATE_FIELDS: + if fieldname in our_options and registration.__getattribute__(fieldname) != our_options[fieldname]: + needs_updating = True; + else: + accommodation_request = our_options.get('accommodation_request','') + registration = TestCenterRegistration.create(testcenter_user, exam, accommodation_request) + needs_updating = True + + + if needs_updating: + # first update the record with the new values, if any: + for fieldname in UPDATE_FIELDS: + if fieldname in our_options and fieldname not in TestCenterRegistrationForm.Meta.fields: + registration.__setattr__(fieldname, our_options[fieldname]) + + # the registration form normally populates the data dict with + # the accommodation request (if any). But here we want to + # specify only those values that might change, so update the dict with existing + # values. 
+ form_options = dict(our_options) + for propname in TestCenterRegistrationForm.Meta.fields: + if propname not in form_options: + form_options[propname] = registration.__getattribute__(propname) + form = TestCenterRegistrationForm(instance=registration, data=form_options) + if form.is_valid(): + form.update_and_save() + print "Updated registration information for user's registration: username \"{}\" course \"{}\", examcode \"{}\"".format(student.username, course_id, exam_code) + else: + if (len(form.errors) > 0): + print "Field Form errors encountered:" + for fielderror in form.errors: + print "Field Form Error: %s" % fielderror + if (len(form.non_field_errors()) > 0): + print "Non-field Form errors encountered:" + for nonfielderror in form.non_field_errors: + print "Non-field Form Error: %s" % nonfielderror + + else: + print "No changes necessary to make to existing user's registration." + + # override internal values: + change_internal = False + if 'exam_series_code' in our_options: + exam_code = our_options['exam_series_code'] + registration = get_testcenter_registration(student, course_id, exam_code)[0] + for internal_field in [ 'upload_error_message', 'upload_status', 'authorization_id']: + if internal_field in our_options: + registration.__setattr__(internal_field, our_options[internal_field]) + change_internal = True + + if change_internal: + print "Updated confirmation information in existing user's registration." + registration.save() + else: + print "No changes necessary to make to confirmation information in existing user's registration." 
+ + diff --git a/common/djangoapps/student/management/commands/pearson_make_tc_user.py b/common/djangoapps/student/management/commands/pearson_make_tc_user.py index d974c25b6b..da9bfc3bd0 100644 --- a/common/djangoapps/student/management/commands/pearson_make_tc_user.py +++ b/common/djangoapps/student/management/commands/pearson_make_tc_user.py @@ -1,35 +1,53 @@ -import uuid -from datetime import datetime from optparse import make_option from django.contrib.auth.models import User -from django.core.management.base import BaseCommand, CommandError +from django.core.management.base import BaseCommand -from student.models import TestCenterUser +from student.models import TestCenterUser, TestCenterUserForm class Command(BaseCommand): option_list = BaseCommand.option_list + ( - make_option( - '--client_candidate_id', - action='store', - dest='client_candidate_id', - help='ID we assign a user to identify them to Pearson' - ), + # demographics: make_option( '--first_name', action='store', dest='first_name', ), + make_option( + '--middle_name', + action='store', + dest='middle_name', + ), make_option( '--last_name', action='store', dest='last_name', ), + make_option( + '--suffix', + action='store', + dest='suffix', + ), + make_option( + '--salutation', + action='store', + dest='salutation', + ), make_option( '--address_1', action='store', dest='address_1', ), + make_option( + '--address_2', + action='store', + dest='address_2', + ), + make_option( + '--address_3', + action='store', + dest='address_3', + ), make_option( '--city', action='store', @@ -58,15 +76,56 @@ class Command(BaseCommand): dest='phone', help='Pretty free-form (parens, spaces, dashes), but no country code' ), + make_option( + '--extension', + action='store', + dest='extension', + ), make_option( '--phone_country_code', action='store', dest='phone_country_code', help='Phone country code, just "1" for the USA' ), + make_option( + '--fax', + action='store', + dest='fax', + help='Pretty free-form (parens, 
spaces, dashes), but no country code' + ), + make_option( + '--fax_country_code', + action='store', + dest='fax_country_code', + help='Fax country code, just "1" for the USA' + ), + make_option( + '--company_name', + action='store', + dest='company_name', + ), + # internal values: + make_option( + '--client_candidate_id', + action='store', + dest='client_candidate_id', + help='ID we assign a user to identify them to Pearson' + ), + make_option( + '--upload_status', + action='store', + dest='upload_status', + help='status value assigned by Pearson' + ), + make_option( + '--upload_error_message', + action='store', + dest='upload_error_message', + help='error message provided by Pearson on a failure.' + ), ) args = "" - help = "Create a TestCenterUser entry for a given Student" + help = "Create or modify a TestCenterUser entry for a given Student" @staticmethod def is_valid_option(option_name): @@ -79,7 +138,52 @@ class Command(BaseCommand): print username our_options = dict((k, v) for k, v in options.items() - if Command.is_valid_option(k)) + if Command.is_valid_option(k) and v is not None) student = User.objects.get(username=username) - student.test_center_user = TestCenterUser(**our_options) - student.test_center_user.save() + try: + testcenter_user = TestCenterUser.objects.get(user=student) + needs_updating = testcenter_user.needs_update(our_options) + except TestCenterUser.DoesNotExist: + # do additional initialization here: + testcenter_user = TestCenterUser.create(student) + needs_updating = True + + if needs_updating: + # the registration form normally populates the data dict with + # all values from the testcenter_user. But here we only want to + # specify those values that change, so update the dict with existing + # values. 
+ form_options = dict(our_options) + for propname in TestCenterUser.user_provided_fields(): + if propname not in form_options: + form_options[propname] = testcenter_user.__getattribute__(propname) + form = TestCenterUserForm(instance=testcenter_user, data=form_options) + if form.is_valid(): + form.update_and_save() + else: + if (len(form.errors) > 0): + print "Field Form errors encountered:" + for fielderror in form.errors: + print "Field Form Error: %s" % fielderror + if (len(form.non_field_errors()) > 0): + print "Non-field Form errors encountered:" + for nonfielderror in form.non_field_errors: + print "Non-field Form Error: %s" % nonfielderror + + else: + print "No changes necessary to make to existing user's demographics." + + # override internal values: + change_internal = False + testcenter_user = TestCenterUser.objects.get(user=student) + for internal_field in [ 'upload_error_message', 'upload_status', 'client_candidate_id']: + if internal_field in our_options: + testcenter_user.__setattr__(internal_field, our_options[internal_field]) + change_internal = True + + if change_internal: + testcenter_user.save() + print "Updated confirmation information in existing user's demographics." + else: + print "No changes necessary to make to confirmation information in existing user's demographics." 
+ diff --git a/common/djangoapps/student/migrations/0021_remove_askbot.py b/common/djangoapps/student/migrations/0021_remove_askbot.py index 89f7208f40..83ad6791f2 100644 --- a/common/djangoapps/student/migrations/0021_remove_askbot.py +++ b/common/djangoapps/student/migrations/0021_remove_askbot.py @@ -26,14 +26,17 @@ class Migration(SchemaMigration): def forwards(self, orm): "Kill the askbot" - # For MySQL, we're batching the alters together for performance reasons - if db.backend_name == 'mysql': - drops = ["drop `{0}`".format(col) for col in ASKBOT_AUTH_USER_COLUMNS] - statement = "alter table `auth_user` {0};".format(", ".join(drops)) - db.execute(statement) - else: - for column in ASKBOT_AUTH_USER_COLUMNS: - db.delete_column('auth_user', column) + try: + # For MySQL, we're batching the alters together for performance reasons + if db.backend_name == 'mysql': + drops = ["drop `{0}`".format(col) for col in ASKBOT_AUTH_USER_COLUMNS] + statement = "alter table `auth_user` {0};".format(", ".join(drops)) + db.execute(statement) + else: + for column in ASKBOT_AUTH_USER_COLUMNS: + db.delete_column('auth_user', column) + except Exception as ex: + print "Couldn't remove askbot because of {0} -- it was probably never here to begin with.".format(ex) def backwards(self, orm): raise RuntimeError("Cannot reverse this migration: there's no going back to Askbot.") diff --git a/common/djangoapps/student/migrations/0022_auto__add_courseenrollmentallowed__add_unique_courseenrollmentallowed_.py b/common/djangoapps/student/migrations/0022_auto__add_courseenrollmentallowed__add_unique_courseenrollmentallowed_.py new file mode 100644 index 0000000000..f7e2571685 --- /dev/null +++ b/common/djangoapps/student/migrations/0022_auto__add_courseenrollmentallowed__add_unique_courseenrollmentallowed_.py @@ -0,0 +1,155 @@ +# -*- coding: utf-8 -*- +import datetime +from south.db import db +from south.v2 import SchemaMigration +from django.db import models + + +class Migration(SchemaMigration): 
+ + def forwards(self, orm): + # Adding model 'CourseEnrollmentAllowed' + db.create_table('student_courseenrollmentallowed', ( + ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), + ('email', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)), + ('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)), + ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, db_index=True, blank=True)), + )) + db.send_create_signal('student', ['CourseEnrollmentAllowed']) + + # Adding unique constraint on 'CourseEnrollmentAllowed', fields ['email', 'course_id'] + db.create_unique('student_courseenrollmentallowed', ['email', 'course_id']) + + + def backwards(self, orm): + # Removing unique constraint on 'CourseEnrollmentAllowed', fields ['email', 'course_id'] + db.delete_unique('student_courseenrollmentallowed', ['email', 'course_id']) + + # Deleting model 'CourseEnrollmentAllowed' + db.delete_table('student_courseenrollmentallowed') + + + models = { + 'auth.group': { + 'Meta': {'object_name': 'Group'}, + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), + 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) + }, + 'auth.permission': { + 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, + 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) + }, + 'auth.user': { 
+ 'Meta': {'object_name': 'User'}, + 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), + 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), + 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), + 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), + 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), + 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), + 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), + 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), + 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), + 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), + 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) + }, + 'contenttypes.contenttype': { + 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, + 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) + }, + 'student.courseenrollment': { + 'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'}, + 'course_id': 
('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), + 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) + }, + 'student.courseenrollmentallowed': { + 'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'}, + 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), + 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), + 'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) + }, + 'student.pendingemailchange': { + 'Meta': {'object_name': 'PendingEmailChange'}, + 'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), + 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) + }, + 'student.pendingnamechange': { + 'Meta': {'object_name': 'PendingNameChange'}, + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), + 'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), + 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) + }, + 'student.registration': { + 'Meta': {'object_name': 
'Registration', 'db_table': "'auth_registration'"}, + 'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}) + }, + 'student.testcenteruser': { + 'Meta': {'object_name': 'TestCenterUser'}, + 'address_1': ('django.db.models.fields.CharField', [], {'max_length': '40'}), + 'address_2': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}), + 'address_3': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}), + 'candidate_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}), + 'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), + 'client_candidate_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}), + 'company_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), + 'country': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}), + 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}), + 'extension': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'blank': 'True'}), + 'fax': ('django.db.models.fields.CharField', [], {'max_length': '35', 'blank': 'True'}), + 'fax_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}), + 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}), + 'middle_name': ('django.db.models.fields.CharField', [], 
{'max_length': '30', 'blank': 'True'}), + 'phone': ('django.db.models.fields.CharField', [], {'max_length': '35'}), + 'phone_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}), + 'postal_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}), + 'salutation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), + 'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}), + 'suffix': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), + 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), + 'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'unique': 'True'}), + 'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}) + }, + 'student.userprofile': { + 'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"}, + 'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}), + 'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}), + 'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), + 'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}), + 'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), + 'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), + 'meta': 
('django.db.models.fields.TextField', [], {'blank': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), + 'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}), + 'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}) + }, + 'student.usertestgroup': { + 'Meta': {'object_name': 'UserTestGroup'}, + 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), + 'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'}) + } + } + + complete_apps = ['student'] \ No newline at end of file diff --git a/common/djangoapps/student/migrations/0023_add_test_center_registration.py b/common/djangoapps/student/migrations/0023_add_test_center_registration.py new file mode 100644 index 0000000000..c5af38dd37 --- /dev/null +++ b/common/djangoapps/student/migrations/0023_add_test_center_registration.py @@ -0,0 +1,241 @@ +# -*- coding: utf-8 -*- +import datetime +from south.db import db +from south.v2 import SchemaMigration +from django.db import models + + +class Migration(SchemaMigration): + + def forwards(self, orm): + # Adding model 'TestCenterRegistration' + db.create_table('student_testcenterregistration', ( + ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), + ('testcenter_user', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['student.TestCenterUser'])), + ('course_id', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), + ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, 
blank=True)), + ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)), + ('user_updated_at', self.gf('django.db.models.fields.DateTimeField')(db_index=True)), + ('client_authorization_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=20, db_index=True)), + ('exam_series_code', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)), + ('eligibility_appointment_date_first', self.gf('django.db.models.fields.DateField')(db_index=True)), + ('eligibility_appointment_date_last', self.gf('django.db.models.fields.DateField')(db_index=True)), + ('accommodation_code', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)), + ('accommodation_request', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=1024, blank=True)), + ('uploaded_at', self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True)), + ('processed_at', self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True)), + ('upload_status', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=20, blank=True)), + ('upload_error_message', self.gf('django.db.models.fields.CharField')(max_length=512, blank=True)), + ('authorization_id', self.gf('django.db.models.fields.IntegerField')(null=True, db_index=True)), + ('confirmed_at', self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True)), + )) + db.send_create_signal('student', ['TestCenterRegistration']) + + # Adding field 'TestCenterUser.uploaded_at' + db.add_column('student_testcenteruser', 'uploaded_at', + self.gf('django.db.models.fields.DateTimeField')(db_index=True, null=True, blank=True), + keep_default=False) + + # Adding field 'TestCenterUser.processed_at' + db.add_column('student_testcenteruser', 'processed_at', + self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True), + keep_default=False) + + # Adding field 'TestCenterUser.upload_status' + 
db.add_column('student_testcenteruser', 'upload_status', + self.gf('django.db.models.fields.CharField')(db_index=True, default='', max_length=20, blank=True), + keep_default=False) + + # Adding field 'TestCenterUser.upload_error_message' + db.add_column('student_testcenteruser', 'upload_error_message', + self.gf('django.db.models.fields.CharField')(default='', max_length=512, blank=True), + keep_default=False) + + # Adding field 'TestCenterUser.confirmed_at' + db.add_column('student_testcenteruser', 'confirmed_at', + self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True), + keep_default=False) + + # Adding index on 'TestCenterUser', fields ['company_name'] + db.create_index('student_testcenteruser', ['company_name']) + + # Adding unique constraint on 'TestCenterUser', fields ['client_candidate_id'] + db.create_unique('student_testcenteruser', ['client_candidate_id']) + + + def backwards(self, orm): + # Removing unique constraint on 'TestCenterUser', fields ['client_candidate_id'] + db.delete_unique('student_testcenteruser', ['client_candidate_id']) + + # Removing index on 'TestCenterUser', fields ['company_name'] + db.delete_index('student_testcenteruser', ['company_name']) + + # Deleting model 'TestCenterRegistration' + db.delete_table('student_testcenterregistration') + + # Deleting field 'TestCenterUser.uploaded_at' + db.delete_column('student_testcenteruser', 'uploaded_at') + + # Deleting field 'TestCenterUser.processed_at' + db.delete_column('student_testcenteruser', 'processed_at') + + # Deleting field 'TestCenterUser.upload_status' + db.delete_column('student_testcenteruser', 'upload_status') + + # Deleting field 'TestCenterUser.upload_error_message' + db.delete_column('student_testcenteruser', 'upload_error_message') + + # Deleting field 'TestCenterUser.confirmed_at' + db.delete_column('student_testcenteruser', 'confirmed_at') + + + models = { + 'auth.group': { + 'Meta': {'object_name': 'Group'}, + 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), + 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) + }, + 'auth.permission': { + 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, + 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) + }, + 'auth.user': { + 'Meta': {'object_name': 'User'}, + 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), + 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), + 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), + 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), + 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), + 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), + 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), + 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), + 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), + 'user_permissions': 
('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), + 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) + }, + 'contenttypes.contenttype': { + 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, + 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) + }, + 'student.courseenrollment': { + 'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'}, + 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), + 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) + }, + 'student.courseenrollmentallowed': { + 'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'}, + 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), + 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), + 'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) + }, + 'student.pendingemailchange': { + 'Meta': {'object_name': 'PendingEmailChange'}, + 'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 
'max_length': '32', 'db_index': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), + 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) + }, + 'student.pendingnamechange': { + 'Meta': {'object_name': 'PendingNameChange'}, + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), + 'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), + 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) + }, + 'student.registration': { + 'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"}, + 'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}) + }, + 'student.testcenterregistration': { + 'Meta': {'object_name': 'TestCenterRegistration'}, + 'accommodation_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), + 'accommodation_request': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '1024', 'blank': 'True'}), + 'authorization_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}), + 'client_authorization_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}), + 'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), + 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 
'True'}), + 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}), + 'eligibility_appointment_date_first': ('django.db.models.fields.DateField', [], {'db_index': 'True'}), + 'eligibility_appointment_date_last': ('django.db.models.fields.DateField', [], {'db_index': 'True'}), + 'exam_series_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), + 'testcenter_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['student.TestCenterUser']"}), + 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), + 'upload_error_message': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}), + 'upload_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}), + 'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), + 'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}) + }, + 'student.testcenteruser': { + 'Meta': {'object_name': 'TestCenterUser'}, + 'address_1': ('django.db.models.fields.CharField', [], {'max_length': '40'}), + 'address_2': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}), + 'address_3': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}), + 'candidate_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}), + 'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), + 'client_candidate_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}), + 
'company_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}), + 'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), + 'country': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}), + 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}), + 'extension': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'blank': 'True'}), + 'fax': ('django.db.models.fields.CharField', [], {'max_length': '35', 'blank': 'True'}), + 'fax_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}), + 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}), + 'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), + 'phone': ('django.db.models.fields.CharField', [], {'max_length': '35'}), + 'phone_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}), + 'postal_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}), + 'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), + 'salutation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), + 'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}), + 'suffix': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), + 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), + 'upload_error_message': 
('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}), + 'upload_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}), + 'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}), + 'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'unique': 'True'}), + 'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}) + }, + 'student.userprofile': { + 'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"}, + 'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}), + 'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}), + 'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), + 'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}), + 'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), + 'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), + 'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), + 'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}), + 'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}) + }, + 
'student.usertestgroup': { + 'Meta': {'object_name': 'UserTestGroup'}, + 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), + 'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'}) + } + } + + complete_apps = ['student'] \ No newline at end of file diff --git a/common/djangoapps/student/models.py b/common/djangoapps/student/models.py index 5975853a21..7b4a5fb9be 100644 --- a/common/djangoapps/student/models.py +++ b/common/djangoapps/student/models.py @@ -36,10 +36,12 @@ file and check it in at the same time as your model changes. To do that, 3. Add the migration file created in mitx/common/djangoapps/student/migrations/ """ from datetime import datetime -from hashlib import sha1 +import hashlib import json import logging import uuid +from random import randint +from time import strftime from django.conf import settings @@ -47,10 +49,9 @@ from django.contrib.auth.models import User from django.db import models from django.db.models.signals import post_save from django.dispatch import receiver +from django.forms import ModelForm, forms import comment_client as cc -from django_comment_client.models import Role - log = logging.getLogger(__name__) @@ -126,6 +127,9 @@ class UserProfile(models.Model): def set_meta(self, js): self.meta = json.dumps(js) +TEST_CENTER_STATUS_ACCEPTED = "Accepted" +TEST_CENTER_STATUS_ERROR = "Error" + class TestCenterUser(models.Model): """This is our representation of the User for in-person testing, and specifically for Pearson at this point. 
A few things to note: @@ -141,6 +145,9 @@ class TestCenterUser(models.Model): The field names and lengths are modeled on the conventions and constraints of Pearson's data import system, including oddities such as suffix having a limit of 255 while last_name only gets 50. + + Also storing here the confirmation information received from Pearson (if any) + as to the success or failure of the upload. (VCDC file) """ # Our own record keeping... user = models.ForeignKey(User, unique=True, default=None) @@ -151,12 +158,8 @@ class TestCenterUser(models.Model): # updated_at, this will not get incremented when we do a batch data import. user_updated_at = models.DateTimeField(db_index=True) - # Unique ID given to us for this User by the Testing Center. It's null when - # we first create the User entry, and is assigned by Pearson later. - candidate_id = models.IntegerField(null=True, db_index=True) - - # Unique ID we assign our user for a the Test Center. - client_candidate_id = models.CharField(max_length=50, db_index=True) + # Unique ID we assign our user for the Test Center. + client_candidate_id = models.CharField(unique=True, max_length=50, db_index=True) # Name first_name = models.CharField(max_length=30, db_index=True) @@ -187,24 +190,376 @@ class TestCenterUser(models.Model): fax_country_code = models.CharField(max_length=3, blank=True) # Company - company_name = models.CharField(max_length=50, blank=True) + company_name = models.CharField(max_length=50, blank=True, db_index=True) + # time at which edX sent the registration to the test center + uploaded_at = models.DateTimeField(null=True, blank=True, db_index=True) + + # confirmation back from the test center, as well as timestamps + # on when they processed the request, and when we received + # confirmation back. 
+ processed_at = models.DateTimeField(null=True, db_index=True) + upload_status = models.CharField(max_length=20, blank=True, db_index=True) # 'Error' or 'Accepted' + upload_error_message = models.CharField(max_length=512, blank=True) + # Unique ID given to us for this User by the Testing Center. It's null when + # we first create the User entry, and may be assigned by Pearson later. + # (However, it may never be set if we are always initiating such candidate creation.) + candidate_id = models.IntegerField(null=True, db_index=True) + confirmed_at = models.DateTimeField(null=True, db_index=True) + + @property + def needs_uploading(self): + return self.uploaded_at is None or self.uploaded_at < self.user_updated_at + + @staticmethod + def user_provided_fields(): + return [ 'first_name', 'middle_name', 'last_name', 'suffix', 'salutation', + 'address_1', 'address_2', 'address_3', 'city', 'state', 'postal_code', 'country', + 'phone', 'extension', 'phone_country_code', 'fax', 'fax_country_code', 'company_name'] + @property def email(self): return self.user.email + + def needs_update(self, fields): + for fieldname in TestCenterUser.user_provided_fields(): + if fieldname in fields and getattr(self, fieldname) != fields[fieldname]: + return True + + return False + + @staticmethod + def _generate_edx_id(prefix): + NUM_DIGITS = 12 + return u"{}{:012}".format(prefix, randint(1, 10**NUM_DIGITS-1)) + + @staticmethod + def _generate_candidate_id(): + return TestCenterUser._generate_edx_id("edX") + + @classmethod + def create(cls, user): + testcenter_user = cls(user=user) + # testcenter_user.candidate_id remains unset + # assign an ID of our own: + cand_id = cls._generate_candidate_id() + while TestCenterUser.objects.filter(client_candidate_id=cand_id).exists(): + cand_id = cls._generate_candidate_id() + testcenter_user.client_candidate_id = cand_id + return testcenter_user + @property + def is_accepted(self): + return self.upload_status == TEST_CENTER_STATUS_ACCEPTED + + @property 
+ def is_rejected(self): + return self.upload_status == TEST_CENTER_STATUS_ERROR + + @property + def is_pending(self): + return not self.is_accepted and not self.is_rejected + +class TestCenterUserForm(ModelForm): + class Meta: + model = TestCenterUser + fields = ( 'first_name', 'middle_name', 'last_name', 'suffix', 'salutation', + 'address_1', 'address_2', 'address_3', 'city', 'state', 'postal_code', 'country', + 'phone', 'extension', 'phone_country_code', 'fax', 'fax_country_code', 'company_name') + + def update_and_save(self): + new_user = self.save(commit=False) + # create additional values here: + new_user.user_updated_at = datetime.utcnow() + new_user.upload_status = '' + new_user.save() + log.info("Updated demographic information for user's test center exam registration: username \"{}\" ".format(new_user.user.username)) + + # add validation: + + def clean_country(self): + code = self.cleaned_data['country'] + if code and len(code) != 3: + raise forms.ValidationError(u'Must be three characters (ISO 3166-1): e.g. 
USA, CAN, MNG') + return code + + def clean(self): + def _can_encode_as_latin(fieldvalue): + try: + fieldvalue.encode('iso-8859-1') + except UnicodeEncodeError: + return False + return True + + cleaned_data = super(TestCenterUserForm, self).clean() + + # check for interactions between fields: + if 'country' in cleaned_data: + country = cleaned_data.get('country') + if country == 'USA' or country == 'CAN': + if 'state' in cleaned_data and len(cleaned_data['state']) == 0: + self._errors['state'] = self.error_class([u'Required if country is USA or CAN.']) + del cleaned_data['state'] + + if 'postal_code' in cleaned_data and len(cleaned_data['postal_code']) == 0: + self._errors['postal_code'] = self.error_class([u'Required if country is USA or CAN.']) + del cleaned_data['postal_code'] + + if 'fax' in cleaned_data and len(cleaned_data['fax']) > 0 and 'fax_country_code' in cleaned_data and len(cleaned_data['fax_country_code']) == 0: + self._errors['fax_country_code'] = self.error_class([u'Required if fax is specified.']) + del cleaned_data['fax_country_code'] + + # check encoding for all fields: + cleaned_data_fields = [fieldname for fieldname in cleaned_data] + for fieldname in cleaned_data_fields: + if not _can_encode_as_latin(cleaned_data[fieldname]): + self._errors[fieldname] = self.error_class([u'Must only use characters in Latin-1 (iso-8859-1) encoding']) + del cleaned_data[fieldname] + + # Always return the full collection of cleaned data. + return cleaned_data + +# our own code to indicate that a request has been rejected. 
+ACCOMMODATION_REJECTED_CODE = 'NONE' + +ACCOMMODATION_CODES = ( + (ACCOMMODATION_REJECTED_CODE, 'No Accommodation Granted'), + ('EQPMNT', 'Equipment'), + ('ET12ET', 'Extra Time - 1/2 Exam Time'), + ('ET30MN', 'Extra Time - 30 Minutes'), + ('ETDBTM', 'Extra Time - Double Time'), + ('SEPRMM', 'Separate Room'), + ('SRREAD', 'Separate Room and Reader'), + ('SRRERC', 'Separate Room and Reader/Recorder'), + ('SRRECR', 'Separate Room and Recorder'), + ('SRSEAN', 'Separate Room and Service Animal'), + ('SRSGNR', 'Separate Room and Sign Language Interpreter'), + ) + +ACCOMMODATION_CODE_DICT = { code : name for (code, name) in ACCOMMODATION_CODES } + +class TestCenterRegistration(models.Model): + """ + This is our representation of a user's registration for in-person testing, + and specifically for Pearson at this point. A few things to note: + + * Pearson only supports Latin-1, so we have to make sure that the data we + capture here will work with that encoding. This is less of an issue + than for the TestCenterUser. + * Registrations are only created here when a user registers to take an exam in person. + + The field names and lengths are modeled on the conventions and constraints + of Pearson's data import system. + """ + # to find an exam registration, we key off of the user and course_id. + # If multiple exams per course are possible, we would also need to add the + # exam_series_code. + testcenter_user = models.ForeignKey(TestCenterUser, default=None) + course_id = models.CharField(max_length=128, db_index=True) + + created_at = models.DateTimeField(auto_now_add=True, db_index=True) + updated_at = models.DateTimeField(auto_now=True, db_index=True) + # user_updated_at happens only when the user makes a change to their data, + # and is something Pearson needs to know to manage updates. Unlike + # updated_at, this will not get incremented when we do a batch data import. 
+ # The appointment dates, the exam count, and the accommodation codes can be updated, + # but hopefully this won't happen often. + user_updated_at = models.DateTimeField(db_index=True) + # "client_authorization_id" is our unique identifier for the authorization. + # This must be present for an update or delete to be sent to Pearson. + client_authorization_id = models.CharField(max_length=20, unique=True, db_index=True) + + # information about the test, from the course policy: + exam_series_code = models.CharField(max_length=15, db_index=True) + eligibility_appointment_date_first = models.DateField(db_index=True) + eligibility_appointment_date_last = models.DateField(db_index=True) + + # this is really a list of codes, using an '*' as a delimiter. + # So it's not a choice list. We use the special value of ACCOMMODATION_REJECTED_CODE + # to indicate the rejection of an accommodation request. + accommodation_code = models.CharField(max_length=64, blank=True) + + # store the original text of the accommodation request. + accommodation_request = models.CharField(max_length=1024, blank=True, db_index=True) + + # time at which edX sent the registration to the test center + uploaded_at = models.DateTimeField(null=True, db_index=True) + + # confirmation back from the test center, as well as timestamps + # on when they processed the request, and when we received + # confirmation back. + processed_at = models.DateTimeField(null=True, db_index=True) + upload_status = models.CharField(max_length=20, blank=True, db_index=True) # 'Error' or 'Accepted' + upload_error_message = models.CharField(max_length=512, blank=True) + # Unique ID given to us for this registration by the Testing Center. It's null when + # we first create the registration entry, and may be assigned by Pearson later. + # (However, it may never be set if we are always initiating such candidate creation.) 
+ authorization_id = models.IntegerField(null=True, db_index=True) + confirmed_at = models.DateTimeField(null=True, db_index=True) + + @property + def candidate_id(self): + return self.testcenter_user.candidate_id + + @property + def client_candidate_id(self): + return self.testcenter_user.client_candidate_id + + @property + def authorization_transaction_type(self): + if self.authorization_id is not None: + return 'Update' + elif self.uploaded_at is None: + return 'Add' + else: + # TODO: decide what to send when we have uploaded an initial version, + # but have not received confirmation back from that upload. If the + # registration here has been changed, then we don't know if this changed + # registration should be submitted as an 'add' or an 'update'. + # + # If the first registration were lost or in error (e.g. bad code), + # the second should be an "Add". If the first were processed successfully, + # then the second should be an "Update". We just don't know.... + return 'Update' + + @property + def exam_authorization_count(self): + # TODO: figure out if this should really go in the database (with a default value). 
+ return 1 + + @classmethod + def create(cls, testcenter_user, exam, accommodation_request): + registration = cls(testcenter_user = testcenter_user) + registration.course_id = exam.course_id + registration.accommodation_request = accommodation_request.strip() + registration.exam_series_code = exam.exam_series_code + registration.eligibility_appointment_date_first = strftime("%Y-%m-%d", exam.first_eligible_appointment_date) + registration.eligibility_appointment_date_last = strftime("%Y-%m-%d", exam.last_eligible_appointment_date) + registration.client_authorization_id = cls._create_client_authorization_id() + # accommodation_code remains blank for now, along with Pearson confirmation information + return registration + + @staticmethod + def _generate_authorization_id(): + return TestCenterUser._generate_edx_id("edXexam") + + @staticmethod + def _create_client_authorization_id(): + """ + Return a unique id for a registration, suitable for using as an authorization code + for Pearson. It must fit within 20 characters. 
+ """ + # generate a random value, and check to see if it already is in use here + auth_id = TestCenterRegistration._generate_authorization_id() + while TestCenterRegistration.objects.filter(client_authorization_id=auth_id).exists(): + auth_id = TestCenterRegistration._generate_authorization_id() + return auth_id + + # methods for providing registration status details on registration page: + @property + def demographics_is_accepted(self): + return self.testcenter_user.is_accepted + + @property + def demographics_is_rejected(self): + return self.testcenter_user.is_rejected + + @property + def demographics_is_pending(self): + return self.testcenter_user.is_pending + + @property + def accommodation_is_accepted(self): + return len(self.accommodation_request) > 0 and len(self.accommodation_code) > 0 and self.accommodation_code != ACCOMMODATION_REJECTED_CODE + + @property + def accommodation_is_rejected(self): + return len(self.accommodation_request) > 0 and self.accommodation_code == ACCOMMODATION_REJECTED_CODE + + @property + def accommodation_is_pending(self): + return len(self.accommodation_request) > 0 and len(self.accommodation_code) == 0 + + @property + def accommodation_is_skipped(self): + return len(self.accommodation_request) == 0 + + @property + def registration_is_accepted(self): + return self.upload_status == TEST_CENTER_STATUS_ACCEPTED + + @property + def registration_is_rejected(self): + return self.upload_status == TEST_CENTER_STATUS_ERROR + + @property + def registration_is_pending(self): + return not self.registration_is_accepted and not self.registration_is_rejected + + # methods for providing registration status summary on dashboard page: + @property + def is_accepted(self): + return self.registration_is_accepted and self.demographics_is_accepted + + @property + def is_rejected(self): + return self.registration_is_rejected or self.demographics_is_rejected + + @property + def is_pending(self): + return not self.is_accepted and not self.is_rejected + + 
def get_accommodation_codes(self): + return self.accommodation_code.split('*') + + def get_accommodation_names(self): + return [ ACCOMMODATION_CODE_DICT.get(code, "Unknown code " + code) for code in self.get_accommodation_codes() ] + + @property + def registration_signup_url(self): + return settings.PEARSONVUE_SIGNINPAGE_URL + +class TestCenterRegistrationForm(ModelForm): + class Meta: + model = TestCenterRegistration + fields = ( 'accommodation_request', 'accommodation_code' ) + + def clean_accommodation_request(self): + code = self.cleaned_data['accommodation_request'] + if code and len(code) > 0: + return code.strip() + return code + + def update_and_save(self): + registration = self.save(commit=False) + # create additional values here: + registration.user_updated_at = datetime.utcnow() + registration.upload_status = '' + registration.save() + log.info("Updated registration information for user's test center exam registration: username \"{}\" course \"{}\", examcode \"{}\"".format(registration.testcenter_user.user.username, registration.course_id, registration.exam_series_code)) + + # TODO: add validation code for values added to accommodation_code field. + + + +def get_testcenter_registration(user, course_id, exam_series_code): + try: + tcu = TestCenterUser.objects.get(user=user) + except TestCenterUser.DoesNotExist: + return [] + return TestCenterRegistration.objects.filter(testcenter_user=tcu, course_id=course_id, exam_series_code=exam_series_code) + def unique_id_for_user(user): """ Return a unique id for a user, suitable for inserting into e.g. personalized survey links. - - Currently happens to be implemented as a sha1 hash of the username - (and thus assumes that usernames don't change). """ - # Using the user id as the salt because it's sort of random, and is already - # in the db. - salt = str(user.id) - return sha1(salt + user.username).hexdigest() + # include the secret key as a salt, and to make the ids unique across + # different LMS installs. 
+ h = hashlib.md5() + h.update(settings.SECRET_KEY) + h.update(str(user.id)) + return h.hexdigest() ## TODO: Should be renamed to generic UserGroup, and possibly @@ -263,15 +618,22 @@ class CourseEnrollment(models.Model): return "[CourseEnrollment] %s: %s (%s)" % (self.user, self.course_id, self.created) -@receiver(post_save, sender=CourseEnrollment) -def assign_default_role(sender, instance, **kwargs): - if instance.user.is_staff: - role = Role.objects.get_or_create(course_id=instance.course_id, name="Moderator")[0] - else: - role = Role.objects.get_or_create(course_id=instance.course_id, name="Student")[0] +class CourseEnrollmentAllowed(models.Model): + """ + Table of users (specified by email address strings) who are allowed to enroll in a specified course. + The user may or may not (yet) exist. Enrollment by users listed in this table is allowed + even if the enrollment time window is past. + """ + email = models.CharField(max_length=255, db_index=True) + course_id = models.CharField(max_length=255, db_index=True) - logging.info("assign_default_role: adding %s as %s" % (instance.user, role)) - instance.user.roles.add(role) + created = models.DateTimeField(auto_now_add=True, null=True, db_index=True) + + class Meta: + unique_together = (('email', 'course_id'), ) + + def __unicode__(self): + return "[CourseEnrollmentAllowed] %s: %s (%s)" % (self.email, self.course_id, self.created) #cache_relation(User.profile) diff --git a/common/djangoapps/student/views.py b/common/djangoapps/student/views.py index 44877ef597..1a9648835e 100644 --- a/common/djangoapps/student/views.py +++ b/common/djangoapps/student/views.py @@ -1,15 +1,16 @@ import datetime import feedparser -import itertools +#import itertools import json import logging import random import string import sys -import time +#import time import urllib import uuid + from django.conf import settings from django.contrib.auth import logout, authenticate, login from django.contrib.auth.forms import PasswordResetForm 
@@ -26,21 +27,22 @@ from bs4 import BeautifulSoup from django.core.cache import cache from django_future.csrf import ensure_csrf_cookie, csrf_exempt -from student.models import (Registration, UserProfile, +from student.models import (Registration, UserProfile, TestCenterUser, TestCenterUserForm, + TestCenterRegistration, TestCenterRegistrationForm, PendingNameChange, PendingEmailChange, - CourseEnrollment, unique_id_for_user) + CourseEnrollment, unique_id_for_user, + get_testcenter_registration) from certificates.models import CertificateStatuses, certificate_status_for_student from xmodule.course_module import CourseDescriptor from xmodule.modulestore.exceptions import ItemNotFoundError from xmodule.modulestore.django import modulestore -from xmodule.modulestore.exceptions import ItemNotFoundError -from datetime import date +#from datetime import date from collections import namedtuple -from courseware.courses import get_courses_by_university +from courseware.courses import get_courses from courseware.access import has_access from statsd import statsd @@ -74,16 +76,21 @@ def index(request, extra_context={}, user=None): domain = settings.MITX_FEATURES.get('FORCE_UNIVERSITY_DOMAIN') # normally False if domain==False: # do explicit check, because domain=None is valid domain = request.META.get('HTTP_HOST') - universities = get_courses_by_university(None, - domain=domain) + + courses = get_courses(None, domain=domain) + + # Sort courses by how far are they from they start day + key = lambda course: course.days_until_start + courses = sorted(courses, key=key, reverse=True) # Get the 3 most recent news top_news = _get_news(top=3) - context = {'universities': universities, 'news': top_news} + context = {'courses': courses, 'news': top_news} context.update(extra_context) return render_to_response('index.html', context) + def course_from_id(course_id): """Return the CourseDescriptor corresponding to this course_id""" course_loc = CourseDescriptor.id_to_location(course_id) 
@@ -204,7 +211,7 @@ def _cert_info(user, course, cert_status): def dashboard(request): user = request.user enrollments = CourseEnrollment.objects.filter(user=user) - + # Build our courses list for the user, but ignore any courses that no longer # exist (because the course IDs have changed). Still, we don't delete those # enrollments, because it could have been a data push snafu. @@ -234,6 +241,8 @@ def dashboard(request): cert_statuses = { course.id: cert_info(request.user, course) for course in courses} + exam_registrations = { course.id: exam_registration_info(request.user, course) for course in courses} + # Get the 3 most recent news top_news = _get_news(top=3) @@ -244,6 +253,7 @@ def dashboard(request): 'show_courseware_links_for' : show_courseware_links_for, 'cert_statuses': cert_statuses, 'news': top_news, + 'exam_registrations': exam_registrations, } return render_to_response('dashboard.html', context) @@ -295,7 +305,7 @@ def change_enrollment(request): try: course = course_from_id(course_id) except ItemNotFoundError: - log.warning("User {0} tried to enroll in non-existant course {1}" + log.warning("User {0} tried to enroll in non-existent course {1}" .format(user.username, enrollment.course_id)) return {'success': False, 'error': 'The course requested does not exist.'} @@ -333,6 +343,14 @@ def change_enrollment(request): return {'success': False, 'error': 'We weren\'t able to unenroll you. Please try again.'} +@ensure_csrf_cookie +def accounts_login(request, error=""): + + + return render_to_response('accounts_login.html', { 'error': error }) + + + # Need different levels of logging @ensure_csrf_cookie def login_user(request, error=""): @@ -453,8 +471,9 @@ def _do_create_account(post_vars): try: profile.year_of_birth = int(post_vars['year_of_birth']) except (ValueError, KeyError): - profile.year_of_birth = None # If they give us garbage, just ignore it instead - # of asking them to put an integer. 
+ # If they give us garbage, just ignore it instead + # of asking them to put an integer. + profile.year_of_birth = None try: profile.save() except Exception: @@ -586,6 +605,172 @@ def create_account(request, post_override=None): js = {'success': True} return HttpResponse(json.dumps(js), mimetype="application/json") +def exam_registration_info(user, course): + """ Returns a Registration object if the user is currently registered for a current + exam of the course. Returns None if the user is not registered, or if there is no + current exam for the course. + """ + exam_info = course.current_test_center_exam + if exam_info is None: + return None + + exam_code = exam_info.exam_series_code + registrations = get_testcenter_registration(user, course.id, exam_code) + if registrations: + registration = registrations[0] + else: + registration = None + return registration + +@login_required +@ensure_csrf_cookie +def begin_exam_registration(request, course_id): + """ Handles request to register the user for the current + test center exam of the specified course. Called by form + in dashboard.html. + """ + user = request.user + + try: + course = course_from_id(course_id) + except ItemNotFoundError: + log.error("User {0} enrolled in non-existent course {1}".format(user.username, course_id)) + raise Http404 + + # get the exam to be registered for: + # (For now, we just assume there is one at most.) + # if there is no exam now (because someone bookmarked this stupid page), + # then return a 404: + exam_info = course.current_test_center_exam + if exam_info is None: + raise Http404 + + # determine if the user is registered for this course: + registration = exam_registration_info(user, course) + + # we want to populate the registration page with the relevant information, + # if it already exists. Create an empty object otherwise. 
+ try: + testcenteruser = TestCenterUser.objects.get(user=user) + except TestCenterUser.DoesNotExist: + testcenteruser = TestCenterUser() + testcenteruser.user = user + + context = {'course': course, + 'user': user, + 'testcenteruser': testcenteruser, + 'registration': registration, + 'exam_info': exam_info, + } + + return render_to_response('test_center_register.html', context) + +@ensure_csrf_cookie +def create_exam_registration(request, post_override=None): + ''' + JSON call to create a test center exam registration. + Called by form in test_center_register.html + ''' + post_vars = post_override if post_override else request.POST + + # first determine if we need to create a new TestCenterUser, or if we are making any update + # to an existing TestCenterUser. + username = post_vars['username'] + user = User.objects.get(username=username) + course_id = post_vars['course_id'] + course = course_from_id(course_id) # assume it will be found.... + + # make sure that any demographic data values received from the page have been stripped. + # Whitespace is not an acceptable response for any of these values + demographic_data = {} + for fieldname in TestCenterUser.user_provided_fields(): + if fieldname in post_vars: + demographic_data[fieldname] = (post_vars[fieldname]).strip() + + try: + testcenter_user = TestCenterUser.objects.get(user=user) + needs_updating = testcenter_user.needs_update(demographic_data) + log.info("User {0} enrolled in course {1} {2}updating demographic info for exam registration".format(user.username, course_id, "" if needs_updating else "not ")) + except TestCenterUser.DoesNotExist: + # do additional initialization here: + testcenter_user = TestCenterUser.create(user) + needs_updating = True + log.info("User {0} enrolled in course {1} creating demographic info for exam registration".format(user.username, course_id)) + + # perform validation: + if needs_updating: + # first perform validation on the user information + # using a Django Form. 
+ form = TestCenterUserForm(instance=testcenter_user, data=demographic_data) + if form.is_valid(): + form.update_and_save() + else: + response_data = {'success': False} + # return a list of errors... + response_data['field_errors'] = form.errors + response_data['non_field_errors'] = form.non_field_errors() + return HttpResponse(json.dumps(response_data), mimetype="application/json") + + # create and save the registration: + needs_saving = False + exam = course.current_test_center_exam + exam_code = exam.exam_series_code + registrations = get_testcenter_registration(user, course_id, exam_code) + if registrations: + registration = registrations[0] + # NOTE: we do not bother to check here to see if the registration has changed, + # because at the moment there is no way for a user to change anything about their + # registration. They only provide an optional accommodation request once, and + # cannot make changes to it thereafter. + # It is possible that the exam_info content has been changed, such as the + # scheduled exam dates, but those kinds of changes should not be handled through + # this registration screen. + + else: + accommodation_request = post_vars.get('accommodation_request','') + registration = TestCenterRegistration.create(testcenter_user, exam, accommodation_request) + needs_saving = True + log.info("User {0} enrolled in course {1} creating new exam registration".format(user.username, course_id)) + + if needs_saving: + # do validation of registration. (Mainly whether an accommodation request is too long.) + form = TestCenterRegistrationForm(instance=registration, data=post_vars) + if form.is_valid(): + form.update_and_save() + else: + response_data = {'success': False} + # return a list of errors... 
+ response_data['field_errors'] = form.errors + response_data['non_field_errors'] = form.non_field_errors() + return HttpResponse(json.dumps(response_data), mimetype="application/json") + + + # only do the following if there is accommodation text to send, + # and a destination to which to send it. + # TODO: still need to create the accommodation email templates +# if 'accommodation_request' in post_vars and 'TESTCENTER_ACCOMMODATION_REQUEST_EMAIL' in settings: +# d = {'accommodation_request': post_vars['accommodation_request'] } +# +# # composes accommodation email +# subject = render_to_string('emails/accommodation_email_subject.txt', d) +# # Email subject *must not* contain newlines +# subject = ''.join(subject.splitlines()) +# message = render_to_string('emails/accommodation_email.txt', d) +# +# try: +# dest_addr = settings['TESTCENTER_ACCOMMODATION_REQUEST_EMAIL'] +# from_addr = user.email +# send_mail(subject, message, from_addr, [dest_addr], fail_silently=False) +# except: +# log.exception(sys.exc_info()) +# response_data = {'success': False} +# response_data['non_field_errors'] = [ 'Could not send accommodation e-mail.', ] +# return HttpResponse(json.dumps(response_data), mimetype="application/json") + + + js = {'success': True} + return HttpResponse(json.dumps(js), mimetype="application/json") + def get_random_post_override(): """ @@ -641,7 +826,7 @@ def password_reset(request): # By default, Django doesn't allow Users with is_active = False to reset their passwords, # but this bites people who signed up a long time ago, never activated, and forgot their - # password. So for their sake, we'll auto-activate a user for whome password_reset is called. + # password. So for their sake, we'll auto-activate a user for whom password_reset is called. 
try: user = User.objects.get(email=request.POST['email']) user.is_active = True diff --git a/common/djangoapps/track/migrations/0001_initial.py b/common/djangoapps/track/migrations/0001_initial.py new file mode 100644 index 0000000000..0546203cf8 --- /dev/null +++ b/common/djangoapps/track/migrations/0001_initial.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +import datetime +from south.db import db +from south.v2 import SchemaMigration +from django.db import models + + +class Migration(SchemaMigration): + + def forwards(self, orm): + # Adding model 'TrackingLog' + db.create_table('track_trackinglog', ( + ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), + ('dtcreated', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), + ('username', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)), + ('ip', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)), + ('event_source', self.gf('django.db.models.fields.CharField')(max_length=32)), + ('event_type', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)), + ('event', self.gf('django.db.models.fields.TextField')(blank=True)), + ('agent', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)), + ('page', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)), + ('time', self.gf('django.db.models.fields.DateTimeField')()), + )) + db.send_create_signal('track', ['TrackingLog']) + + + def backwards(self, orm): + # Deleting model 'TrackingLog' + db.delete_table('track_trackinglog') + + + models = { + 'track.trackinglog': { + 'Meta': {'object_name': 'TrackingLog'}, + 'agent': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}), + 'dtcreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), + 'event': ('django.db.models.fields.TextField', [], {'blank': 'True'}), + 'event_source': 
('django.db.models.fields.CharField', [], {'max_length': '32'}), + 'event_type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'ip': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), + 'page': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}), + 'time': ('django.db.models.fields.DateTimeField', [], {}), + 'username': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}) + } + } + + complete_apps = ['track'] \ No newline at end of file diff --git a/common/djangoapps/track/migrations/0002_auto__add_field_trackinglog_host__chg_field_trackinglog_event_type__ch.py b/common/djangoapps/track/migrations/0002_auto__add_field_trackinglog_host__chg_field_trackinglog_event_type__ch.py new file mode 100644 index 0000000000..4c73aa3bfd --- /dev/null +++ b/common/djangoapps/track/migrations/0002_auto__add_field_trackinglog_host__chg_field_trackinglog_event_type__ch.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +import datetime +from south.db import db +from south.v2 import SchemaMigration +from django.db import models + + +class Migration(SchemaMigration): + + def forwards(self, orm): + # Adding field 'TrackingLog.host' + db.add_column('track_trackinglog', 'host', + self.gf('django.db.models.fields.CharField')(default='', max_length=64, blank=True), + keep_default=False) + + + # Changing field 'TrackingLog.event_type' + db.alter_column('track_trackinglog', 'event_type', self.gf('django.db.models.fields.CharField')(max_length=512)) + + # Changing field 'TrackingLog.page' + db.alter_column('track_trackinglog', 'page', self.gf('django.db.models.fields.CharField')(max_length=512, null=True)) + + def backwards(self, orm): + # Deleting field 'TrackingLog.host' + db.delete_column('track_trackinglog', 'host') + + + # Changing field 'TrackingLog.event_type' + 
db.alter_column('track_trackinglog', 'event_type', self.gf('django.db.models.fields.CharField')(max_length=32)) + + # Changing field 'TrackingLog.page' + db.alter_column('track_trackinglog', 'page', self.gf('django.db.models.fields.CharField')(max_length=32, null=True)) + + models = { + 'track.trackinglog': { + 'Meta': {'object_name': 'TrackingLog'}, + 'agent': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}), + 'dtcreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), + 'event': ('django.db.models.fields.TextField', [], {'blank': 'True'}), + 'event_source': ('django.db.models.fields.CharField', [], {'max_length': '32'}), + 'event_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}), + 'host': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'ip': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), + 'page': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}), + 'time': ('django.db.models.fields.DateTimeField', [], {}), + 'username': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}) + } + } + + complete_apps = ['track'] \ No newline at end of file diff --git a/.gitmodules b/common/djangoapps/track/migrations/__init__.py similarity index 100% rename from .gitmodules rename to common/djangoapps/track/migrations/__init__.py diff --git a/common/djangoapps/track/models.py b/common/djangoapps/track/models.py index 401fa2832f..dfdf7a0558 100644 --- a/common/djangoapps/track/models.py +++ b/common/djangoapps/track/models.py @@ -7,11 +7,12 @@ class TrackingLog(models.Model): username = models.CharField(max_length=32,blank=True) ip = models.CharField(max_length=32,blank=True) event_source = models.CharField(max_length=32) - event_type = 
models.CharField(max_length=32,blank=True) + event_type = models.CharField(max_length=512,blank=True) event = models.TextField(blank=True) agent = models.CharField(max_length=256,blank=True) - page = models.CharField(max_length=32,blank=True,null=True) + page = models.CharField(max_length=512,blank=True,null=True) time = models.DateTimeField('event time') + host = models.CharField(max_length=64,blank=True) def __unicode__(self): s = "[%s] %s@%s: %s | %s | %s | %s" % (self.time, self.username, self.ip, self.event_source, diff --git a/common/djangoapps/track/views.py b/common/djangoapps/track/views.py index 434e75a63f..54bd476799 100644 --- a/common/djangoapps/track/views.py +++ b/common/djangoapps/track/views.py @@ -17,7 +17,7 @@ from track.models import TrackingLog log = logging.getLogger("tracking") -LOGFIELDS = ['username','ip','event_source','event_type','event','agent','page','time'] +LOGFIELDS = ['username','ip','event_source','event_type','event','agent','page','time','host'] def log_event(event): event_str = json.dumps(event) @@ -58,6 +58,7 @@ def user_track(request): "agent": agent, "page": request.GET['page'], "time": datetime.datetime.utcnow().isoformat(), + "host": request.META['SERVER_NAME'], } log_event(event) return HttpResponse('success') @@ -83,6 +84,7 @@ def server_track(request, event_type, event, page=None): "agent": agent, "page": page, "time": datetime.datetime.utcnow().isoformat(), + "host": request.META['SERVER_NAME'], } if event_type.startswith("/event_logs") and request.user.is_staff: # don't log diff --git a/common/djangoapps/util/json_request.py b/common/djangoapps/util/json_request.py index 169a7e3fb4..9458bff858 100644 --- a/common/djangoapps/util/json_request.py +++ b/common/djangoapps/util/json_request.py @@ -4,6 +4,11 @@ import json def expect_json(view_function): + """ + View decorator for simplifying handing of requests that expect json. 
If the request's + CONTENT_TYPE is application/json, parses the json dict from request.body, and updates + request.POST with the contents. + """ @wraps(view_function) def expect_json_with_cloned_request(request, *args, **kwargs): # cdodge: fix postback errors in CMS. The POST 'content-type' header can include additional information diff --git a/common/lib/capa/.coveragerc b/common/lib/capa/.coveragerc index 6af3218f75..149a4c860a 100644 --- a/common/lib/capa/.coveragerc +++ b/common/lib/capa/.coveragerc @@ -7,6 +7,7 @@ source = common/lib/capa ignore_errors = True [html] +title = Capa Python Test Coverage Report directory = reports/common/lib/capa/cover [xml] diff --git a/common/lib/capa/capa/capa_problem.py b/common/lib/capa/capa/capa_problem.py index 451891d067..2eaa0e4286 100644 --- a/common/lib/capa/capa/capa_problem.py +++ b/common/lib/capa/capa/capa_problem.py @@ -33,6 +33,7 @@ from xml.sax.saxutils import unescape import chem import chem.chemcalc import chem.chemtools +import chem.miller import calc from correctmap import CorrectMap @@ -52,7 +53,7 @@ response_tag_dict = dict([(x.response_tag, x) for x in responsetypes.__all__]) solution_tags = ['solution'] # these get captured as student responses -response_properties = ["codeparam", "responseparam", "answer"] +response_properties = ["codeparam", "responseparam", "answer", "openendedparam"] # special problem tags which should be turned into innocuous HTML html_transforms = {'problem': {'tag': 'div'}, @@ -67,10 +68,11 @@ global_context = {'random': random, 'calc': calc, 'eia': eia, 'chemcalc': chem.chemcalc, - 'chemtools': chem.chemtools} + 'chemtools': chem.chemtools, + 'miller': chem.miller} # These should be removed from HTML output, including all subelements -html_problem_semantics = ["codeparam", "responseparam", "answer", "script", "hintgroup"] +html_problem_semantics = ["codeparam", "responseparam", "answer", "script", "hintgroup", "openendedparam","openendedrubric"] log = logging.getLogger('mitx.' 
+ __name__) diff --git a/common/lib/capa/capa/chem/miller.py b/common/lib/capa/capa/chem/miller.py new file mode 100644 index 0000000000..4c10e60ecc --- /dev/null +++ b/common/lib/capa/capa/chem/miller.py @@ -0,0 +1,267 @@ +""" Calculation of Miller indices """ + +import numpy as np +import math +import fractions as fr +import decimal +import json + + +def lcm(a, b): + """ + Returns least common multiple of a, b + + Args: + a, b: floats + + Returns: + float + """ + return a * b / fr.gcd(a, b) + + +def segment_to_fraction(distance): + """ + Converts lengths of which the plane cuts the axes to fraction. + + Tries to convert distance to closest nicest fraction with denominator less or + equal than 10. It is + purely for simplicity and clearance of learning purposes. Jenny: 'In typical + courses students usually do not encounter indices any higher than 6'. + + If distance is not a number (numpy nan), it means that plane is parallel to + axis or contains it. Inverted fraction to nan (nan is 1/0) = 0 / 1 is + returned + + Generally (special cases): + + a) if distance is smaller than some constant, e.g. 0.01011, + then the fraction's denominator is usually much greater than 10. + + b) Also, if student will set point on 0.66 -> 1/3, so it is 333 plane, + But if he will slightly move the mouse and click on 0.65 -> it will be + (16,15,16) plane. That's why we are doing adjustments for points coordinates, + to the closest tick, tick + tick / 2 value. And now UI sends to server only + values multiple to 0.05 (half of tick). Same rounding is implemented for + unittests. + + But if one will want to calculate miller indices with exact coordinates and + with nice fractions (which produce small Miller indices), he may want shift + to new origin if segments are like S = (0.015, > 0.05, >0.05) - close to zero + in one coordinate. He may update S to (0, >0.05, >0.05) and shift origin. + In this way he can receive nice small fractions.
Also there can be a + degenerate case when S = (0.015, 0.012, >0.05) - if update S to (0, 0, >0.05) - + it is a line. This case should be considered separately. Small nice Miller + numbers and the possibility to create very small segments cannot be implemented + at the same time). + + + Args: + distance: float distance that plane cuts on axis, it must not be 0. + Distance is multiple of 0.05. + + Returns: + Inverted fraction. + 0 / 1 if distance is nan + + """ + if np.isnan(distance): + return fr.Fraction(0, 1) + else: + fract = fr.Fraction(distance).limit_denominator(10) + return fr.Fraction(fract.denominator, fract.numerator) + + +def sub_miller(segments): + ''' + Calculates Miller indices from segments. + + Algorithm: + + 1. Obtain inverted fraction from segments + + 2. Find common denominator of inverted fractions + + 3. Convert fractions to the common denominator and throw the denominator away. + + 4. Return obtained values. + + Args: + List of 3 floats, meaning distances that plane cuts on x, y, z axes. + No float equals zero; this means that the plane does not intersect the origin, + i. e. shift of origin has already been done. + + Returns: + String that represents Miller indices, e.g.: (-6,3,-6) or (2,2,2) + ''' + fracts = [segment_to_fraction(segment) for segment in segments] + common_denominator = reduce(lcm, [fract.denominator for fract in fracts]) + miller = ([fract.numerator * math.fabs(common_denominator) / + fract.denominator for fract in fracts]) + return'(' + ','.join(map(str, map(decimal.Decimal, miller))) + ')' + + +def miller(points): + """ + Calculates Miller indices from points. + + Algorithm: + + 1. Calculate normal vector to a plane that goes through all points. + + 2. Set origin. + + 3. Create Cartesian coordinate system (Ccs). + + 4. Find the lengths of segments of which the plane cuts the axes. Equation + of a line for axes: Origin + (Coordinate_vector - Origin) * parameter. + + 5. 
If plane goes through Origin: + + a) Find new random origin: find unit cube vertex, not crossed by a plane. + + b) Repeat 2-4. + + c) Fix signs of segments after Origin shift. This means to consider + original directions of axes. E.g.: Origin was 0,0,0 and became + new_origin. If new_origin has same Y coordinate as Origin, then segment + does not change its sign. But if new_origin has another Y coordinate than + origin (was 0, became 1), then segment has to change its sign (it now + lies on negative side of Y axis). New Origin 0 value of X or Y or Z + coordinate means that segment does not change sign, 1 value -> does + change. So new sign is (1 - 2 * new_origin): 0 -> 1, 1 -> -1 + + 6. Run function that calculates miller indices from segments. + + Args: + List of points. Each point is list of float coordinates. Order of + coordinates in point's list: x, y, z. Points are different! + + Returns: + String that represents Miller indices, e.g.: (-6,3,-6) or (2,2,2) + """ + + N = np.cross(points[1] - points[0], points[2] - points[0]) + O = np.array([0, 0, 0]) + P = points[0] # point of plane + Ccs = map(np.array, [[1.0, 0, 0], [0, 1.0, 0], [0, 0, 1.0]]) + segments = ([np.dot(P - O, N) / np.dot(ort, N) if np.dot(ort, N) != 0 else + np.nan for ort in Ccs]) + if any(x == 0 for x in segments): # Plane goes through origin. 
+ vertices = [ # top: + np.array([1.0, 1.0, 1.0]), + np.array([0.0, 0.0, 1.0]), + np.array([1.0, 0.0, 1.0]), + np.array([0.0, 1.0, 1.0]), + # bottom, except 0,0,0: + np.array([1.0, 0.0, 0.0]), + np.array([0.0, 1.0, 0.0]), + np.array([1.0, 1.0, 1.0]), + ] + for vertex in vertices: + if np.dot(vertex - O, N) != 0: # vertex not in plane + new_origin = vertex + break + # obtain new axes with center in new origin + X = np.array([1 - new_origin[0], new_origin[1], new_origin[2]]) + Y = np.array([new_origin[0], 1 - new_origin[1], new_origin[2]]) + Z = np.array([new_origin[0], new_origin[1], 1 - new_origin[2]]) + new_Ccs = [X - new_origin, Y - new_origin, Z - new_origin] + segments = ([np.dot(P - new_origin, N) / np.dot(ort, N) if + np.dot(ort, N) != 0 else np.nan for ort in new_Ccs]) + # fix signs of indices: 0 -> 1, 1 -> -1 ( + segments = (1 - 2 * new_origin) * segments + + return sub_miller(segments) + + +def grade(user_input, correct_answer): + ''' + Grade crystallography problem. + + Returns true if lattices are the same and Miller indices are same or minus + same. E.g. (2,2,2) = (2, 2, 2) or (-2, -2, -2). Because sign depends only + on student's selection of origin. + + Args: + user_input, correct_answer: json. Format: + + user_input: {"lattice":"sc","points":[["0.77","0.00","1.00"], + ["0.78","1.00","0.00"],["0.00","1.00","0.72"]]} + + correct_answer: {'miller': '(00-1)', 'lattice': 'bcc'} + + "lattice" is one of: "", "sc", "bcc", "fcc" + + Returns: + True or false. + ''' + def negative(m): + """ + Change sign of Miller indices. + + Args: + m: string with meaning of Miller indices. E.g.: + (-6,3,-6) -> (6, -3, 6) + + Returns: + String with changed signs. 
+ """ + output = '' + i = 1 + while i in range(1, len(m) - 1): + if m[i] in (',', ' '): + output += m[i] + elif m[i] not in ('-', '0'): + output += '-' + m[i] + elif m[i] == '0': + output += m[i] + else: + i += 1 + output += m[i] + i += 1 + return '(' + output + ')' + + def round0_25(point): + """ + Rounds point coordinates to closest 0.5 value. + + Args: + point: list of float coordinates. Order of coordinates: x, y, z. + + Returns: + list of coordinates rounded to closes 0.5 value + """ + rounded_points = [] + for coord in point: + base = math.floor(coord * 10) + fractional_part = (coord * 10 - base) + aliquot0_25 = math.floor(fractional_part / 0.25) + if aliquot0_25 == 0.0: + rounded_points.append(base / 10) + if aliquot0_25 in (1.0, 2.0): + rounded_points.append(base / 10 + 0.05) + if aliquot0_25 == 3.0: + rounded_points.append(base / 10 + 0.1) + return rounded_points + + user_answer = json.loads(user_input) + + if user_answer['lattice'] != correct_answer['lattice']: + return False + + points = [map(float, p) for p in user_answer['points']] + + if len(points) < 3: + return False + + # round point to closes 0.05 value + points = [round0_25(point) for point in points] + + points = [np.array(point) for point in points] + # print miller(points), (correct_answer['miller'].replace(' ', ''), + # negative(correct_answer['miller']).replace(' ', '')) + if miller(points) in (correct_answer['miller'].replace(' ', ''), negative(correct_answer['miller']).replace(' ', '')): + return True + + return False diff --git a/common/lib/capa/capa/chem/tests.py b/common/lib/capa/capa/chem/tests.py index 34d903ec1d..571526f915 100644 --- a/common/lib/capa/capa/chem/tests.py +++ b/common/lib/capa/capa/chem/tests.py @@ -1,13 +1,15 @@ import codecs from fractions import Fraction -from pyparsing import ParseException import unittest from chemcalc import (compare_chemical_expression, divide_chemical_expression, render_to_html, chemical_equations_equal) +import miller + local_debug = None + 
def log(s, output_type=None): if local_debug: print s @@ -37,7 +39,6 @@ class Test_Compare_Equations(unittest.TestCase): self.assertFalse(chemical_equations_equal('2H2 + O2 -> H2O2', '2O2 + 2H2 -> 2H2O2')) - def test_different_arrows(self): self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2', '2O2 + 2H2 -> 2H2O2')) @@ -56,7 +57,6 @@ class Test_Compare_Equations(unittest.TestCase): self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2', 'O2 + H2 -> H2O2', exact=True)) - def test_syntax_errors(self): self.assertFalse(chemical_equations_equal('H2 + O2 a-> H2O2', '2O2 + 2H2 -> 2H2O2')) @@ -311,7 +311,6 @@ class Test_Render_Equations(unittest.TestCase): log(out + ' ------- ' + correct, 'html') self.assertEqual(out, correct) - def test_render_eq3(self): s = "H^+ + OH^- <= H2O" # unsupported arrow out = render_to_html(s) @@ -320,10 +319,148 @@ class Test_Render_Equations(unittest.TestCase): self.assertEqual(out, correct) +class Test_Crystallography_Miller(unittest.TestCase): + ''' Tests for crystallography grade function.''' + + def test_empty_points(self): + user_input = '{"lattice": "bcc", "points": []}' + self.assertFalse(miller.grade(user_input, {'miller': '(2,2,2)', 'lattice': 'bcc'})) + + def test_only_one_point(self): + user_input = '{"lattice": "bcc", "points": [["0.50", "0.00", "0.00"]]}' + self.assertFalse(miller.grade(user_input, {'miller': '(2,2,2)', 'lattice': 'bcc'})) + + def test_only_two_points(self): + user_input = '{"lattice": "bcc", "points": [["0.50", "0.00", "0.00"], ["0.00", "0.50", "0.00"]]}' + self.assertFalse(miller.grade(user_input, {'miller': '(2,2,2)', 'lattice': 'bcc'})) + + def test_1(self): + user_input = '{"lattice": "bcc", "points": [["0.50", "0.00", "0.00"], ["0.00", "0.50", "0.00"], ["0.00", "0.00", "0.50"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(2,2,2)', 'lattice': 'bcc'})) + + def test_2(self): + user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "0.00"], ["0.00", "1.00", "0.00"], ["0.00", 
"0.00", "1.00"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(1,1,1)', 'lattice': 'bcc'})) + + def test_3(self): + user_input = '{"lattice": "bcc", "points": [["1.00", "0.50", "1.00"], ["1.00", "1.00", "0.50"], ["0.50", "1.00", "1.00"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(2,2,2)', 'lattice': 'bcc'})) + + def test_4(self): + user_input = '{"lattice": "bcc", "points": [["0.33", "1.00", "0.00"], ["0.00", "0.664", "0.00"], ["0.00", "1.00", "0.33"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(-3, 3, -3)', 'lattice': 'bcc'})) + + def test_5(self): + """ return true only in case points coordinates are exact. + But if they transform to closest 0.05 value it is not true""" + user_input = '{"lattice": "bcc", "points": [["0.33", "1.00", "0.00"], ["0.00", "0.33", "0.00"], ["0.00", "1.00", "0.33"]]}' + self.assertFalse(miller.grade(user_input, {'miller': '(-6,3,-6)', 'lattice': 'bcc'})) + + def test_6(self): + user_input = '{"lattice": "bcc", "points": [["0.00", "0.25", "0.00"], ["0.25", "0.00", "0.00"], ["0.00", "0.00", "0.25"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(4,4,4)', 'lattice': 'bcc'})) + + def test_7(self): # goes throug origin + user_input = '{"lattice": "bcc", "points": [["0.00", "1.00", "0.00"], ["1.00", "0.00", "0.00"], ["0.50", "1.00", "0.00"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(0,0,-1)', 'lattice': 'bcc'})) + + def test_8(self): + user_input = '{"lattice": "bcc", "points": [["0.00", "1.00", "0.50"], ["1.00", "0.00", "0.50"], ["0.50", "1.00", "0.50"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(0,0,2)', 'lattice': 'bcc'})) + + def test_9(self): + user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "1.00"], ["0.00", "1.00", "1.00"], ["1.00", "0.00", "0.00"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(1,1,0)', 'lattice': 'bcc'})) + + def test_10(self): + user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "1.00"], ["0.00", 
"0.00", "0.00"], ["0.00", "1.00", "1.00"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(1,1,-1)', 'lattice': 'bcc'})) + + def test_11(self): + user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "0.50"], ["1.00", "1.00", "0.00"], ["0.00", "1.00", "0.00"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(0,1,2)', 'lattice': 'bcc'})) + + def test_12(self): + user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "0.50"], ["0.00", "0.00", "0.50"], ["1.00", "1.00", "1.00"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(0,1,-2)', 'lattice': 'bcc'})) + + def test_13(self): + user_input = '{"lattice": "bcc", "points": [["0.50", "0.00", "0.00"], ["0.50", "1.00", "0.00"], ["0.00", "0.00", "1.00"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(2,0,1)', 'lattice': 'bcc'})) + + def test_14(self): + user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["0.00", "0.00", "1.00"], ["0.50", "1.00", "0.00"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(2,-1,0)', 'lattice': 'bcc'})) + + def test_15(self): + user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "1.00", "0.00"], ["0.00", "1.00", "1.00"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(1,-1,1)', 'lattice': 'bcc'})) + + def test_16(self): + user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "0.00"], ["0.00", "1.00", "0.00"], ["1.00", "1.00", "1.00"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(1,1,-1)', 'lattice': 'bcc'})) + + def test_17(self): + user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "0.00", "1.00"], ["1.00", "1.00", "0.00"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(-1,1,1)', 'lattice': 'bcc'})) + + def test_18(self): + user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "1.00", "0.00"], ["0.00", "1.00", "1.00"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(1,-1,1)', 'lattice': 
'bcc'})) + + def test_19(self): + user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "1.00", "0.00"], ["0.00", "0.00", "1.00"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(-1,1,0)', 'lattice': 'bcc'})) + + def test_20(self): + user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "0.00"], ["1.00", "1.00", "0.00"], ["0.00", "0.00", "1.00"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(1,0,1)', 'lattice': 'bcc'})) + + def test_21(self): + user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["0.00", "1.00", "0.00"], ["1.00", "0.00", "1.00"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(-1,0,1)', 'lattice': 'bcc'})) + + def test_22(self): + user_input = '{"lattice": "bcc", "points": [["0.00", "1.00", "0.00"], ["1.00", "1.00", "0.00"], ["0.00", "0.00", "1.00"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(0,1,1)', 'lattice': 'bcc'})) + + def test_23(self): + user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "0.00", "0.00"], ["1.00", "1.00", "1.00"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(0,-1,1)', 'lattice': 'bcc'})) + + def test_24(self): + user_input = '{"lattice": "bcc", "points": [["0.66", "0.00", "0.00"], ["0.00", "0.66", "0.00"], ["0.00", "0.00", "0.66"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(3,3,3)', 'lattice': 'bcc'})) + + def test_25(self): + user_input = u'{"lattice":"","points":[["0.00","0.00","0.01"],["1.00","1.00","0.01"],["0.00","1.00","1.00"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(1,-1,1)', 'lattice': ''})) + + def test_26(self): + user_input = u'{"lattice":"","points":[["0.00","0.01","0.00"],["1.00","0.00","0.00"],["0.00","0.00","1.00"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(0,-1,0)', 'lattice': ''})) + + def test_27(self): + """ rounding to 0.35""" + user_input = 
u'{"lattice":"","points":[["0.33","0.00","0.00"],["0.00","0.33","0.00"],["0.00","0.00","0.33"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(3,3,3)', 'lattice': ''})) + + def test_28(self): + """ rounding to 0.30""" + user_input = u'{"lattice":"","points":[["0.30","0.00","0.00"],["0.00","0.30","0.00"],["0.00","0.00","0.30"]]}' + self.assertTrue(miller.grade(user_input, {'miller': '(10,10,10)', 'lattice': ''})) + + def test_wrong_lattice(self): + user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "0.00", "0.00"], ["1.00", "1.00", "1.00"]]}' + self.assertFalse(miller.grade(user_input, {'miller': '(3,3,3)', 'lattice': 'fcc'})) + def suite(): - testcases = [Test_Compare_Expressions, Test_Divide_Expressions, Test_Render_Equations] + testcases = [Test_Compare_Expressions, + Test_Divide_Expressions, + Test_Render_Equations, + Test_Crystallography_Miller] suites = [] for testcase in testcases: suites.append(unittest.TestLoader().loadTestsFromTestCase(testcase)) diff --git a/common/lib/capa/capa/inputtypes.py b/common/lib/capa/capa/inputtypes.py index 0b2250f98d..1d3646fefc 100644 --- a/common/lib/capa/capa/inputtypes.py +++ b/common/lib/capa/capa/inputtypes.py @@ -671,18 +671,15 @@ class Crystallography(InputTypeBase): """ Note: height, width are required. 
""" - return [Attribute('size', None), - Attribute('height'), + return [Attribute('height'), Attribute('width'), - - # can probably be removed (textline should prob be always-hidden) - Attribute('hidden', ''), ] registry.register(Crystallography) # ------------------------------------------------------------------------- + class VseprInput(InputTypeBase): """ Input for molecular geometry--show possible structures, let student @@ -736,3 +733,5 @@ class ChemicalEquationInput(InputTypeBase): return {'previewer': '/static/js/capa/chemical_equation_preview.js',} registry.register(ChemicalEquationInput) + +#----------------------------------------------------------------------------- diff --git a/common/lib/capa/capa/responsetypes.py b/common/lib/capa/capa/responsetypes.py index b990c489b3..3d97cb0bea 100644 --- a/common/lib/capa/capa/responsetypes.py +++ b/common/lib/capa/capa/responsetypes.py @@ -8,22 +8,25 @@ Used by capa_problem.py ''' # standard library imports +import abc import cgi +import hashlib import inspect import json import logging import numbers import numpy +import os import random import re import requests -import traceback -import hashlib -import abc -import os import subprocess +import traceback import xml.sax.saxutils as saxutils +from collections import namedtuple +from shapely.geometry import Point, MultiPoint + # specific library imports from calc import evaluator, UndefinedVariable from correctmap import CorrectMap @@ -626,7 +629,7 @@ class MultipleChoiceResponse(LoncapaResponse): # define correct choices (after calling secondary setup) xml = self.xml cxml = xml.xpath('//*[@id=$id]//choice[@correct="true"]', id=xml.get('id')) - self.correct_choices = [choice.get('name') for choice in cxml] + self.correct_choices = [contextualize_text(choice.get('name'), self.context) for choice in cxml] def mc_setup_response(self): ''' @@ -720,7 +723,7 @@ class OptionResponse(LoncapaResponse): return cmap def get_answers(self): - amap = dict([(af.get('id'), 
af.get('correct')) for af in self.answer_fields]) + amap = dict([(af.get('id'), contextualize_text(af.get('correct'), self.context)) for af in self.answer_fields]) # log.debug('%s: expected answers=%s' % (unicode(self),amap)) return amap @@ -1100,6 +1103,15 @@ class SymbolicResponse(CustomResponse): #----------------------------------------------------------------------------- +""" +valid: Flag indicating valid score_msg format (Boolean) +correct: Correctness of submission (Boolean) +score: Points to be assigned (numeric, can be float) +msg: Message from grader to display to student (string) +""" +ScoreMessage = namedtuple('ScoreMessage', + ['valid', 'correct', 'points', 'msg']) + class CodeResponse(LoncapaResponse): """ @@ -1139,7 +1151,7 @@ class CodeResponse(LoncapaResponse): else: self._parse_coderesponse_xml(codeparam) - def _parse_coderesponse_xml(self,codeparam): + def _parse_coderesponse_xml(self, codeparam): ''' Parse the new CodeResponse XML format. When successful, sets: self.initial_display @@ -1151,17 +1163,9 @@ class CodeResponse(LoncapaResponse): grader_payload = grader_payload.text if grader_payload is not None else '' self.payload = {'grader_payload': grader_payload} - answer_display = codeparam.find('answer_display') - if answer_display is not None: - self.answer = answer_display.text - else: - self.answer = 'No answer provided.' 
- - initial_display = codeparam.find('initial_display') - if initial_display is not None: - self.initial_display = initial_display.text - else: - self.initial_display = '' + self.initial_display = find_with_default(codeparam, 'initial_display', '') + self.answer = find_with_default(codeparam, 'answer_display', + 'No answer provided.') def _parse_externalresponse_xml(self): ''' @@ -1308,8 +1312,6 @@ class CodeResponse(LoncapaResponse): # Sanity check on returned points if points < 0: points = 0 - elif points > self.maxpoints[self.answer_id]: - points = self.maxpoints[self.answer_id] # Queuestate is consumed oldcmap.set(self.answer_id, npoints=points, correctness=correctness, msg=msg.replace(' ', ' '), queuestate=None) @@ -1717,15 +1719,38 @@ class ImageResponse(LoncapaResponse): which produces an [x,y] coordinate pair. The click is correct if it falls within a region specified. This region is a union of rectangles. - Lon-CAPA requires that each has a inside it. That - doesn't make sense to me (Ike). Instead, let's have it such that - should contain one or more stanzas. Each should specify - a rectangle, given as an attribute, defining the correct answer. + Lon-CAPA requires that each has a inside it. + That doesn't make sense to me (Ike). Instead, let's have it such that + should contain one or more stanzas. + Each should specify a rectangle(s) or region(s), given as an + attribute, defining the correct answer. + + + + Regions is list of lists [region1, region2, region3, ...] where regionN + is disordered list of points: [[1,1], [100,100], [50,50], [20, 70]]. + + If there is only one region in the list, simpler notation can be used: + regions="[[10,10], [30,30], [10, 30], [30, 10]]" (without explicitly + setting outer list) + + Returns: + True, if click is inside any region or rectangle. Otherwise False. 
""" snippets = [{'snippet': ''' - - - + + + + + '''}] response_tag = 'imageresponse' @@ -1733,19 +1758,17 @@ class ImageResponse(LoncapaResponse): def setup_response(self): self.ielements = self.inputfields - self.answer_ids = [ie.get('id') for ie in self.ielements] + self.answer_ids = [ie.get('id') for ie in self.ielements] def get_score(self, student_answers): correct_map = CorrectMap() expectedset = self.get_answers() - - for aid in self.answer_ids: # loop through IDs of fields in our stanza - given = student_answers[aid] # this should be a string of the form '[x,y]' - + for aid in self.answer_ids: # loop through IDs of + # fields in our stanza + given = student_answers[aid] # this should be a string of the form '[x,y]' correct_map.set(aid, 'incorrect') - if not given: # No answer to parse. Mark as incorrect and move on + if not given: # No answer to parse. Mark as incorrect and move on continue - # parse given answer m = re.match('\[([0-9]+),([0-9]+)]', given.strip().replace(' ', '')) if not m: @@ -1753,29 +1776,44 @@ class ImageResponse(LoncapaResponse): 'error grading %s (input=%s)' % (aid, given)) (gx, gy) = [int(x) for x in m.groups()] - # Check whether given point lies in any of the solution rectangles - solution_rectangles = expectedset[aid].split(';') - for solution_rectangle in solution_rectangles: - # parse expected answer - # TODO: Compile regexp on file load - m = re.match('[\(\[]([0-9]+),([0-9]+)[\)\]]-[\(\[]([0-9]+),([0-9]+)[\)\]]', - solution_rectangle.strip().replace(' ', '')) - if not m: - msg = 'Error in problem specification! 
cannot parse rectangle in %s' % ( - etree.tostring(self.ielements[aid], pretty_print=True)) - raise Exception('[capamodule.capa.responsetypes.imageinput] ' + msg) - (llx, lly, urx, ury) = [int(x) for x in m.groups()] - - # answer is correct if (x,y) is within the specified rectangle - if (llx <= gx <= urx) and (lly <= gy <= ury): - correct_map.set(aid, 'correct') - break + rectangles, regions = expectedset + if rectangles[aid]: # rectangles part - for backward compatibility + # Check whether given point lies in any of the solution rectangles + solution_rectangles = rectangles[aid].split(';') + for solution_rectangle in solution_rectangles: + # parse expected answer + # TODO: Compile regexp on file load + m = re.match('[\(\[]([0-9]+),([0-9]+)[\)\]]-[\(\[]([0-9]+),([0-9]+)[\)\]]', + solution_rectangle.strip().replace(' ', '')) + if not m: + msg = 'Error in problem specification! cannot parse rectangle in %s' % ( + etree.tostring(self.ielements[aid], pretty_print=True)) + raise Exception('[capamodule.capa.responsetypes.imageinput] ' + msg) + (llx, lly, urx, ury) = [int(x) for x in m.groups()] + # answer is correct if (x,y) is within the specified rectangle + if (llx <= gx <= urx) and (lly <= gy <= ury): + correct_map.set(aid, 'correct') + break + if correct_map[aid]['correctness'] != 'correct' and regions[aid]: + parsed_region = json.loads(regions[aid]) + if parsed_region: + if type(parsed_region[0][0]) != list: + # we have [[1,2],[3,4],[5,6]] - single region + # instead of [[[1,2],[3,4],[5,6], [[1,2],[3,4],[5,6]]] + # or [[[1,2],[3,4],[5,6]]] - multiple regions syntax + parsed_region = [parsed_region] + for region in parsed_region: + polygon = MultiPoint(region).convex_hull + if (polygon.type == 'Polygon' and + polygon.contains(Point(gx, gy))): + correct_map.set(aid, 'correct') + break return correct_map def get_answers(self): - return dict([(ie.get('id'), ie.get('rectangle')) for ie in self.ielements]) - + return (dict([(ie.get('id'), ie.get('rectangle')) for ie in 
self.ielements]), + dict([(ie.get('id'), ie.get('regions')) for ie in self.ielements])) #----------------------------------------------------------------------------- # TEMPORARY: List of all response subclasses # FIXME: To be replaced by auto-registration diff --git a/common/lib/capa/capa/templates/crystallography.html b/common/lib/capa/capa/templates/crystallography.html index 2370f59dd2..8dcbff354b 100644 --- a/common/lib/capa/capa/templates/crystallography.html +++ b/common/lib/capa/capa/templates/crystallography.html @@ -1,34 +1,28 @@
-
+
+ +
+ Lattice: +
+
-
- % if status == 'unsubmitted': -
+
% elif status == 'correct': -
+
% elif status == 'incorrect': -
+
% elif status == 'incomplete': -
- % endif - % if hidden: -
+
% endif - -

+ + +

% if status == 'unsubmitted': unanswered % elif status == 'correct': @@ -38,14 +32,15 @@ % elif status == 'incomplete': incomplete % endif -

+

-

+

- % if msg: - ${msg|n} - % endif -% if status in ['unsubmitted', 'correct', 'incorrect', 'incomplete']: -
-% endif + % if msg: + ${msg|n} + % endif + + % if status in ['unsubmitted', 'correct', 'incorrect', 'incomplete']: +
+ % endif
diff --git a/common/lib/capa/capa/tests/test_files/imageresponse.xml b/common/lib/capa/capa/tests/test_files/imageresponse.xml index 34dba37e3b..41c9f01218 100644 --- a/common/lib/capa/capa/tests/test_files/imageresponse.xml +++ b/common/lib/capa/capa/tests/test_files/imageresponse.xml @@ -18,4 +18,23 @@ Hello

Use conservation of energy.

+ + + + + + + +Click on either of the two positions as discussed previously. + +Click on either of the two positions as discussed previously. + + +Click on either of the two positions as discussed previously. + +

Use conservation of energy.

+
+
+ + diff --git a/common/lib/capa/capa/tests/test_inputtypes.py b/common/lib/capa/capa/tests/test_inputtypes.py index 826d304717..dafd31bdc7 100644 --- a/common/lib/capa/capa/tests/test_inputtypes.py +++ b/common/lib/capa/capa/tests/test_inputtypes.py @@ -407,13 +407,11 @@ class CrystallographyTest(unittest.TestCase): def test_rendering(self): height = '12' width = '33' - size = '10' xml_str = """""".format(h=height, w=width, s=size) + />""".format(h=height, w=width) element = etree.fromstring(xml_str) @@ -428,9 +426,7 @@ class CrystallographyTest(unittest.TestCase): expected = {'id': 'prob_1_2', 'value': value, 'status': 'unsubmitted', - 'size': size, 'msg': '', - 'hidden': '', 'width': width, 'height': height, } diff --git a/common/lib/capa/capa/tests/test_responsetypes.py b/common/lib/capa/capa/tests/test_responsetypes.py index bcac555b5e..9eecef3986 100644 --- a/common/lib/capa/capa/tests/test_responsetypes.py +++ b/common/lib/capa/capa/tests/test_responsetypes.py @@ -52,24 +52,57 @@ class ImageResponseTest(unittest.TestCase): def test_ir_grade(self): imageresponse_file = os.path.dirname(__file__) + "/test_files/imageresponse.xml" test_lcp = lcp.LoncapaProblem(open(imageresponse_file).read(), '1', system=test_system) - correct_answers = {'1_2_1': '(490,11)-(556,98)', - '1_2_2': '(242,202)-(296,276)', - '1_2_3': '(490,11)-(556,98);(242,202)-(296,276)', - '1_2_4': '(490,11)-(556,98);(242,202)-(296,276)', - '1_2_5': '(490,11)-(556,98);(242,202)-(296,276)', + # testing regions only + correct_answers = { + #regions + '1_2_1': '(490,11)-(556,98)', + '1_2_2': '(242,202)-(296,276)', + '1_2_3': '(490,11)-(556,98);(242,202)-(296,276)', + '1_2_4': '(490,11)-(556,98);(242,202)-(296,276)', + '1_2_5': '(490,11)-(556,98);(242,202)-(296,276)', + #testing regions and rectanges + '1_3_1': 'rectangle="(490,11)-(556,98)" \ + regions="[[[10,10], [20,10], [20, 30]], [[100,100], [120,100], [120,150]]]"', + '1_3_2': 'rectangle="(490,11)-(556,98)" \ + regions="[[[10,10], [20,10], [20, 
30]], [[100,100], [120,100], [120,150]]]"', + '1_3_3': 'regions="[[[10,10], [20,10], [20, 30]], [[100,100], [120,100], [120,150]]]"', + '1_3_4': 'regions="[[[10,10], [20,10], [20, 30]], [[100,100], [120,100], [120,150]]]"', + '1_3_5': 'regions="[[[10,10], [20,10], [20, 30]]]"', + '1_3_6': 'regions="[[10,10], [30,30], [15, 15]]"', + '1_3_7': 'regions="[[10,10], [30,30], [10, 30], [30, 10]]"', } - test_answers = {'1_2_1': '[500,20]', - '1_2_2': '[250,300]', - '1_2_3': '[500,20]', - '1_2_4': '[250,250]', - '1_2_5': '[10,10]', + test_answers = { + '1_2_1': '[500,20]', + '1_2_2': '[250,300]', + '1_2_3': '[500,20]', + '1_2_4': '[250,250]', + '1_2_5': '[10,10]', + + '1_3_1': '[500,20]', + '1_3_2': '[15,15]', + '1_3_3': '[500,20]', + '1_3_4': '[115,115]', + '1_3_5': '[15,15]', + '1_3_6': '[20,20]', + '1_3_7': '[20,15]', } + + # regions self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect') self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_3'), 'correct') self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_4'), 'correct') self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_5'), 'incorrect') + # regions and rectangles + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'correct') + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_2'), 'correct') + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_3'), 'incorrect') + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_4'), 'correct') + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_5'), 'correct') + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_6'), 'incorrect') + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_7'), 'correct') + 
class SymbolicResponseTest(unittest.TestCase): def test_sr_grade(self): diff --git a/common/lib/capa/capa/util.py b/common/lib/capa/capa/util.py index 10e984611b..0df58c216f 100644 --- a/common/lib/capa/capa/util.py +++ b/common/lib/capa/capa/util.py @@ -65,3 +65,25 @@ def is_file(file_to_test): Duck typing to check if 'file_to_test' is a File object ''' return all(hasattr(file_to_test, method) for method in ['read', 'name']) + + +def find_with_default(node, path, default): + """ + Look for a child of node using , and return its text if found. + Otherwise returns default. + + Arguments: + node: lxml node + path: xpath search expression + default: value to return if nothing found + + Returns: + node.find(path).text if the find succeeds, default otherwise. + + """ + v = node.find(path) + if v is not None: + return v.text + else: + return default + diff --git a/common/lib/capa/capa/xqueue_interface.py b/common/lib/capa/capa/xqueue_interface.py index 0214488cce..798867955b 100644 --- a/common/lib/capa/capa/xqueue_interface.py +++ b/common/lib/capa/capa/xqueue_interface.py @@ -49,6 +49,7 @@ def parse_xreply(xreply): return_code = xreply['return_code'] content = xreply['content'] + return (return_code, content) @@ -80,7 +81,11 @@ class XQueueInterface(object): # Log in, then try again if error and (msg == 'login_required'): - self._login() + (error, content) = self._login() + if error != 0: + # when the login fails + log.debug("Failed to login to queue: %s", content) + return (error, content) if files_to_upload is not None: # Need to rewind file pointers for f in files_to_upload: diff --git a/common/lib/xmodule/.coveragerc b/common/lib/xmodule/.coveragerc index 310c8e778b..baadd30829 100644 --- a/common/lib/xmodule/.coveragerc +++ b/common/lib/xmodule/.coveragerc @@ -7,6 +7,7 @@ source = common/lib/xmodule ignore_errors = True [html] +title = XModule Python Test Coverage Report directory = reports/common/lib/xmodule/cover [xml] diff --git a/common/lib/xmodule/setup.py 
b/common/lib/xmodule/setup.py index d3889bc388..86636ef05a 100644 --- a/common/lib/xmodule/setup.py +++ b/common/lib/xmodule/setup.py @@ -19,6 +19,7 @@ setup( "abtest = xmodule.abtest_module:ABTestDescriptor", "book = xmodule.backcompat_module:TranslateCustomTagDescriptor", "chapter = xmodule.seq_module:SequenceDescriptor", + "combinedopenended = xmodule.combined_open_ended_module:CombinedOpenEndedDescriptor", "course = xmodule.course_module:CourseDescriptor", "customtag = xmodule.template_module:CustomTagDescriptor", "discuss = xmodule.backcompat_module:TranslateCustomTagDescriptor", @@ -28,7 +29,6 @@ setup( "problem = xmodule.capa_module:CapaDescriptor", "problemset = xmodule.seq_module:SequenceDescriptor", "section = xmodule.backcompat_module:SemanticSectionDescriptor", - "selfassessment = xmodule.self_assessment_module:SelfAssessmentDescriptor", "sequential = xmodule.seq_module:SequenceDescriptor", "slides = xmodule.backcompat_module:TranslateCustomTagDescriptor", "vertical = xmodule.vertical_module:VerticalDescriptor", @@ -36,6 +36,7 @@ setup( "videodev = xmodule.backcompat_module:TranslateCustomTagDescriptor", "videosequence = xmodule.seq_module:SequenceDescriptor", "discussion = xmodule.discussion_module:DiscussionDescriptor", + "graphical_slider_tool = xmodule.gst_module:GraphicalSliderToolDescriptor", ] } ) diff --git a/common/lib/xmodule/xmodule/capa_module.py b/common/lib/xmodule/xmodule/capa_module.py index 47d5d5c423..1da271072a 100644 --- a/common/lib/xmodule/xmodule/capa_module.py +++ b/common/lib/xmodule/xmodule/capa_module.py @@ -146,6 +146,11 @@ class CapaModule(XModule): else: self.seed = None + # Need the problem location in openendedresponse to send out. Adding + # it to the system here seems like the least clunky way to get it + # there. 
+ self.system.set('location', self.location.url()) + try: # TODO (vshnayder): move as much as possible of this work and error # checking to descriptor load time @@ -425,6 +430,7 @@ class CapaModule(XModule): return False + def update_score(self, get): """ Delivers grading response (e.g. from asynchronous code checking) to diff --git a/common/lib/xmodule/xmodule/combined_open_ended_module.py b/common/lib/xmodule/xmodule/combined_open_ended_module.py new file mode 100644 index 0000000000..a88acc6ffd --- /dev/null +++ b/common/lib/xmodule/xmodule/combined_open_ended_module.py @@ -0,0 +1,598 @@ +import copy +from fs.errors import ResourceNotFoundError +import itertools +import json +import logging +from lxml import etree +from lxml.html import rewrite_links +from path import path +import os +import sys + +from pkg_resources import resource_string + +from .capa_module import only_one, ComplexEncoder +from .editing_module import EditingDescriptor +from .html_checker import check_html +from progress import Progress +from .stringify import stringify_children +from .x_module import XModule +from .xml_module import XmlDescriptor +from xmodule.modulestore import Location +import self_assessment_module +import open_ended_module + +from mitxmako.shortcuts import render_to_string + +log = logging.getLogger("mitx.courseware") + +# Set the default number of max attempts. Should be 1 for production +# Set higher for debugging/testing +# attempts specified in xml definition overrides this. +MAX_ATTEMPTS = 10000 + +# Set maximum available number of points. +# Overriden by max_score specified in xml. +MAX_SCORE = 1 + +class CombinedOpenEndedModule(XModule): + """ + This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc). + It transitions between problems, and support arbitrary ordering. + Each combined open ended module contains one or multiple "child" modules. + Child modules track their own state, and can transition between states. 
They also implement get_html and + handle_ajax. + The combined open ended module transitions between child modules as appropriate, tracks its own state, and passess + ajax requests from the browser to the child module or handles them itself (in the cases of reset and next problem) + ajax actions implemented by all children are: + 'save_answer' -- Saves the student answer + 'save_assessment' -- Saves the student assessment (or external grader assessment) + 'save_post_assessment' -- saves a post assessment (hint, feedback on feedback, etc) + ajax actions implemented by combined open ended module are: + 'reset' -- resets the whole combined open ended module and returns to the first child module + 'next_problem' -- moves to the next child module + 'get_results' -- gets results from a given child module + + Types of children. Task is synonymous with child module, so each combined open ended module + incorporates multiple children (tasks): + openendedmodule + selfassessmentmodule + """ + STATE_VERSION = 1 + + # states + INITIAL = 'initial' + ASSESSING = 'assessing' + INTERMEDIATE_DONE = 'intermediate_done' + DONE = 'done' + + js = {'coffee': [resource_string(__name__, 'js/src/combinedopenended/display.coffee'), + resource_string(__name__, 'js/src/collapsible.coffee'), + resource_string(__name__, 'js/src/javascript_loader.coffee'), + ]} + js_module_name = "CombinedOpenEnded" + + css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]} + + def __init__(self, system, location, definition, descriptor, + instance_state=None, shared_state=None, **kwargs): + XModule.__init__(self, system, location, definition, descriptor, + instance_state, shared_state, **kwargs) + + """ + Definition file should have one or many task blocks, a rubric block, and a prompt block: + + Sample file: + + + Blah blah rubric. + + + Some prompt. + + + + + What hint about this problem would you give to someone? + + + Save Succcesful. Thanks for participating! 
+ + + + + + + Enter essay here. + This is the answer. + {"grader_settings" : "ml_grading.conf", + "problem_id" : "6.002x/Welcome/OETest"} + + + + + + """ + + # Load instance state + if instance_state is not None: + instance_state = json.loads(instance_state) + else: + instance_state = {} + + #We need to set the location here so the child modules can use it + system.set('location', location) + + #Tells the system which xml definition to load + self.current_task_number = instance_state.get('current_task_number', 0) + #This loads the states of the individual children + self.task_states = instance_state.get('task_states', []) + #Overall state of the combined open ended module + self.state = instance_state.get('state', self.INITIAL) + + self.attempts = instance_state.get('attempts', 0) + + #Allow reset is true if student has failed the criteria to move to the next child task + self.allow_reset = instance_state.get('ready_to_reset', False) + self.max_attempts = int(self.metadata.get('attempts', MAX_ATTEMPTS)) + + # Used for progress / grading. Currently get credit just for + # completion (doesn't matter if you self-assessed correct/incorrect). + self._max_score = int(self.metadata.get('max_score', MAX_SCORE)) + + #Static data is passed to the child modules to render + self.static_data = { + 'max_score': self._max_score, + 'max_attempts': self.max_attempts, + 'prompt': definition['prompt'], + 'rubric': definition['rubric'] + } + + self.task_xml = definition['task_xml'] + self.setup_next_task() + + def get_tag_name(self, xml): + """ + Gets the tag name of a given xml block. + Input: XML string + Output: The name of the root tag + """ + tag = etree.fromstring(xml).tag + return tag + + def overwrite_state(self, current_task_state): + """ + Overwrites an instance state and sets the latest response to the current response. This is used + to ensure that the student response is carried over from the first child to the rest. 
+ Input: Task state json string + Output: Task state json string + """ + last_response_data = self.get_last_response(self.current_task_number - 1) + last_response = last_response_data['response'] + + loaded_task_state = json.loads(current_task_state) + if loaded_task_state['state'] == self.INITIAL: + loaded_task_state['state'] = self.ASSESSING + loaded_task_state['created'] = True + loaded_task_state['history'].append({'answer': last_response}) + current_task_state = json.dumps(loaded_task_state) + return current_task_state + + def child_modules(self): + """ + Returns the constructors associated with the child modules in a dictionary. This makes writing functions + simpler (saves code duplication) + Input: None + Output: A dictionary of dictionaries containing the descriptor functions and module functions + """ + child_modules = { + 'openended': open_ended_module.OpenEndedModule, + 'selfassessment': self_assessment_module.SelfAssessmentModule, + } + child_descriptors = { + 'openended': open_ended_module.OpenEndedDescriptor, + 'selfassessment': self_assessment_module.SelfAssessmentDescriptor, + } + children = { + 'modules': child_modules, + 'descriptors': child_descriptors, + } + return children + + def setup_next_task(self, reset=False): + """ + Sets up the next task for the module. Creates an instance state if none exists, carries over the answer + from the last instance state to the next if needed. + Input: A boolean indicating whether or not the reset function is calling. 
+ Output: Boolean True (not useful right now) + """ + current_task_state = None + if len(self.task_states) > self.current_task_number: + current_task_state = self.task_states[self.current_task_number] + + self.current_task_xml = self.task_xml[self.current_task_number] + + if self.current_task_number > 0: + self.allow_reset = self.check_allow_reset() + if self.allow_reset: + self.current_task_number = self.current_task_number - 1 + + current_task_type = self.get_tag_name(self.current_task_xml) + + children = self.child_modules() + child_task_module = children['modules'][current_task_type] + + self.current_task_descriptor = children['descriptors'][current_task_type](self.system) + + #This is the xml object created from the xml definition of the current task + etree_xml = etree.fromstring(self.current_task_xml) + + #This sends the etree_xml object through the descriptor module of the current task, and + #returns the xml parsed by the descriptor + self.current_task_parsed_xml = self.current_task_descriptor.definition_from_xml(etree_xml, self.system) + if current_task_state is None and self.current_task_number == 0: + self.current_task = child_task_module(self.system, self.location, + self.current_task_parsed_xml, self.current_task_descriptor, self.static_data) + self.task_states.append(self.current_task.get_instance_state()) + self.state = self.ASSESSING + elif current_task_state is None and self.current_task_number > 0: + last_response_data = self.get_last_response(self.current_task_number - 1) + last_response = last_response_data['response'] + current_task_state=json.dumps({ + 'state' : self.ASSESSING, + 'version' : self.STATE_VERSION, + 'max_score' : self._max_score, + 'attempts' : 0, + 'created' : True, + 'history' : [{'answer' : str(last_response)}], + }) + self.current_task = child_task_module(self.system, self.location, + self.current_task_parsed_xml, self.current_task_descriptor, self.static_data, + instance_state=current_task_state) + 
self.task_states.append(self.current_task.get_instance_state()) + self.state = self.ASSESSING + else: + if self.current_task_number > 0 and not reset: + current_task_state = self.overwrite_state(current_task_state) + self.current_task = child_task_module(self.system, self.location, + self.current_task_parsed_xml, self.current_task_descriptor, self.static_data, + instance_state=current_task_state) + + log.debug(current_task_state) + return True + + def check_allow_reset(self): + """ + Checks to see if the student has passed the criteria to move to the next module. If not, sets + allow_reset to true and halts the student progress through the tasks. + Input: None + Output: the allow_reset attribute of the current module. + """ + if not self.allow_reset: + if self.current_task_number > 0: + last_response_data = self.get_last_response(self.current_task_number - 1) + current_response_data = self.get_current_attributes(self.current_task_number) + + if(current_response_data['min_score_to_attempt'] > last_response_data['score'] + or current_response_data['max_score_to_attempt'] < last_response_data['score']): + self.state = self.DONE + self.allow_reset = True + + return self.allow_reset + + def get_context(self): + """ + Generates a context dictionary that is used to render html. + Input: None + Output: A dictionary that can be rendered into the combined open ended template. + """ + task_html = self.get_html_base() + #set context variables and render template + + context = { + 'items': [{'content': task_html}], + 'ajax_url': self.system.ajax_url, + 'allow_reset': self.allow_reset, + 'state': self.state, + 'task_count': len(self.task_xml), + 'task_number': self.current_task_number + 1, + 'status': self.get_status(), + } + + return context + + def get_html(self): + """ + Gets HTML for rendering. 
+ Input: None + Output: rendered html + """ + context = self.get_context() + html = self.system.render_template('combined_open_ended.html', context) + return html + + def get_html_nonsystem(self): + """ + Gets HTML for rendering via AJAX. Does not use system, because system contains some additional + html, which is not appropriate for returning via ajax calls. + Input: None + Output: HTML rendered directly via Mako + """ + context = self.get_context() + html = render_to_string('combined_open_ended.html', context) + return html + + def get_html_base(self): + """ + Gets the HTML associated with the current child task + Input: None + Output: Child task HTML + """ + self.update_task_states() + html = self.current_task.get_html(self.system) + return_html = rewrite_links(html, self.rewrite_content_links) + return return_html + + def get_current_attributes(self, task_number): + """ + Gets the min and max score to attempt attributes of the specified task. + Input: The number of the task. + Output: The minimum and maximum scores needed to move on to the specified task. + """ + task_xml = self.task_xml[task_number] + etree_xml = etree.fromstring(task_xml) + min_score_to_attempt = int(etree_xml.attrib.get('min_score_to_attempt', 0)) + max_score_to_attempt = int(etree_xml.attrib.get('max_score_to_attempt', self._max_score)) + return {'min_score_to_attempt': min_score_to_attempt, 'max_score_to_attempt': max_score_to_attempt} + + def get_last_response(self, task_number): + """ + Returns data associated with the specified task number, such as the last response, score, etc. + Input: The number of the task. + Output: A dictionary that contains information about the specified task. 
+ """ + last_response = "" + task_state = self.task_states[task_number] + task_xml = self.task_xml[task_number] + task_type = self.get_tag_name(task_xml) + + children = self.child_modules() + + task_descriptor = children['descriptors'][task_type](self.system) + etree_xml = etree.fromstring(task_xml) + + min_score_to_attempt = int(etree_xml.attrib.get('min_score_to_attempt', 0)) + max_score_to_attempt = int(etree_xml.attrib.get('max_score_to_attempt', self._max_score)) + + task_parsed_xml = task_descriptor.definition_from_xml(etree_xml, self.system) + task = children['modules'][task_type](self.system, self.location, task_parsed_xml, task_descriptor, + self.static_data, instance_state=task_state) + last_response = task.latest_answer() + last_score = task.latest_score() + last_post_assessment = task.latest_post_assessment() + last_post_feedback = "" + if task_type == "openended": + last_post_assessment = task.latest_post_assessment(short_feedback=False, join_feedback=False) + if isinstance(last_post_assessment, list): + eval_list = [] + for i in xrange(0, len(last_post_assessment)): + eval_list.append(task.format_feedback_with_evaluation(last_post_assessment[i])) + last_post_evaluation = "".join(eval_list) + else: + last_post_evaluation = task.format_feedback_with_evaluation(last_post_assessment) + last_post_assessment = last_post_evaluation + last_correctness = task.is_last_response_correct() + max_score = task.max_score() + state = task.state + last_response_dict = { + 'response': last_response, + 'score': last_score, + 'post_assessment': last_post_assessment, + 'type': task_type, + 'max_score': max_score, + 'state': state, + 'human_state': task.HUMAN_NAMES[state], + 'correct': last_correctness, + 'min_score_to_attempt': min_score_to_attempt, + 'max_score_to_attempt': max_score_to_attempt, + } + + return last_response_dict + + def update_task_states(self): + """ + Updates the task state of the combined open ended module with the task state of the current child 
module. + Input: None + Output: boolean indicating whether or not the task state changed. + """ + changed = False + if not self.allow_reset: + self.task_states[self.current_task_number] = self.current_task.get_instance_state() + current_task_state = json.loads(self.task_states[self.current_task_number]) + if current_task_state['state'] == self.DONE: + self.current_task_number += 1 + if self.current_task_number >= (len(self.task_xml)): + self.state = self.DONE + self.current_task_number = len(self.task_xml) - 1 + else: + self.state = self.INITIAL + changed = True + self.setup_next_task() + return changed + + def update_task_states_ajax(self, return_html): + """ + Runs the update task states function for ajax calls. Currently the same as update_task_states + Input: The html returned by the handle_ajax function of the child + Output: New html that should be rendered + """ + changed = self.update_task_states() + if changed: + #return_html=self.get_html() + pass + return return_html + + def get_results(self, get): + """ + Gets the results of a given grader via ajax. + Input: AJAX get dictionary + Output: Dictionary to be rendered via ajax that contains the result html. + """ + task_number = int(get['task_number']) + self.update_task_states() + response_dict = self.get_last_response(task_number) + context = {'results': response_dict['post_assessment'], 'task_number': task_number + 1} + html = render_to_string('combined_open_ended_results.html', context) + return {'html': html, 'success': True} + + def handle_ajax(self, dispatch, get): + """ + This is called by courseware.module_render, to handle an AJAX call. + "get" is request.POST. 
+ + Returns a json dictionary: + { 'progress_changed' : True/False, + 'progress': 'none'/'in_progress'/'done', + } + """ + + handlers = { + 'next_problem': self.next_problem, + 'reset': self.reset, + 'get_results': self.get_results + } + + if dispatch not in handlers: + return_html = self.current_task.handle_ajax(dispatch, get, self.system) + return self.update_task_states_ajax(return_html) + + d = handlers[dispatch](get) + return json.dumps(d, cls=ComplexEncoder) + + def next_problem(self, get): + """ + Called via ajax to advance to the next problem. + Input: AJAX get request. + Output: Dictionary to be rendered + """ + self.update_task_states() + return {'success': True, 'html': self.get_html_nonsystem(), 'allow_reset': self.allow_reset} + + def reset(self, get): + """ + If resetting is allowed, reset the state of the combined open ended module. + Input: AJAX get dictionary + Output: AJAX dictionary to tbe rendered + """ + if self.state != self.DONE: + if not self.allow_reset: + return self.out_of_sync_error(get) + + if self.attempts > self.max_attempts: + return { + 'success': False, + 'error': 'Too many attempts.' + } + self.state = self.INITIAL + self.allow_reset = False + for i in xrange(0, len(self.task_xml)): + self.current_task_number = i + self.setup_next_task(reset=True) + self.current_task.reset(self.system) + self.task_states[self.current_task_number] = self.current_task.get_instance_state() + self.current_task_number = 0 + self.allow_reset = False + self.setup_next_task() + return {'success': True, 'html': self.get_html_nonsystem()} + + def get_instance_state(self): + """ + Returns the current instance state. The module can be recreated from the instance state. + Input: None + Output: A dictionary containing the instance state. 
+ """ + + state = { + 'version': self.STATE_VERSION, + 'current_task_number': self.current_task_number, + 'state': self.state, + 'task_states': self.task_states, + 'attempts': self.attempts, + 'ready_to_reset': self.allow_reset, + } + + return json.dumps(state) + + def get_status(self): + """ + Gets the status panel to be displayed at the top right. + Input: None + Output: The status html to be rendered + """ + status = [] + for i in xrange(0, self.current_task_number + 1): + task_data = self.get_last_response(i) + task_data.update({'task_number': i + 1}) + status.append(task_data) + context = {'status_list': status} + status_html = self.system.render_template("combined_open_ended_status.html", context) + + return status_html + + +class CombinedOpenEndedDescriptor(XmlDescriptor, EditingDescriptor): + """ + Module for adding combined open ended questions + """ + mako_template = "widgets/html-edit.html" + module_class = CombinedOpenEndedModule + filename_extension = "xml" + + stores_state = True + has_score = True + template_dir_name = "combinedopenended" + + js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]} + js_module_name = "HTMLEditingDescriptor" + + @classmethod + def definition_from_xml(cls, xml_object, system): + """ + Pull out the individual tasks, the rubric, and the prompt, and parse + + Returns: + { + 'rubric': 'some-html', + 'prompt': 'some-html', + 'task_xml': dictionary of xml strings, + } + """ + expected_children = ['task', 'rubric', 'prompt'] + for child in expected_children: + if len(xml_object.xpath(child)) == 0: + raise ValueError("Combined Open Ended definition must include at least one '{0}' tag".format(child)) + + def parse_task(k): + """Assumes that xml_object has child k""" + return [stringify_children(xml_object.xpath(k)[i]) for i in xrange(0, len(xml_object.xpath(k)))] + + def parse(k): + """Assumes that xml_object has child k""" + return xml_object.xpath(k)[0] + + return {'task_xml': parse_task('task'), 'prompt': 
parse('prompt'), 'rubric': parse('rubric')} + + + def definition_to_xml(self, resource_fs): + '''Return an xml element representing this definition.''' + elt = etree.Element('combinedopenended') + + def add_child(k): + child_str = '<{tag}>{body}'.format(tag=k, body=self.definition[k]) + child_node = etree.fromstring(child_str) + elt.append(child_node) + + for child in ['task']: + add_child(child) + + return elt \ No newline at end of file diff --git a/common/lib/xmodule/xmodule/combined_open_ended_rubric.py b/common/lib/xmodule/xmodule/combined_open_ended_rubric.py new file mode 100644 index 0000000000..0b2ca1ca2c --- /dev/null +++ b/common/lib/xmodule/xmodule/combined_open_ended_rubric.py @@ -0,0 +1,129 @@ +from mitxmako.shortcuts import render_to_string +import logging +from lxml import etree + +log=logging.getLogger(__name__) + +class CombinedOpenEndedRubric: + + @staticmethod + def render_rubric(rubric_xml): + try: + rubric_categories = CombinedOpenEndedRubric.extract_rubric_categories(rubric_xml) + html = render_to_string('open_ended_rubric.html', {'rubric_categories' : rubric_categories}) + except: + log.exception("Could not parse the rubric.") + html = rubric_xml + return html + + @staticmethod + def extract_rubric_categories(element): + ''' + Contstruct a list of categories such that the structure looks like: + [ { category: "Category 1 Name", + options: [{text: "Option 1 Name", points: 0}, {text:"Option 2 Name", points: 5}] + }, + { category: "Category 2 Name", + options: [{text: "Option 1 Name", points: 0}, + {text: "Option 2 Name", points: 1}, + {text: "Option 3 Name", points: 2]}] + + ''' + element = etree.fromstring(element) + categories = [] + for category in element: + if category.tag != 'category': + raise Exception("[capa.inputtypes.extract_categories] Expected a tag: got {0} instead".format(category.tag)) + else: + categories.append(CombinedOpenEndedRubric.extract_category(category)) + return categories + + @staticmethod + def 
extract_category(category): + ''' + construct an individual category + {category: "Category 1 Name", + options: [{text: "Option 1 text", points: 1}, + {text: "Option 2 text", points: 2}]} + + all sorting and auto-point generation occurs in this function + ''' + + has_score=False + descriptionxml = category[0] + scorexml = category[1] + if scorexml.tag == "option": + optionsxml = category[1:] + else: + optionsxml = category[2:] + has_score=True + + # parse description + if descriptionxml.tag != 'description': + raise Exception("[extract_category]: expected description tag, got {0} instead".format(descriptionxml.tag)) + + if has_score: + if scorexml.tag != 'score': + raise Exception("[extract_category]: expected score tag, got {0} instead".format(scorexml.tag)) + + for option in optionsxml: + if option.tag != "option": + raise Exception("[extract_category]: expected option tag, got {0} instead".format(option.tag)) + + description = descriptionxml.text + + if has_score: + score = int(scorexml.text) + else: + score = 0 + + cur_points = 0 + options = [] + autonumbering = True + # parse options + for option in optionsxml: + if option.tag != 'option': + raise Exception("[extract_category]: expected option tag, got {0} instead".format(option.tag)) + else: + pointstr = option.get("points") + if pointstr: + autonumbering = False + # try to parse this into an int + try: + points = int(pointstr) + except ValueError: + raise Exception("[extract_category]: expected points to have int, got {0} instead".format(pointstr)) + elif autonumbering: + # use the generated one if we're in the right mode + points = cur_points + cur_points = cur_points + 1 + else: + raise Exception("[extract_category]: missing points attribute. 
Cannot continue to auto-create points values after a points value is explicitly dfined.") + optiontext = option.text + selected = False + if has_score: + if points == score: + selected = True + options.append({'text': option.text, 'points': points, 'selected' : selected}) + + # sort and check for duplicates + options = sorted(options, key=lambda option: option['points']) + CombinedOpenEndedRubric.validate_options(options) + + return {'description': description, 'options': options, 'score' : score, 'has_score' : has_score} + + @staticmethod + def validate_options(options): + ''' + Validates a set of options. This can and should be extended to filter out other bad edge cases + ''' + if len(options) == 0: + raise Exception("[extract_category]: no options associated with this category") + if len(options) == 1: + return + prev = options[0]['points'] + for option in options[1:]: + if prev == option['points']: + raise Exception("[extract_category]: found duplicate point values between two different options") + else: + prev = option['points'] \ No newline at end of file diff --git a/common/lib/xmodule/xmodule/course_module.py b/common/lib/xmodule/xmodule/course_module.py index 2b6232d366..499247cc2d 100644 --- a/common/lib/xmodule/xmodule/course_module.py +++ b/common/lib/xmodule/xmodule/course_module.py @@ -1,9 +1,9 @@ -from fs.errors import ResourceNotFoundError import logging from lxml import etree -from path import path # NOTE (THK): Only used for detecting presence of syllabus +from path import path # NOTE (THK): Only used for detecting presence of syllabus import requests import time +from datetime import datetime from xmodule.util.decorators import lazyproperty from xmodule.graders import load_grading_policy @@ -13,6 +13,7 @@ from xmodule.timeparse import parse_time, stringify_time log = logging.getLogger(__name__) + class CourseDescriptor(SequenceDescriptor): module_class = SequenceModule @@ -96,11 +97,27 @@ class CourseDescriptor(SequenceDescriptor): # disable the 
syllabus content for courses that do not provide a syllabus self.syllabus_present = self.system.resources_fs.exists(path('syllabus')) + self.test_center_exams = [] + test_center_info = self.metadata.get('testcenter_info') + if test_center_info is not None: + for exam_name in test_center_info: + try: + exam_info = test_center_info[exam_name] + self.test_center_exams.append(self.TestCenterExam(self.id, exam_name, exam_info)) + except Exception as err: + # If we can't parse the test center exam info, don't break + # the rest of the courseware. + msg = 'Error %s: Unable to load test-center exam info for exam "%s" of course "%s"' % (err, exam_name, self.id) + log.error(msg) + continue + + def set_grading_policy(self, policy_str): """Parse the policy specified in policy_str, and save it""" try: self._grading_policy = load_grading_policy(policy_str) - except: + except Exception, err: + log.exception('Failed to load grading policy:') self.system.error_tracker("Failed to load grading policy") # Setting this to an empty dictionary will lead to errors when # grading needs to happen, but should allow course staff to see @@ -149,6 +166,10 @@ class CourseDescriptor(SequenceDescriptor): def grade_cutoffs(self): return self._grading_policy['GRADE_CUTOFFS'] + @property + def lowest_passing_grade(self): + return min(self._grading_policy['GRADE_CUTOFFS'].values()) + @property def tabs(self): """ @@ -160,6 +181,38 @@ class CourseDescriptor(SequenceDescriptor): def show_calculator(self): return self.metadata.get("show_calculator", None) == "Yes" + @property + def is_new(self): + # The course is "new" if either if the metadata flag is_new is + # true or if the course has not started yet + flag = self.metadata.get('is_new', None) + if flag is None: + return self.days_until_start > 1 + elif isinstance(flag, basestring): + return flag.lower() in ['true', 'yes', 'y'] + else: + return bool(flag) + + @property + def days_until_start(self): + def convert_to_datetime(timestamp): + return 
datetime.fromtimestamp(time.mktime(timestamp)) + + start_date = convert_to_datetime(self.start) + + # Try to use course advertised date if we can parse it + advertised_start = self.metadata.get('advertised_start', None) + if advertised_start: + try: + start_date = datetime.strptime(advertised_start, + "%Y-%m-%dT%H:%M") + except ValueError: + pass # Invalid date, keep using 'start'' + + now = convert_to_datetime(time.gmtime()) + days_until_start = (start_date - now).days + return days_until_start + @lazyproperty def grading_context(self): """ @@ -239,7 +292,6 @@ class CourseDescriptor(SequenceDescriptor): raise ValueError("{0} is not a course location".format(loc)) return "/".join([loc.org, loc.course, loc.name]) - @property def id(self): """Return the course_id for this course""" @@ -247,7 +299,20 @@ class CourseDescriptor(SequenceDescriptor): @property def start_date_text(self): - displayed_start = self._try_parse_time('advertised_start') or self.start + parsed_advertised_start = self._try_parse_time('advertised_start') + + # If the advertised start isn't a real date string, we assume it's free + # form text... 
+ if parsed_advertised_start is None and \ + ('advertised_start' in self.metadata): + return self.metadata['advertised_start'] + + displayed_start = parsed_advertised_start or self.start + + # If we have neither an advertised start or a real start, just return TBD + if not displayed_start: + return "TBD" + return time.strftime("%b %d, %Y", displayed_start) @property @@ -292,7 +357,7 @@ class CourseDescriptor(SequenceDescriptor): return False except: log.exception("Error parsing discussion_blackouts for course {0}".format(self.id)) - + return True @property @@ -312,6 +377,88 @@ class CourseDescriptor(SequenceDescriptor): """ return self.metadata.get('end_of_course_survey_url') + class TestCenterExam(object): + def __init__(self, course_id, exam_name, exam_info): + self.course_id = course_id + self.exam_name = exam_name + self.exam_info = exam_info + self.exam_series_code = exam_info.get('Exam_Series_Code') or exam_name + self.display_name = exam_info.get('Exam_Display_Name') or self.exam_series_code + self.first_eligible_appointment_date = self._try_parse_time('First_Eligible_Appointment_Date') + if self.first_eligible_appointment_date is None: + raise ValueError("First appointment date must be specified") + # TODO: If defaulting the last appointment date, it should be the + # *end* of the same day, not the same time. It's going to be used as the + # end of the exam overall, so we don't want the exam to disappear too soon. + # It's also used optionally as the registration end date, so time matters there too. 
+ self.last_eligible_appointment_date = self._try_parse_time('Last_Eligible_Appointment_Date') # or self.first_eligible_appointment_date + if self.last_eligible_appointment_date is None: + raise ValueError("Last appointment date must be specified") + self.registration_start_date = self._try_parse_time('Registration_Start_Date') or time.gmtime(0) + self.registration_end_date = self._try_parse_time('Registration_End_Date') or self.last_eligible_appointment_date + # do validation within the exam info: + if self.registration_start_date > self.registration_end_date: + raise ValueError("Registration start date must be before registration end date") + if self.first_eligible_appointment_date > self.last_eligible_appointment_date: + raise ValueError("First appointment date must be before last appointment date") + if self.registration_end_date > self.last_eligible_appointment_date: + raise ValueError("Registration end date must be before last appointment date") + + + def _try_parse_time(self, key): + """ + Parse an optional metadata key containing a time: if present, complain + if it doesn't parse. + Return None if not present or invalid. 
+ """ + if key in self.exam_info: + try: + return parse_time(self.exam_info[key]) + except ValueError as e: + msg = "Exam {0} in course {1} loaded with a bad exam_info key '{2}': '{3}'".format(self.exam_name, self.course_id, self.exam_info[key], e) + log.warning(msg) + return None + + def has_started(self): + return time.gmtime() > self.first_eligible_appointment_date + + def has_ended(self): + return time.gmtime() > self.last_eligible_appointment_date + + def has_started_registration(self): + return time.gmtime() > self.registration_start_date + + def has_ended_registration(self): + return time.gmtime() > self.registration_end_date + + def is_registering(self): + now = time.gmtime() + return now >= self.registration_start_date and now <= self.registration_end_date + + @property + def first_eligible_appointment_date_text(self): + return time.strftime("%b %d, %Y", self.first_eligible_appointment_date) + + @property + def last_eligible_appointment_date_text(self): + return time.strftime("%b %d, %Y", self.last_eligible_appointment_date) + + @property + def registration_end_date_text(self): + return time.strftime("%b %d, %Y", self.registration_end_date) + + @property + def current_test_center_exam(self): + exams = [exam for exam in self.test_center_exams if exam.has_started_registration() and not exam.has_ended()] + if len(exams) > 1: + # TODO: output some kind of warning. This should already be + # caught if we decide to do validation at load time. 
+ return exams[0] + elif len(exams) == 1: + return exams[0] + else: + return None + @property def title(self): return self.display_name @@ -323,4 +470,3 @@ class CourseDescriptor(SequenceDescriptor): @property def org(self): return self.location.org - diff --git a/common/lib/xmodule/xmodule/css/capa/display.scss b/common/lib/xmodule/xmodule/css/capa/display.scss index fd67a3804e..929b6dcb48 100644 --- a/common/lib/xmodule/xmodule/css/capa/display.scss +++ b/common/lib/xmodule/xmodule/css/capa/display.scss @@ -121,16 +121,6 @@ section.problem { } } - &.processing { - p.status { - @include inline-block(); - background: url('../images/spinner.gif') center center no-repeat; - height: 20px; - width: 20px; - text-indent: -9999px; - } - } - &.correct, &.ui-icon-check { p.status { @include inline-block(); @@ -250,6 +240,13 @@ section.problem { } } + .reload + { + float:right; + margin: 10px; + } + + .grader-status { padding: 9px; background: #F6F6F6; @@ -266,6 +263,13 @@ section.problem { margin: -7px 7px 0 0; } + .grading { + background: url('../images/info-icon.png') left center no-repeat; + padding-left: 25px; + text-indent: 0px; + margin: 0px 7px 0 0; + } + p { line-height: 20px; text-transform: capitalize; @@ -293,6 +297,51 @@ section.problem { float: left; } } + + } + .evaluation { + p { + margin-bottom: 4px; + } + } + + + .feedback-on-feedback { + height: 100px; + margin-right: 20px; + } + + .evaluation-response { + header { + text-align: right; + a { + font-size: .85em; + } + } + } + + .evaluation-scoring { + .scoring-list { + list-style-type: none; + margin-left: 3px; + + li { + &:first-child { + margin-left: 0px; + } + display:inline; + margin-left: 50px; + + label { + font-size: .9em; + } + + } + } + + } + .submit-message-container { + margin: 10px 0px ; } } @@ -630,6 +679,10 @@ section.problem { color: #2C2C2C; font-family: monospace; font-size: 1em; + padding-top: 10px; + header { + font-size: 1.4em; + } .shortform { font-weight: bold; @@ -685,6 +738,21 @@ 
section.problem { color: #B00; } } + + .markup-text{ + margin: 5px; + padding: 20px 0px 15px 50px; + border-top: 1px solid #DDD; + border-left: 20px solid #FAFAFA; + + bs { + color: #BB0000; + } + + bg { + color: #BDA046; + } + } } } } diff --git a/common/lib/xmodule/xmodule/css/combinedopenended/display.scss b/common/lib/xmodule/xmodule/css/combinedopenended/display.scss new file mode 100644 index 0000000000..a58e30f1e2 --- /dev/null +++ b/common/lib/xmodule/xmodule/css/combinedopenended/display.scss @@ -0,0 +1,626 @@ +h2 { + margin-top: 0; + margin-bottom: 15px; + + &.problem-header { + section.staff { + margin-top: 30px; + font-size: 80%; + } + } + + @media print { + display: block; + width: auto; + border-right: 0; + } +} + +.inline-error { + color: darken($error-red, 10%); +} + +section.combined-open-ended { + @include clearfix; + .status-container + { + float:right; + width:40%; + } + .item-container + { + float:left; + width: 53%; + padding-bottom: 50px; + } + + .result-container + { + float:left; + width: 93%; + position:relative; + } +} + +section.combined-open-ended-status { + + .statusitem { + background-color: #FAFAFA; + color: #2C2C2C; + font-family: monospace; + font-size: 1em; + padding-top: 10px; + } + + .statusitem-current { + background-color: #BEBEBE; + color: #2C2C2C; + font-family: monospace; + font-size: 1em; + padding-top: 10px; + } + + span { + &.unanswered { + @include inline-block(); + background: url('../images/unanswered-icon.png') center center no-repeat; + height: 14px; + position: relative; + width: 14px; + float: right; + } + + &.correct { + @include inline-block(); + background: url('../images/correct-icon.png') center center no-repeat; + height: 20px; + position: relative; + width: 25px; + float: right; + } + + &.incorrect { + @include inline-block(); + background: url('../images/incorrect-icon.png') center center no-repeat; + height: 20px; + width: 20px; + position: relative; + float: right; + } + } +} + +div.result-container { + 
+ .evaluation { + p { + margin-bottom: 1px; + } + } + + .feedback-on-feedback { + height: 100px; + margin-right: 0px; + } + + .evaluation-response { + header { + text-align: right; + a { + font-size: .85em; + } + } + } + .evaluation-scoring { + .scoring-list { + list-style-type: none; + margin-left: 3px; + + li { + &:first-child { + margin-left: 0px; + } + display:inline; + margin-left: 0px; + + label { + font-size: .9em; + } + } + } + } + .submit-message-container { + margin: 10px 0px ; + } + + .external-grader-message { + section { + padding-left: 20px; + background-color: #FAFAFA; + color: #2C2C2C; + font-family: monospace; + font-size: 1em; + padding-top: 10px; + header { + font-size: 1.4em; + } + + .shortform { + font-weight: bold; + } + + .longform { + padding: 0px; + margin: 0px; + + .result-errors { + margin: 5px; + padding: 10px 10px 10px 40px; + background: url('../images/incorrect-icon.png') center left no-repeat; + li { + color: #B00; + } + } + + .result-output { + margin: 5px; + padding: 20px 0px 15px 50px; + border-top: 1px solid #DDD; + border-left: 20px solid #FAFAFA; + + h4 { + font-family: monospace; + font-size: 1em; + } + + dl { + margin: 0px; + } + + dt { + margin-top: 20px; + } + + dd { + margin-left: 24pt; + } + } + + .result-correct { + background: url('../images/correct-icon.png') left 20px no-repeat; + .result-actual-output { + color: #090; + } + } + + .result-incorrect { + background: url('../images/incorrect-icon.png') left 20px no-repeat; + .result-actual-output { + color: #B00; + } + } + + .markup-text{ + margin: 5px; + padding: 20px 0px 15px 50px; + border-top: 1px solid #DDD; + border-left: 20px solid #FAFAFA; + + bs { + color: #BB0000; + } + + bg { + color: #BDA046; + } + } + } + } + } +} + +div.result-container, section.open-ended-child { + .rubric { + tr { + margin:10px 0px; + height: 100%; + } + td { + padding: 20px 0px; + margin: 10px 0px; + height: 100%; + } + th { + padding: 5px; + margin: 5px; + } + label, + .view-only { + 
margin:10px; + position: relative; + padding: 15px; + width: 200px; + height:100%; + display: inline-block; + min-height: 50px; + min-width: 50px; + background-color: #CCC; + font-size: 1em; + } + .grade { + position: absolute; + bottom:0px; + right:0px; + margin:10px; + } + .selected-grade { + background: #666; + color: white; + } + input[type=radio]:checked + label { + background: #666; + color: white; } + input[class='score-selection'] { + display: none; + } + } +} + +section.open-ended-child { + @media print { + display: block; + width: auto; + padding: 0; + + canvas, img { + page-break-inside: avoid; + } + } + + .inline { + display: inline; + } + + ol.enumerate { + li { + &:before { + content: " "; + display: block; + height: 0; + visibility: hidden; + } + } + } + + .solution-span { + > span { + margin: 20px 0; + display: block; + border: 1px solid #ddd; + padding: 9px 15px 20px; + background: #FFF; + position: relative; + @include box-shadow(inset 0 0 0 1px #eee); + @include border-radius(3px); + + &:empty { + display: none; + } + } + } + + p { + &.answer { + margin-top: -2px; + } + &.status { + text-indent: -9999px; + margin: 8px 0 0 10px; + } + } + + div.unanswered { + p.status { + @include inline-block(); + background: url('../images/unanswered-icon.png') center center no-repeat; + height: 14px; + width: 14px; + } + } + + div.correct, div.ui-icon-check { + p.status { + @include inline-block(); + background: url('../images/correct-icon.png') center center no-repeat; + height: 20px; + width: 25px; + } + + input { + border-color: green; + } + } + + div.processing { + p.status { + @include inline-block(); + background: url('../images/spinner.gif') center center no-repeat; + height: 20px; + width: 20px; + } + + input { + border-color: #aaa; + } + } + + div.incorrect, div.ui-icon-close { + p.status { + @include inline-block(); + background: url('../images/incorrect-icon.png') center center no-repeat; + height: 20px; + width: 20px; + text-indent: -9999px; + } + + 
input { + border-color: red; + } + } + + > span { + display: block; + margin-bottom: lh(.5); + } + + p.answer { + @include inline-block(); + margin-bottom: 0; + margin-left: 10px; + + &:before { + content: "Answer: "; + font-weight: bold; + display: inline; + + } + &:empty { + &:before { + display: none; + } + } + } + + span { + &.unanswered, &.ui-icon-bullet { + @include inline-block(); + background: url('../images/unanswered-icon.png') center center no-repeat; + height: 14px; + position: relative; + top: 4px; + width: 14px; + } + + &.processing, &.ui-icon-processing { + @include inline-block(); + background: url('../images/spinner.gif') center center no-repeat; + height: 20px; + position: relative; + top: 6px; + width: 25px; + } + + &.correct, &.ui-icon-check { + @include inline-block(); + background: url('../images/correct-icon.png') center center no-repeat; + height: 20px; + position: relative; + top: 6px; + width: 25px; + } + + &.incorrect, &.ui-icon-close { + @include inline-block(); + background: url('../images/incorrect-icon.png') center center no-repeat; + height: 20px; + width: 20px; + position: relative; + top: 6px; + } + } + + .reload + { + float:right; + margin: 10px; + } + + + .grader-status { + padding: 9px; + background: #F6F6F6; + border: 1px solid #ddd; + border-top: 0; + margin-bottom: 20px; + @include clearfix; + + span { + text-indent: -9999px; + overflow: hidden; + display: block; + float: left; + margin: -7px 7px 0 0; + } + + .grading { + background: url('../images/info-icon.png') left center no-repeat; + padding-left: 25px; + text-indent: 0px; + margin: 0px 7px 0 0; + } + + p { + line-height: 20px; + text-transform: capitalize; + margin-bottom: 0; + float: left; + } + + &.file { + background: #FFF; + margin-top: 20px; + padding: 20px 0 0 0; + + border: { + top: 1px solid #eee; + right: 0; + bottom: 0; + left: 0; + } + + p.debug { + display: none; + } + + input { + float: left; + } + } + + } + + form.option-input { + margin: -10px 0 20px; + 
padding-bottom: 20px; + + select { + margin-right: flex-gutter(); + } + } + + ul { + list-style: disc outside none; + margin-bottom: lh(); + margin-left: .75em; + margin-left: .75rem; + } + + ol { + list-style: decimal outside none; + margin-bottom: lh(); + margin-left: .75em; + margin-left: .75rem; + } + + dl { + line-height: 1.4em; + } + + dl dt { + font-weight: bold; + } + + dl dd { + margin-bottom: 0; + } + + dd { + margin-left: .5em; + margin-left: .5rem; + } + + li { + line-height: 1.4em; + margin-bottom: lh(.5); + + &:last-child { + margin-bottom: 0; + } + } + + p { + margin-bottom: lh(); + } + + hr { + background: #ddd; + border: none; + clear: both; + color: #ddd; + float: none; + height: 1px; + margin: 0 0 .75rem; + width: 100%; + } + + .hidden { + display: none; + visibility: hidden; + } + + #{$all-text-inputs} { + display: inline; + width: auto; + } + + section.action { + margin-top: 20px; + + input.save { + @extend .blue-button; + } + + .submission_feedback { + // background: #F3F3F3; + // border: 1px solid #ddd; + // @include border-radius(3px); + // padding: 8px 12px; + // margin-top: 10px; + @include inline-block; + font-style: italic; + margin: 8px 0 0 10px; + color: #777; + -webkit-font-smoothing: antialiased; + } + } + + .detailed-solution { + > p:first-child { + font-size: 0.9em; + font-weight: bold; + font-style: normal; + text-transform: uppercase; + color: #AAA; + } + + p:last-child { + margin-bottom: 0; + } + } + + div.open-ended-alert { + padding: 8px 12px; + border: 1px solid #EBE8BF; + border-radius: 3px; + background: #FFFCDD; + font-size: 0.9em; + margin-top: 10px; + } + + div.capa_reset { + padding: 25px; + border: 1px solid $error-red; + background-color: lighten($error-red, 25%); + border-radius: 3px; + font-size: 1em; + margin-top: 10px; + margin-bottom: 10px; + } + .capa_reset>h2 { + color: #AA0000; + } + .capa_reset li { + font-size: 0.9em; + } + +} diff --git a/common/lib/xmodule/xmodule/error_module.py 
b/common/lib/xmodule/xmodule/error_module.py index 65fceb77c7..2df47e05e6 100644 --- a/common/lib/xmodule/xmodule/error_module.py +++ b/common/lib/xmodule/xmodule/error_module.py @@ -149,14 +149,14 @@ class ErrorDescriptor(JSONEditingDescriptor): ''' try: xml = etree.fromstring(self.definition['data']['contents']) - return etree.tostring(xml) + return etree.tostring(xml, encoding='unicode') except etree.XMLSyntaxError: # still not valid. root = etree.Element('error') root.text = self.definition['data']['contents'] err_node = etree.SubElement(root, 'error_msg') err_node.text = self.definition['data']['error_msg'] - return etree.tostring(root) + return etree.tostring(root, encoding='unicode') class NonStaffErrorDescriptor(ErrorDescriptor): diff --git a/common/lib/xmodule/xmodule/graders.py b/common/lib/xmodule/xmodule/graders.py index 8f885dc9d2..a183cec98b 100644 --- a/common/lib/xmodule/xmodule/graders.py +++ b/common/lib/xmodule/xmodule/graders.py @@ -316,7 +316,7 @@ class AssignmentFormatGrader(CourseGrader): min_count = 2 would produce the labels "Assignment 3", "Assignment 4" """ - def __init__(self, type, min_count, drop_count, category=None, section_type=None, short_label=None, show_only_average=False, starting_index=1): + def __init__(self, type, min_count, drop_count, category=None, section_type=None, short_label=None, show_only_average=False, hide_average=False, starting_index=1): self.type = type self.min_count = min_count self.drop_count = drop_count @@ -325,6 +325,7 @@ class AssignmentFormatGrader(CourseGrader): self.short_label = short_label or self.type self.show_only_average = show_only_average self.starting_index = starting_index + self.hide_average = hide_average def grade(self, grade_sheet, generate_random_scores=False): def totalWithDrops(breakdown, drop_count): @@ -385,7 +386,8 @@ class AssignmentFormatGrader(CourseGrader): if self.show_only_average: breakdown = [] - breakdown.append({'percent': total_percent, 'label': total_label, 'detail': 
total_detail, 'category': self.category, 'prominent': True}) + if not self.hide_average: + breakdown.append({'percent': total_percent, 'label': total_label, 'detail': total_detail, 'category': self.category, 'prominent': True}) return {'percent': total_percent, 'section_breakdown': breakdown, diff --git a/common/lib/xmodule/xmodule/gst_module.py b/common/lib/xmodule/xmodule/gst_module.py new file mode 100644 index 0000000000..ef1be96c84 --- /dev/null +++ b/common/lib/xmodule/xmodule/gst_module.py @@ -0,0 +1,194 @@ +""" +Graphical slider tool module is ungraded xmodule used by students to +understand functional dependencies. +""" + +import json +import logging +from lxml import etree +from lxml import html +import xmltodict + +from xmodule.mako_module import MakoModuleDescriptor +from xmodule.xml_module import XmlDescriptor +from xmodule.x_module import XModule +from xmodule.stringify import stringify_children +from pkg_resources import resource_string + + +log = logging.getLogger(__name__) + + +class GraphicalSliderToolModule(XModule): + ''' Graphical-Slider-Tool Module + ''' + + js = { + 'coffee': [resource_string(__name__, 'js/src/javascript_loader.coffee')], + 'js': [ + # 3rd party libraries used by graphic slider tool. + # TODO - where to store them - outside xmodule? 
+ resource_string(__name__, 'js/src/graphical_slider_tool/gst_main.js'), + resource_string(__name__, 'js/src/graphical_slider_tool/state.js'), + resource_string(__name__, 'js/src/graphical_slider_tool/logme.js'), + resource_string(__name__, 'js/src/graphical_slider_tool/general_methods.js'), + resource_string(__name__, 'js/src/graphical_slider_tool/sliders.js'), + resource_string(__name__, 'js/src/graphical_slider_tool/inputs.js'), + resource_string(__name__, 'js/src/graphical_slider_tool/graph.js'), + resource_string(__name__, 'js/src/graphical_slider_tool/el_output.js'), + resource_string(__name__, 'js/src/graphical_slider_tool/g_label_el_output.js'), + resource_string(__name__, 'js/src/graphical_slider_tool/gst.js') + + ] + } + js_module_name = "GraphicalSliderTool" + + def __init__(self, system, location, definition, descriptor, instance_state=None, + shared_state=None, **kwargs): + """ + For XML file format please look at documentation. TODO - receive + information where to store XML documentation. + """ + XModule.__init__(self, system, location, definition, descriptor, + instance_state, shared_state, **kwargs) + + def get_html(self): + """ Renders parameters to template. """ + + # these 3 will be used in class methods + self.html_id = self.location.html_id() + self.html_class = self.location.category + self.configuration_json = self.build_configuration_json() + params = { + 'gst_html': self.substitute_controls(self.definition['render']), + 'element_id': self.html_id, + 'element_class': self.html_class, + 'configuration_json': self.configuration_json + } + self.content = self.system.render_template( + 'graphical_slider_tool.html', params) + return self.content + + def substitute_controls(self, html_string): + """ Substitutes control elements (slider, textbox and plot) in + html_string with their divs. Html_string is content of tag + inside tag. 
Documentation on how information in + tag is organized and processed is located in: + mitx/docs/build/html/graphical_slider_tool.html. + + Args: + html_string: content of tag, with controls as xml tags, + e.g. . + + Returns: + html_string with control tags replaced by proper divs + ( ->
) + """ + + xml = html.fromstring(html_string) + + #substitute plot, if presented + plot_div = '
' + plot_el = xml.xpath('//plot') + if plot_el: + plot_el = plot_el[0] + plot_el.getparent().replace(plot_el, html.fromstring( + plot_div.format(element_class=self.html_class, + element_id=self.html_id, + style=plot_el.get('style', "")))) + + #substitute sliders + slider_div = '
\ +
' + slider_els = xml.xpath('//slider') + for slider_el in slider_els: + slider_el.getparent().replace(slider_el, html.fromstring( + slider_div.format(element_class=self.html_class, + element_id=self.html_id, + var=slider_el.get('var', ""), + style=slider_el.get('style', "")))) + + # substitute inputs aka textboxes + input_div = '' + input_els = xml.xpath('//textbox') + for input_index, input_el in enumerate(input_els): + input_el.getparent().replace(input_el, html.fromstring( + input_div.format(element_class=self.html_class, + element_id=self.html_id, + var=input_el.get('var', ""), + style=input_el.get('style', ""), + input_index=input_index))) + + return html.tostring(xml) + + def build_configuration_json(self): + """Creates json element from xml element (with aim to transfer later + directly to javascript via hidden field in template). Steps: + + 1. Convert xml tree to python dict. + + 2. Dump dict to json. + + """ + # added for interface compatibility with xmltodict.parse + # class added for javascript's part purposes + return json.dumps(xmltodict.parse('' + self.definition['configuration'] + '')) + + +class GraphicalSliderToolDescriptor(MakoModuleDescriptor, XmlDescriptor): + module_class = GraphicalSliderToolModule + template_dir_name = 'graphical_slider_tool' + + @classmethod + def definition_from_xml(cls, xml_object, system): + """ + Pull out the data into dictionary. + + Args: + xml_object: xml from file. 
 + + Returns: + dict + """ + # check for presence of required tags in xml + expected_children_level_0 = ['render', 'configuration'] + for child in expected_children_level_0: + if len(xml_object.xpath(child)) != 1: + raise ValueError("Graphical Slider Tool definition must include \ + exactly one '{0}' tag".format(child)) + + expected_children_level_1 = ['functions'] + for child in expected_children_level_1: + if len(xml_object.xpath('configuration')[0].xpath(child)) != 1: + raise ValueError("Graphical Slider Tool definition must include \ + exactly one '{0}' tag".format(child)) + # finished + + def parse(k): + """Assumes that xml_object has child k""" + return stringify_children(xml_object.xpath(k)[0]) + return { + 'render': parse('render'), + 'configuration': parse('configuration') + } + + def definition_to_xml(self, resource_fs): + '''Return an xml element representing this definition.''' + xml_object = etree.Element('graphical_slider_tool') + + def add_child(k): + child_str = '<{tag}>{body}'.format(tag=k, body=self.definition[k]) + child_node = etree.fromstring(child_str) + xml_object.append(child_node) + + for child in ['render', 'configuration']: + add_child(child) + + return xml_object diff --git a/common/lib/xmodule/xmodule/html_module.py b/common/lib/xmodule/xmodule/html_module.py index 2023ac7017..c11c7d22e7 100644 --- a/common/lib/xmodule/xmodule/html_module.py +++ b/common/lib/xmodule/xmodule/html_module.py @@ -7,15 +7,14 @@ from lxml import etree from lxml.html import rewrite_links from path import path -from .x_module import XModule from pkg_resources import resource_string -from .xml_module import XmlDescriptor, name_to_pathname -from .editing_module import EditingDescriptor -from .stringify import stringify_children -from .html_checker import check_html -from xmodule.modulestore import Location - from xmodule.contentstore.content import XASSET_SRCREF_PREFIX, StaticContent +from xmodule.editing_module import EditingDescriptor +from xmodule.html_checker 
import check_html +from xmodule.modulestore import Location +from xmodule.stringify import stringify_children +from xmodule.x_module import XModule +from xmodule.xml_module import XmlDescriptor, name_to_pathname log = logging.getLogger("mitx.courseware") @@ -123,7 +122,7 @@ class HtmlDescriptor(XmlDescriptor, EditingDescriptor): try: with system.resources_fs.open(filepath) as file: - html = file.read() + html = file.read().decode('utf-8') # Log a warning if we can't parse the file, but don't error if not check_html(html): msg = "Couldn't parse html in {0}.".format(filepath) @@ -164,7 +163,7 @@ class HtmlDescriptor(XmlDescriptor, EditingDescriptor): resource_fs.makedir(os.path.dirname(filepath), allow_recreate=True) with resource_fs.open(filepath, 'w') as file: - file.write(self.definition['data']) + file.write(self.definition['data'].encode('utf-8')) # write out the relative name relname = path(pathname).basename() diff --git a/common/lib/xmodule/xmodule/js/src/capa/schematic.js b/common/lib/xmodule/xmodule/js/src/capa/schematic.js index b033dbaf46..bebe6b1854 100644 --- a/common/lib/xmodule/xmodule/js/src/capa/schematic.js +++ b/common/lib/xmodule/xmodule/js/src/capa/schematic.js @@ -1953,7 +1953,7 @@ cktsim = (function() { var module = { 'Circuit': Circuit, 'parse_number': parse_number, - 'parse_source': parse_source, + 'parse_source': parse_source } return module; }()); @@ -2068,7 +2068,7 @@ schematic = (function() { 'n': [NFet, 'NFet'], 'p': [PFet, 'PFet'], 's': [Probe, 'Voltage Probe'], - 'a': [Ammeter, 'Current Probe'], + 'a': [Ammeter, 'Current Probe'] }; // global clipboard @@ -5502,7 +5502,7 @@ schematic = (function() { 'magenta' : 'rgb(255,64,255)', 'yellow': 'rgb(255,255,64)', 'black': 'rgb(0,0,0)', - 'x-axis': undefined, + 'x-axis': undefined }; function Probe(x,y,rotation,color,offset) { @@ -6100,7 +6100,7 @@ schematic = (function() { 'Amplitude', 'Frequency (Hz)', 'Delay until sin starts (secs)', - 'Phase offset (degrees)'], + 'Phase offset 
(degrees)'] } // build property editor div @@ -6300,7 +6300,7 @@ schematic = (function() { var module = { 'Schematic': Schematic, - 'component_slider': component_slider, + 'component_slider': component_slider } return module; }()); diff --git a/common/lib/xmodule/xmodule/js/src/collapsible.coffee b/common/lib/xmodule/xmodule/js/src/collapsible.coffee index 18a186e106..e414935784 100644 --- a/common/lib/xmodule/xmodule/js/src/collapsible.coffee +++ b/common/lib/xmodule/xmodule/js/src/collapsible.coffee @@ -22,7 +22,7 @@ class @Collapsible if $(event.target).text() == 'See full output' new_text = 'Hide output' else - new_text = 'See full ouput' + new_text = 'See full output' $(event.target).text(new_text) @toggleHint: (event) => diff --git a/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee b/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee new file mode 100644 index 0000000000..2cbba143a3 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/combinedopenended/display.coffee @@ -0,0 +1,282 @@ +class @CombinedOpenEnded + constructor: (element) -> + @element=element + @reinitialize(element) + + reinitialize: (element) -> + @wrapper=$(element).find('section.xmodule_CombinedOpenEndedModule') + @el = $(element).find('section.combined-open-ended') + @combined_open_ended=$(element).find('section.combined-open-ended') + @id = @el.data('id') + @ajax_url = @el.data('ajax-url') + @state = @el.data('state') + @task_count = @el.data('task-count') + @task_number = @el.data('task-number') + + @allow_reset = @el.data('allow_reset') + @reset_button = @$('.reset-button') + @reset_button.click @reset + @next_problem_button = @$('.next-step-button') + @next_problem_button.click @next_problem + + @show_results_button=@$('.show-results-button') + @show_results_button.click @show_results + + # valid states: 'initial', 'assessing', 'post_assessment', 'done' + Collapsible.setCollapsibles(@el) + @submit_evaluation_button = $('.submit-evaluation-button') + 
@submit_evaluation_button.click @message_post + + @results_container = $('.result-container') + + # Where to put the rubric once we load it + @el = $(element).find('section.open-ended-child') + @errors_area = @$('.error') + @answer_area = @$('textarea.answer') + + @rubric_wrapper = @$('.rubric-wrapper') + @hint_wrapper = @$('.hint-wrapper') + @message_wrapper = @$('.message-wrapper') + @submit_button = @$('.submit-button') + @child_state = @el.data('state') + @child_type = @el.data('child-type') + if @child_type=="openended" + @skip_button = @$('.skip-button') + @skip_button.click @skip_post_assessment + + @open_ended_child= @$('.open-ended-child') + + @find_assessment_elements() + @find_hint_elements() + + @rebind() + + # locally scoped jquery. + $: (selector) -> + $(selector, @el) + + show_results: (event) => + status_item = $(event.target).parent().parent() + status_number = status_item.data('status-number') + data = {'task_number' : status_number} + $.postWithPrefix "#{@ajax_url}/get_results", data, (response) => + if response.success + @results_container.after(response.html).remove() + @results_container = $('div.result-container') + @submit_evaluation_button = $('.submit-evaluation-button') + @submit_evaluation_button.click @message_post + Collapsible.setCollapsibles(@results_container) + else + @errors_area.html(response.error) + + message_post: (event)=> + Logger.log 'message_post', @answers + external_grader_message=$(event.target).parent().parent().parent() + evaluation_scoring = $(event.target).parent() + + fd = new FormData() + feedback = evaluation_scoring.find('textarea.feedback-on-feedback')[0].value + submission_id = external_grader_message.find('input.submission_id')[0].value + grader_id = external_grader_message.find('input.grader_id')[0].value + score = evaluation_scoring.find("input:radio[name='evaluation-score']:checked").val() + + fd.append('feedback', feedback) + fd.append('submission_id', submission_id) + fd.append('grader_id', grader_id) + 
if(!score) + @gentle_alert "You need to pick a rating before you can submit." + return + else + fd.append('score', score) + + settings = + type: "POST" + data: fd + processData: false + contentType: false + success: (response) => + @gentle_alert response.msg + $('section.evaluation').slideToggle() + @message_wrapper.html(response.message_html) + + $.ajaxWithPrefix("#{@ajax_url}/save_post_assessment", settings) + + + rebind: () => + # rebind to the appropriate function for the current state + @submit_button.unbind('click') + @submit_button.show() + @reset_button.hide() + @next_problem_button.hide() + @hint_area.attr('disabled', false) + + if @child_type=="openended" + @skip_button.hide() + if @allow_reset=="True" + @reset_button.show() + @submit_button.hide() + @answer_area.attr("disabled", true) + @hint_area.attr('disabled', true) + else if @child_state == 'initial' + @answer_area.attr("disabled", false) + @submit_button.prop('value', 'Submit') + @submit_button.click @save_answer + else if @child_state == 'assessing' + @answer_area.attr("disabled", true) + @submit_button.prop('value', 'Submit assessment') + @submit_button.click @save_assessment + if @child_type == "openended" + @submit_button.hide() + @queueing() + else if @child_state == 'post_assessment' + if @child_type=="openended" + @skip_button.show() + @skip_post_assessment() + @answer_area.attr("disabled", true) + @submit_button.prop('value', 'Submit post-assessment') + if @child_type=="selfassessment" + @submit_button.click @save_hint + else + @submit_button.click @message_post + else if @child_state == 'done' + @answer_area.attr("disabled", true) + @hint_area.attr('disabled', true) + @submit_button.hide() + if @child_type=="openended" + @skip_button.hide() + if @task_number<@task_count + @next_problem() + else + @reset_button.show() + + + find_assessment_elements: -> + @assessment = @$('select.assessment') + + find_hint_elements: -> + @hint_area = @$('textarea.post_assessment') + + save_answer: (event) => 
+ event.preventDefault() + if @child_state == 'initial' + data = {'student_answer' : @answer_area.val()} + $.postWithPrefix "#{@ajax_url}/save_answer", data, (response) => + if response.success + @rubric_wrapper.html(response.rubric_html) + @child_state = 'assessing' + @find_assessment_elements() + @rebind() + else + @errors_area.html(response.error) + else + @errors_area.html('Problem state got out of sync. Try reloading the page.') + + save_assessment: (event) => + event.preventDefault() + if @child_state == 'assessing' + data = {'assessment' : @assessment.find(':selected').text()} + $.postWithPrefix "#{@ajax_url}/save_assessment", data, (response) => + if response.success + @child_state = response.state + + if @child_state == 'post_assessment' + @hint_wrapper.html(response.hint_html) + @find_hint_elements() + else if @child_state == 'done' + @message_wrapper.html(response.message_html) + + @rebind() + else + @errors_area.html(response.error) + else + @errors_area.html('Problem state got out of sync. Try reloading the page.') + + save_hint: (event) => + event.preventDefault() + if @child_state == 'post_assessment' + data = {'hint' : @hint_area.val()} + + $.postWithPrefix "#{@ajax_url}/save_post_assessment", data, (response) => + if response.success + @message_wrapper.html(response.message_html) + @child_state = 'done' + @rebind() + else + @errors_area.html(response.error) + else + @errors_area.html('Problem state got out of sync. Try reloading the page.') + + skip_post_assessment: => + if @child_state == 'post_assessment' + + $.postWithPrefix "#{@ajax_url}/skip_post_assessment", {}, (response) => + if response.success + @child_state = 'done' + @rebind() + else + @errors_area.html(response.error) + else + @errors_area.html('Problem state got out of sync. 
Try reloading the page.') + + reset: (event) => + event.preventDefault() + if @child_state == 'done' or @allow_reset=="True" + $.postWithPrefix "#{@ajax_url}/reset", {}, (response) => + if response.success + @answer_area.val('') + @rubric_wrapper.html('') + @hint_wrapper.html('') + @message_wrapper.html('') + @child_state = 'initial' + @combined_open_ended.after(response.html).remove() + @allow_reset="False" + @reinitialize(@element) + @rebind() + @reset_button.hide() + else + @errors_area.html(response.error) + else + @errors_area.html('Problem state got out of sync. Try reloading the page.') + + next_problem: => + if @child_state == 'done' + $.postWithPrefix "#{@ajax_url}/next_problem", {}, (response) => + if response.success + @answer_area.val('') + @rubric_wrapper.html('') + @hint_wrapper.html('') + @message_wrapper.html('') + @child_state = 'initial' + @combined_open_ended.after(response.html).remove() + @reinitialize(@element) + @rebind() + @next_problem_button.hide() + if !response.allow_reset + @gentle_alert "Moved to next step." + else + @gentle_alert "Your score did not meet the criteria to move to the next step." + else + @errors_area.html(response.error) + else + @errors_area.html('Problem state got out of sync. Try reloading the page.') + + gentle_alert: (msg) => + if @el.find('.open-ended-alert').length + @el.find('.open-ended-alert').remove() + alert_elem = "
" + msg + "
" + @el.find('.open-ended-action').after(alert_elem) + @el.find('.open-ended-alert').css(opacity: 0).animate(opacity: 1, 700) + + queueing: => + if @child_state=="assessing" and @child_type=="openended" + if window.queuePollerID # Only one poller 'thread' per Problem + window.clearTimeout(window.queuePollerID) + window.queuePollerID = window.setTimeout(@poll, 10000) + + poll: => + $.postWithPrefix "#{@ajax_url}/check_for_score", (response) => + if response.state == "done" or response.state=="post_assessment" + delete window.queuePollerID + location.reload() + else + window.queuePollerID = window.setTimeout(@poll, 10000) \ No newline at end of file diff --git a/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/el_output.js b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/el_output.js new file mode 100644 index 0000000000..3175aae3f0 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/el_output.js @@ -0,0 +1,139 @@ +// Wrapper for RequireJS. It will make the standard requirejs(), require(), and +// define() functions from Require JS available inside the anonymous function. +(function (requirejs, require, define) { + +define('ElOutput', ['logme'], function (logme) { + + return ElOutput; + + function ElOutput(config, state) { + + if ($.isPlainObject(config.functions.function)) { + processFuncObj(config.functions.function); + } else if ($.isArray(config.functions.function)) { + (function (c1) { + while (c1 < config.functions.function.length) { + if ($.isPlainObject(config.functions.function[c1])) { + processFuncObj(config.functions.function[c1]); + } + + c1 += 1; + } + }(0)); + } + + return; + + function processFuncObj(obj) { + var paramNames, funcString, func, el, disableAutoReturn, updateOnEvent; + + // We are only interested in functions that are meant for output to an + // element. 
+ if ( + (typeof obj['@output'] !== 'string') || + ((obj['@output'].toLowerCase() !== 'element') && (obj['@output'].toLowerCase() !== 'none')) + ) { + return; + } + + if (typeof obj['@el_id'] !== 'string') { + logme('ERROR: You specified "output" as "element", but did not spify "el_id".'); + + return; + } + + if (typeof obj['#text'] !== 'string') { + logme('ERROR: Function body is not defined.'); + + return; + } + + updateOnEvent = 'slide'; + if ( + (obj.hasOwnProperty('@update_on') === true) && + (typeof obj['@update_on'] === 'string') && + ((obj['@update_on'].toLowerCase() === 'slide') || (obj['@update_on'].toLowerCase() === 'change')) + ) { + updateOnEvent = obj['@update_on'].toLowerCase(); + } + + disableAutoReturn = obj['@disable_auto_return']; + + funcString = obj['#text']; + + if ( + (disableAutoReturn === undefined) || + ( + (typeof disableAutoReturn === 'string') && + (disableAutoReturn.toLowerCase() !== 'true') + ) + ) { + if (funcString.search(/return/i) === -1) { + funcString = 'return ' + funcString; + } + } else { + if (funcString.search(/return/i) === -1) { + logme( + 'ERROR: You have specified a JavaScript ' + + 'function without a "return" statemnt. Your ' + + 'function will return "undefined" by default.' + ); + } + } + + // Make sure that all HTML entities are converted to their proper + // ASCII text equivalents. + funcString = $('
').html(funcString).text(); + + paramNames = state.getAllParameterNames(); + paramNames.push(funcString); + + try { + func = Function.apply(null, paramNames); + } catch (err) { + logme( + 'ERROR: The function body "' + + funcString + + '" was not converted by the Function constructor.' + ); + logme('Error message: "' + err.message + '".'); + + $('#' + gstId).html('
' + 'ERROR IN XML: Could not create a function from string "' + funcString + '".' + '
'); + $('#' + gstId).append('
' + 'Error message: "' + err.message + '".' + '
'); + + paramNames.pop(); + + return; + } + + paramNames.pop(); + + if (obj['@output'].toLowerCase() !== 'none') { + el = $('#' + obj['@el_id']); + + if (el.length !== 1) { + logme( + 'ERROR: DOM element with ID "' + obj['@el_id'] + '" ' + + 'not found. Dynamic element not created.' + ); + + return; + } + + el.html(func.apply(window, state.getAllParameterValues())); + } else { + el = null; + func.apply(window, state.getAllParameterValues()); + } + + state.addDynamicEl(el, func, obj['@el_id'], updateOnEvent); + } + + } +}); + +// End of wrapper for RequireJS. As you can see, we are passing +// namespaced Require JS variables to an anonymous function. Within +// it, you can use the standard requirejs(), require(), and define() +// functions as if they were in the global namespace. +}(RequireJS.requirejs, RequireJS.require, RequireJS.define)); // End-of: (function (requirejs, require, define) diff --git a/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/g_label_el_output.js b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/g_label_el_output.js new file mode 100644 index 0000000000..13c9dd3389 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/g_label_el_output.js @@ -0,0 +1,113 @@ +// Wrapper for RequireJS. It will make the standard requirejs(), require(), and +// define() functions from Require JS available inside the anonymous function. 
+(function (requirejs, require, define) { + +define('GLabelElOutput', ['logme'], function (logme) { + return GLabelElOutput; + + function GLabelElOutput(config, state) { + if ($.isPlainObject(config.functions.function)) { + processFuncObj(config.functions.function); + } else if ($.isArray(config.functions.function)) { + (function (c1) { + while (c1 < config.functions.function.length) { + if ($.isPlainObject(config.functions.function[c1])) { + processFuncObj(config.functions.function[c1]); + } + + c1 += 1; + } + }(0)); + } + + return; + + function processFuncObj(obj) { + var paramNames, funcString, func, disableAutoReturn; + + // We are only interested in functions that are meant for output to an + // element. + if ( + (typeof obj['@output'] !== 'string') || + (obj['@output'].toLowerCase() !== 'plot_label') + ) { + return; + } + + if (typeof obj['@el_id'] !== 'string') { + logme('ERROR: You specified "output" as "plot_label", but did not spify "el_id".'); + + return; + } + + if (typeof obj['#text'] !== 'string') { + logme('ERROR: Function body is not defined.'); + + return; + } + + disableAutoReturn = obj['@disable_auto_return']; + + funcString = obj['#text']; + + if ( + (disableAutoReturn === undefined) || + ( + (typeof disableAutoReturn === 'string') && + (disableAutoReturn.toLowerCase() !== 'true') + ) + ) { + if (funcString.search(/return/i) === -1) { + funcString = 'return ' + funcString; + } + } else { + if (funcString.search(/return/i) === -1) { + logme( + 'ERROR: You have specified a JavaScript ' + + 'function without a "return" statemnt. Your ' + + 'function will return "undefined" by default.' + ); + } + } + + // Make sure that all HTML entities are converted to their proper + // ASCII text equivalents. + funcString = $('
').html(funcString).text(); + + paramNames = state.getAllParameterNames(); + paramNames.push(funcString); + + try { + func = Function.apply(null, paramNames); + } catch (err) { + logme( + 'ERROR: The function body "' + + funcString + + '" was not converted by the Function constructor.' + ); + logme('Error message: "' + err.message + '".'); + + $('#' + gstId).html('
' + 'ERROR IN XML: Could not create a function from string "' + funcString + '".' + '
'); + $('#' + gstId).append('
' + 'Error message: "' + err.message + '".' + '
'); + + paramNames.pop(); + + return; + } + + paramNames.pop(); + + state.plde.push({ + 'elId': obj['@el_id'], + 'func': func + }); + } + + } +}); + +// End of wrapper for RequireJS. As you can see, we are passing +// namespaced Require JS variables to an anonymous function. Within +// it, you can use the standard requirejs(), require(), and define() +// functions as if they were in the global namespace. +}(RequireJS.requirejs, RequireJS.require, RequireJS.define)); // End-of: (function (requirejs, require, define) diff --git a/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/general_methods.js b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/general_methods.js new file mode 100644 index 0000000000..9cdd4fff0f --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/general_methods.js @@ -0,0 +1,23 @@ +// Wrapper for RequireJS. It will make the standard requirejs(), require(), and +// define() functions from Require JS available inside the anonymous function. +(function (requirejs, require, define) { + +define('GeneralMethods', [], function () { + if (!String.prototype.trim) { + // http://blog.stevenlevithan.com/archives/faster-trim-javascript + String.prototype.trim = function trim(str) { + return str.replace(/^\s\s*/, '').replace(/\s\s*$/, ''); + }; + } + + return { + 'module_name': 'GeneralMethods', + 'module_status': 'OK' + }; +}); + +// End of wrapper for RequireJS. As you can see, we are passing +// namespaced Require JS variables to an anonymous function. Within +// it, you can use the standard requirejs(), require(), and define() +// functions as if they were in the global namespace. 
+}(RequireJS.requirejs, RequireJS.require, RequireJS.define)); // End-of: (function (requirejs, require, define) diff --git a/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/graph.js b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/graph.js new file mode 100644 index 0000000000..5b6223df43 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/graph.js @@ -0,0 +1,1496 @@ +// Wrapper for RequireJS. It will make the standard requirejs(), require(), and +// define() functions from Require JS available inside the anonymous function. +(function (requirejs, require, define) { + +define('Graph', ['logme'], function (logme) { + + return Graph; + + function Graph(gstId, config, state) { + var plotDiv, dataSeries, functions, xaxis, yaxis, numPoints, xrange, + asymptotes, movingLabels, xTicksNames, yTicksNames, graphBarWidth, graphBarAlign; + + // We need plot configuration settings. Without them we can't continue. + if ($.isPlainObject(config.plot) === false) { + return; + } + + // We must have a graph container DIV element available in order to + // proceed. + plotDiv = $('#' + gstId + '_plot'); + if (plotDiv.length === 0) { + logme('ERROR: Could not find the plot DIV with ID "' + gstId + '_plot".'); + + return; + } + + if (plotDiv.width() === 0) { + plotDiv.width(300); + } + + // Sometimes, when height is not explicitly set via CSS (or by some + // other means), it is 0 pixels by default. When Flot will try to plot + // a graph in this DIV with 0 height, then it will raise an error. To + // prevent this, we will set it to be equal to the width. + if (plotDiv.height() === 0) { + plotDiv.height(plotDiv.width()); + } + + plotDiv.css('position', 'relative'); + + // Configure some settings for the graph. + if (setGraphXRange() === false) { + logme('ERROR: Could not configure the xrange. 
Will not continue.'); + + return; + } + + if (setGraphAxes() === false) { + logme('ERROR: Could not process configuration for the axes.'); + + return; + } + + graphBarWidth = 1; + graphBarAlign = null; + + getBarWidth(); + getBarAlign(); + + // Get the user defined functions. If there aren't any, don't do + // anything else. + createFunctions(); + + if (functions.length === 0) { + logme('ERROR: No functions were specified, or something went wrong.'); + + return; + } + + if (createMarkingsFunctions() === false) { + return; + } + if (createMovingLabelFunctions() === false) { + return; + } + + // Create the initial graph and plot it for the user to see. + if (generateData() === true) { + updatePlot(); + } + + // Bind an event. Whenever some constant changes, the graph will be + // redrawn + state.bindUpdatePlotEvent(plotDiv, onUpdatePlot); + + return; + + function getBarWidth() { + if (config.plot.hasOwnProperty('bar_width') === false) { + return; + } + + if (typeof config.plot.bar_width !== 'string') { + logme('ERROR: The parameter config.plot.bar_width must be a string.'); + + return; + } + + if (isFinite(graphBarWidth = parseFloat(config.plot.bar_width)) === false) { + logme('ERROR: The parameter config.plot.bar_width is not a valid floating number.'); + graphBarWidth = 1; + + return; + } + + return; + } + + function getBarAlign() { + if (config.plot.hasOwnProperty('bar_align') === false) { + return; + } + + if (typeof config.plot.bar_align !== 'string') { + logme('ERROR: The parameter config.plot.bar_align must be a string.'); + + return; + } + + if ( + (config.plot.bar_align.toLowerCase() !== 'left') && + (config.plot.bar_align.toLowerCase() !== 'center') + ) { + logme('ERROR: Property config.plot.bar_align can be one of "left", or "center".'); + + return; + } + + graphBarAlign = config.plot.bar_align.toLowerCase(); + + return; + } + + function createMovingLabelFunctions() { + var c1, returnStatus; + + returnStatus = true; + movingLabels = []; + + if 
(config.plot.hasOwnProperty('moving_label') !== true) { + returnStatus = true; + } else if ($.isPlainObject(config.plot.moving_label) === true) { + if (processMovingLabel(config.plot.moving_label) === false) { + returnStatus = false; + } + } else if ($.isArray(config.plot.moving_label) === true) { + for (c1 = 0; c1 < config.plot.moving_label.length; c1++) { + if (processMovingLabel(config.plot.moving_label[c1]) === false) { + returnStatus = false; + } + } + } + + return returnStatus; + } + + function processMovingLabel(obj) { + var labelText, funcString, disableAutoReturn, paramNames, func, + fontWeight, fontColor; + + if (obj.hasOwnProperty('@text') === false) { + logme('ERROR: You did not define a "text" attribute for the moving_label.'); + + return false; + } + if (typeof obj['@text'] !== 'string') { + logme('ERROR: "text" attribute is not a string.'); + + return false; + } + labelText = obj['@text']; + + if (obj.hasOwnProperty('#text') === false) { + logme('ERROR: moving_label is missing function declaration.'); + + return false; + } + if (typeof obj['#text'] !== 'string') { + logme('ERROR: Function declaration is not a string.'); + + return false; + } + funcString = obj['#text']; + + fontColor = 'black'; + if ( + (obj.hasOwnProperty('@color') === true) && + (typeof obj['@color'] === 'string') + ) { + fontColor = obj['@color']; + } + + fontWeight = 'normal'; + if ( + (obj.hasOwnProperty('@weight') === true) && + (typeof obj['@weight'] === 'string') + ) { + if ( + (obj['@weight'].toLowerCase() === 'normal') || + (obj['@weight'].toLowerCase() === 'bold') + ) { + fontWeight = obj['@weight']; + } else { + logme('ERROR: Moving label can have a weight property of "normal" or "bold".'); + } + } + + disableAutoReturn = obj['@disable_auto_return']; + + funcString = $('
').html(funcString).text(); + + if ( + (disableAutoReturn === undefined) || + ( + (typeof disableAutoReturn === 'string') && + (disableAutoReturn.toLowerCase() !== 'true') + ) + ) { + if (funcString.search(/return/i) === -1) { + funcString = 'return ' + funcString; + } + } else { + if (funcString.search(/return/i) === -1) { + logme( + 'ERROR: You have specified a JavaScript ' + + 'function without a "return" statemnt. Your ' + + 'function will return "undefined" by default.' + ); + } + } + + paramNames = state.getAllParameterNames(); + paramNames.push(funcString); + + try { + func = Function.apply(null, paramNames); + } catch (err) { + logme( + 'ERROR: The function body "' + + funcString + + '" was not converted by the Function constructor.' + ); + logme('Error message: "' + err.message + '"'); + + $('#' + gstId).html('
' + 'ERROR IN XML: Could not create a function from the string "' + funcString + '".' + '
'); + $('#' + gstId).append('
' + 'Error message: "' + err.message + '".' + '
'); + + paramNames.pop(); + + return false; + } + + paramNames.pop(); + + movingLabels.push({ + 'labelText': labelText, + 'func': func, + 'el': null, + 'fontColor': fontColor, + 'fontWeight': fontWeight + }); + + return true; + } + + function createMarkingsFunctions() { + var c1, paramNames, returnStatus; + + returnStatus = true; + + asymptotes = []; + paramNames = state.getAllParameterNames(); + + if ($.isPlainObject(config.plot.asymptote)) { + if (processAsymptote(config.plot.asymptote) === false) { + returnStatus = false; + } + } else if ($.isArray(config.plot.asymptote)) { + for (c1 = 0; c1 < config.plot.asymptote.length; c1 += 1) { + if (processAsymptote(config.plot.asymptote[c1]) === false) { + returnStatus = false; + } + } + } + + return returnStatus; + + // Read configuration options for asymptotes, and store them as + // an array of objects. Each object will have 3 properties: + // + // - color: the color of the asymptote line + // - type: 'x' (vertical), or 'y' (horizontal) + // - func: the function that will generate the value at which + // the asymptote will be plotted; i.e. x = func(), or + // y = func(); for now only horizontal and vertical + // asymptotes are supported + // + // Since each asymptote can have a variable function - function + // that relies on some parameter specified in the config - we will + // generate each asymptote just before we draw the graph. See: + // + // function updatePlot() + // function generateMarkings() + // + // Asymptotes are really thin rectangles implemented via the Flot's + // markings option. 
+ function processAsymptote(asyObj) { + var newAsyObj, funcString, func; + + newAsyObj = {}; + + if (typeof asyObj['@type'] === 'string') { + if (asyObj['@type'].toLowerCase() === 'x') { + newAsyObj.type = 'x'; + } else if (asyObj['@type'].toLowerCase() === 'y') { + newAsyObj.type = 'y'; + } else { + logme('ERROR: Attribute "type" for asymptote can be "x" or "y".'); + + return false; + } + } else { + logme('ERROR: Attribute "type" for asymptote is not specified.'); + + return false; + } + + if (typeof asyObj['#text'] === 'string') { + funcString = asyObj['#text']; + } else { + logme('ERROR: Function body for asymptote is not specified.'); + + return false; + } + + newAsyObj.color = '#000'; + if (typeof asyObj['@color'] === 'string') { + newAsyObj.color = asyObj['@color']; + } + + newAsyObj.label = false; + if ( + (asyObj.hasOwnProperty('@label') === true) && + (typeof asyObj['@label'] === 'string') + ) { + newAsyObj.label = asyObj['@label']; + } + + funcString = $('
').html(funcString).text(); + + disableAutoReturn = asyObj['@disable_auto_return']; + if ( + (disableAutoReturn === undefined) || + ( + (typeof disableAutoReturn === 'string') && + (disableAutoReturn.toLowerCase() !== 'true') + ) + ) { + if (funcString.search(/return/i) === -1) { + funcString = 'return ' + funcString; + } + } else { + if (funcString.search(/return/i) === -1) { + logme( + 'ERROR: You have specified a JavaScript ' + + 'function without a "return" statemnt. Your ' + + 'function will return "undefined" by default.' + ); + } + } + + paramNames.push(funcString); + + try { + func = Function.apply(null, paramNames); + } catch (err) { + logme('ERROR: Asymptote function body could not be converted to function object.'); + logme('Error message: "".' + err.message); + + return false; + } + + paramNames.pop(); + + newAsyObj.func = func; + asymptotes.push(newAsyObj); + + return true; + } + } + + function setGraphAxes() { + xaxis = { + 'tickFormatter': null + }; + + if (typeof config.plot['xticks'] === 'string') { + if (processTicks(config.plot['xticks'], xaxis, 'xunits') === false) { + logme('ERROR: Could not process the ticks for x-axis.'); + + return false; + } + } else { + // logme('MESSAGE: "xticks" were not specified. Using defaults.'); + + return false; + } + + yaxis = { + 'tickFormatter': null + }; + if (typeof config.plot['yticks'] === 'string') { + if (processTicks(config.plot['yticks'], yaxis, 'yunits') === false) { + logme('ERROR: Could not process the ticks for y-axis.'); + + return false; + } + } else { + // logme('MESSAGE: "yticks" were not specified. Using defaults.'); + + return false; + } + + xTicksNames = null; + yTicksNames = null; + + if (checkForTicksNames('x') === false) { + return false; + } + + if (checkForTicksNames('y') === false) { + return false; + } + + return true; + + // + // function checkForTicksNames(axisName) + // + // The parameter "axisName" can be either "x" or "y" (string). 
Depending on it, the function + // will set "xTicksNames" or "yTicksNames" private variable. + // + // This function does not return anything. It sets the private variable "xTicksNames" ("yTicksNames") + // to the object converted by JSON.parse from the XML parameter "plot.xticks_names" ("plot.yticks_names"). + // If the "plot.xticks_names" ("plot.yticks_names") is missing or it is not a valid JSON string, then + // "xTicksNames" ("yTicksNames") will be set to "null". + // + // Depending on the "xTicksNames" ("yTicksNames") being "null" or an object, the plot will either draw + // number ticks, or use the names specified by the opbject. + // + function checkForTicksNames(axisName) { + var tmpObj; + + if ((axisName !== 'x') && (axisName !== 'y')) { + // This is not an error. This funcion should simply stop executing. + + return true; + } + + if ( + (config.plot.hasOwnProperty(axisName + 'ticks_names') === true) || + (typeof config.plot[axisName + 'ticks_names'] === 'string') + ) { + try { + tmpObj = JSON.parse(config.plot[axisName + 'ticks_names']); + } catch (err) { + logme( + 'ERROR: plot.' + axisName + 'ticks_names is not a valid JSON string.', + 'Error message: "' + err.message + '".' + ); + + return false; + } + + if (axisName === 'x') { + xTicksNames = tmpObj; + xaxis.tickFormatter = xAxisTickFormatter; + } + // At this point, we are certain that axisName = 'y'. + else { + yTicksNames = tmpObj; + yaxis.tickFormatter = yAxisTickFormatter; + } + } + } + + function processTicks(ticksStr, ticksObj, unitsType) { + var ticksBlobs, tempFloat, tempTicks, c1, c2; + + // The 'ticks' setting is a string containing 3 floating-point + // numbers. + ticksBlobs = ticksStr.split(','); + + if (ticksBlobs.length !== 3) { + logme('ERROR: Did not get 3 blobs from ticksStr = "' + ticksStr + '".'); + + return false; + } + + tempFloat = parseFloat(ticksBlobs[0]); + if (isNaN(tempFloat) === false) { + ticksObj.min = tempFloat; + } else { + logme('ERROR: Invalid "min". 
ticksBlobs[0] = ', ticksBlobs[0]); + + return false; + } + + tempFloat = parseFloat(ticksBlobs[1]); + if (isNaN(tempFloat) === false) { + ticksObj.tickSize = tempFloat; + } else { + logme('ERROR: Invalid "tickSize". ticksBlobs[1] = ', ticksBlobs[1]); + + return false; + } + + tempFloat = parseFloat(ticksBlobs[2]); + if (isNaN(tempFloat) === false) { + ticksObj.max = tempFloat; + } else { + logme('ERROR: Invalid "max". ticksBlobs[2] = ', ticksBlobs[2]); + + return false; + } + + // Is the starting tick to the left of the ending tick (on the + // x-axis)? If not, set default starting and ending tick. + if (ticksObj.min >= ticksObj.max) { + logme('ERROR: Ticks min >= max.'); + + return false; + } + + // Make sure the range makes sense - i.e. that there are at + // least 3 ticks. If not, set a tickSize which will produce + // 11 ticks. tickSize is the spacing between the ticks. + if (ticksObj.tickSize > ticksObj.max - ticksObj.min) { + logme('ERROR: tickSize > max - min.'); + + return false; + } + + // units: change last tick to units + if (typeof config.plot[unitsType] === 'string') { + tempTicks = []; + + for (c1 = ticksObj.min; c1 <= ticksObj.max; c1 += ticksObj.tickSize) { + c2 = roundToPrec(c1, ticksObj.tickSize); + tempTicks.push([c2, c2]); + } + + tempTicks.pop(); + tempTicks.push([ + roundToPrec(ticksObj.max, ticksObj.tickSize), + config.plot[unitsType] + ]); + + ticksObj.tickSize = null; + ticksObj.ticks = tempTicks; + } + + return true; + + function roundToPrec(num, prec) { + var c1, tn1, tn2, digitsBefore, digitsAfter; + + tn1 = Math.abs(num); + tn2 = Math.abs(prec); + + // Find out number of digits BEFORE the decimal point. + c1 = 0; + tn1 = Math.abs(num); + while (tn1 >= 1) { + c1 += 1; + + tn1 /= 10; + } + digitsBefore = c1; + + // Find out number of digits AFTER the decimal point. 
+ c1 = 0; + tn1 = Math.abs(num); + while (Math.round(tn1) !== tn1) { + c1 += 1; + + tn1 *= 10; + } + digitsAfter = c1; + + // For precision, find out number of digits AFTER the + // decimal point. + c1 = 0; + while (Math.round(tn2) !== tn2) { + c1 += 1; + + tn2 *= 10; + } + + // If precision is more than 1 (no digits after decimal + // points). + if (c1 === 0) { + return num; + } + + // If the precision contains digits after the decimal + // point, we apply special rules. + else { + tn1 = Math.abs(num); + + // if (digitsAfter > c1) { + tn1 = tn1.toFixed(c1); + // } else { + // tn1 = tn1.toPrecision(digitsBefore + digitsAfter); + // } + } + + if (num < 0) { + return -tn1; + } + + return tn1; + } + } + } + + function setGraphXRange() { + var xRangeStr, xRangeBlobs, tempNum, allParamNames, funcString, + disableAutoReturn; + + xrange = {}; + + if ($.isPlainObject(config.plot.xrange) === false) { + logme( + 'ERROR: Expected config.plot.xrange to be an object. ' + + 'It is not.' + ); + logme('config.plot.xrange = ', config.plot.xrange); + + return false; + } + + if (config.plot.xrange.hasOwnProperty('min') === false) { + logme( + 'ERROR: Expected config.plot.xrange.min to be ' + + 'present. It is not.' 
+ ); + + return false; + } + + disableAutoReturn = false; + if (typeof config.plot.xrange.min === 'string') { + funcString = config.plot.xrange.min; + } else if ( + ($.isPlainObject(config.plot.xrange.min) === true) && + (config.plot.xrange.min.hasOwnProperty('#text') === true) && + (typeof config.plot.xrange.min['#text'] === 'string') + ) { + funcString = config.plot.xrange.min['#text']; + + disableAutoReturn = + config.plot.xrange.min['@disable_auto_return']; + if ( + (disableAutoReturn === undefined) || + ( + (typeof disableAutoReturn === 'string') && + (disableAutoReturn.toLowerCase() !== 'true') + ) + ) { + disableAutoReturn = false; + } else { + disableAutoReturn = true; + } + } else { + logme( + 'ERROR: Could not get a function definition for ' + + 'xrange.min property.' + ); + + return false; + } + + funcString = $('
').html(funcString).text(); + + if (disableAutoReturn === false) { + if (funcString.search(/return/i) === -1) { + funcString = 'return ' + funcString; + } + } else { + if (funcString.search(/return/i) === -1) { + logme( + 'ERROR: You have specified a JavaScript ' + + 'function without a "return" statemnt. Your ' + + 'function will return "undefined" by default.' + ); + } + } + + allParamNames = state.getAllParameterNames(); + + allParamNames.push(funcString); + try { + xrange.min = Function.apply(null, allParamNames); + } catch (err) { + logme( + 'ERROR: could not create a function from the string "' + + funcString + '" for xrange.min.' + ); + logme('Error message: "' + err.message + '"'); + + $('#' + gstId).html( + '
' + 'ERROR IN ' + + 'XML: Could not create a function from the string "' + + funcString + '" for xrange.min.' + '
' + ); + $('#' + gstId).append( + '
' + 'Error ' + + 'message: "' + err.message + '".' + '
' + ); + + return false; + } + allParamNames.pop(); + + if (config.plot.xrange.hasOwnProperty('max') === false) { + logme( + 'ERROR: Expected config.plot.xrange.max to be ' + + 'present. It is not.' + ); + + return false; + } + + disableAutoReturn = false; + if (typeof config.plot.xrange.max === 'string') { + funcString = config.plot.xrange.max; + } else if ( + ($.isPlainObject(config.plot.xrange.max) === true) && + (config.plot.xrange.max.hasOwnProperty('#text') === true) && + (typeof config.plot.xrange.max['#text'] === 'string') + ) { + funcString = config.plot.xrange.max['#text']; + + disableAutoReturn = + config.plot.xrange.max['@disable_auto_return']; + if ( + (disableAutoReturn === undefined) || + ( + (typeof disableAutoReturn === 'string') && + (disableAutoReturn.toLowerCase() !== 'true') + ) + ) { + disableAutoReturn = false; + } else { + disableAutoReturn = true; + } + } else { + logme( + 'ERROR: Could not get a function definition for ' + + 'xrange.max property.' + ); + + return false; + } + + funcString = $('
').html(funcString).text(); + + if (disableAutoReturn === false) { + if (funcString.search(/return/i) === -1) { + funcString = 'return ' + funcString; + } + } else { + if (funcString.search(/return/i) === -1) { + logme( + 'ERROR: You have specified a JavaScript ' + + 'function without a "return" statemnt. Your ' + + 'function will return "undefined" by default.' + ); + } + } + + allParamNames.push(funcString); + try { + xrange.max = Function.apply(null, allParamNames); + } catch (err) { + logme( + 'ERROR: could not create a function from the string "' + + funcString + '" for xrange.max.' + ); + logme('Error message: "' + err.message + '"'); + + $('#' + gstId).html( + '
' + 'ERROR IN ' + + 'XML: Could not create a function from the string "' + + funcString + '" for xrange.max.' + '
' + ); + $('#' + gstId).append( + '
' + 'Error message: "' + + err.message + '".' + '
' + ); + + return false; + } + allParamNames.pop(); + + tempNum = parseInt(config.plot.num_points, 10); + if (isFinite(tempNum) === false) { + tempNum = plotDiv.width() / 5.0; + } + + if ( + (tempNum < 2) && + (tempNum > 1000) + ) { + logme( + 'ERROR: Number of points is outside the allowed range ' + + '[2, 1000]' + ); + logme('config.plot.num_points = ' + tempNum); + + return false; + } + + numPoints = tempNum; + + return true; + } + + function createFunctions() { + var c1; + + functions = []; + + if (typeof config.functions === 'undefined') { + logme('ERROR: config.functions is undefined.'); + + return; + } + + if (typeof config.functions.function === 'string') { + + // If just one function string is present. + addFunction(config.functions.function); + + } else if ($.isPlainObject(config.functions.function) === true) { + + // If a function is present, but it also has properties + // defined. + callAddFunction(config.functions.function); + + } else if ($.isArray(config.functions.function)) { + + // If more than one function is defined. + for (c1 = 0; c1 < config.functions.function.length; c1 += 1) { + + // For each definition, we must check if it is a simple + // string definition, or a complex one with properties. + if (typeof config.functions.function[c1] === 'string') { + + // Simple string. + addFunction(config.functions.function[c1]); + + } else if ($.isPlainObject(config.functions.function[c1])) { + + // Properties are present. + callAddFunction(config.functions.function[c1]); + + } + } + } else { + logme('ERROR: config.functions.function is of an unsupported type.'); + + return; + } + + return; + + // This function will reduce code duplication. We have to call + // the function addFunction() several times passing object + // properties as parameters. Rather than writing them out every + // time, we will have a single place where it is done. 
+ function callAddFunction(obj) { + if ( + (obj.hasOwnProperty('@output')) && + (typeof obj['@output'] === 'string') + ) { + + // If this function is meant to be calculated for an + // element then skip it. + if ((obj['@output'].toLowerCase() === 'element') || + (obj['@output'].toLowerCase() === 'none')) { + return; + } + + // If this function is meant to be calculated for a + // dynamic element in a label then skip it. + else if (obj['@output'].toLowerCase() === 'plot_label') { + return; + } + + // It is an error if '@output' is not 'element', + // 'plot_label', or 'graph'. However, if the '@output' + // attribute is omitted, we will not have reached this. + else if (obj['@output'].toLowerCase() !== 'graph') { + logme( + 'ERROR: Function "output" attribute can be ' + + 'either "element", "plot_label", "none" or "graph".' + ); + + return; + } + + } + + // The user did not specify an "output" attribute, or it is + // "graph". + addFunction( + obj['#text'], + obj['@color'], + obj['@line'], + obj['@dot'], + obj['@label'], + obj['@point_size'], + obj['@fill_area'], + obj['@bar'], + obj['@disable_auto_return'] + ); + } + + function addFunction(funcString, color, line, dot, label, + pointSize, fillArea, bar, disableAutoReturn) { + + var newFunctionObject, func, paramNames, c1, rgxp; + + // The main requirement is function string. Without it we can't + // create a function, and the series cannot be calculated. + if (typeof funcString !== 'string') { + return; + } + + // Make sure that any HTML entities that were escaped will be + // unescaped. This is done because if a string with escaped + // HTML entities is passed to the Function() constructor, it + // will break. + funcString = $('
').html(funcString).text(); + + // If the user did not specifically turn off this feature, + // check if the function string contains a 'return', and + // prepend a 'return ' to the string if one, or more, is not + // found. + if ( + (disableAutoReturn === undefined) || + ( + (typeof disableAutoReturn === 'string') && + (disableAutoReturn.toLowerCase() !== 'true') + ) + ) { + if (funcString.search(/return/i) === -1) { + funcString = 'return ' + funcString; + } + } else { + if (funcString.search(/return/i) === -1) { + logme( + 'ERROR: You have specified a JavaScript ' + + 'function without a "return" statemnt. Your ' + + 'function will return "undefined" by default.' + ); + } + } + + // Some defaults. If no options are set for the graph, we will + // make sure that at least a line is drawn for a function. + newFunctionObject = { + 'line': true, + 'dot': false, + 'bars': false + }; + + // Get all of the parameter names defined by the user in the + // XML. + paramNames = state.getAllParameterNames(); + + // The 'x' is always one of the function parameters. + paramNames.push('x'); + + // Must make sure that the function body also gets passed to + // the Function constructor. + paramNames.push(funcString); + + // Create the function from the function string, and all of the + // available parameters AND the 'x' variable as it's parameters. + // For this we will use the built-in Function object + // constructor. + // + // If something goes wrong during this step, most + // likely the user supplied an invalid JavaScript function body + // string. In this case we will not proceed. + try { + func = Function.apply(null, paramNames); + } catch (err) { + logme( + 'ERROR: The function body "' + + funcString + + '" was not converted by the Function constructor.' + ); + logme('Error message: "' + err.message + '"'); + + $('#' + gstId).html('
' + 'ERROR IN XML: Could not create a function from the string "' + funcString + '".' + '
'); + $('#' + gstId).append('
' + 'Error message: "' + err.message + '".' + '
'); + + paramNames.pop(); + paramNames.pop(); + + return; + } + + // Return the array back to original state. Remember that it is + // a pointer to original array which is stored in state object. + paramNames.pop(); + paramNames.pop(); + + newFunctionObject['func'] = func; + + if (typeof color === 'string') { + newFunctionObject['color'] = color; + } + + if (typeof line === 'string') { + if (line.toLowerCase() === 'true') { + newFunctionObject['line'] = true; + } else if (line.toLowerCase() === 'false') { + newFunctionObject['line'] = false; + } + } + + if (typeof dot === 'string') { + if (dot.toLowerCase() === 'true') { + newFunctionObject['dot'] = true; + } else if (dot.toLowerCase() === 'false') { + newFunctionObject['dot'] = false; + } + } + + if (typeof pointSize === 'string') { + newFunctionObject['pointSize'] = pointSize; + } + + if (typeof bar === 'string') { + if (bar.toLowerCase() === 'true') { + newFunctionObject['bars'] = true; + } else if (bar.toLowerCase() === 'false') { + newFunctionObject['bars'] = false; + } + } + + if (newFunctionObject['bars'] === true) { + newFunctionObject['line'] = false; + newFunctionObject['dot'] = false; + // To do: See if need to do anything here. + } else if ( + (newFunctionObject['dot'] === false) && + (newFunctionObject['line'] === false) + ) { + newFunctionObject['line'] = true; + } + + if (newFunctionObject['line'] === true) { + if (typeof fillArea === 'string') { + if (fillArea.toLowerCase() === 'true') { + newFunctionObject['fillArea'] = true; + } else if (fillArea.toLowerCase() === 'false') { + newFunctionObject['fillArea'] = false; + } else { + logme('ERROR: The attribute fill_area should be either "true" or "false".'); + logme('fill_area = "' + fillArea + '".'); + + return; + } + } + } + + if (typeof label === 'string') { + + newFunctionObject.specialLabel = false; + newFunctionObject.pldeHash = []; + + // Let's check the label against all of the plde objects. 
+ // plde is an abbreviation for Plot Label Dynamic Elements. + for (c1 = 0; c1 < state.plde.length; c1 += 1) { + rgxp = new RegExp(state.plde[c1].elId, 'g'); + + // If we find a dynamic element in the label, we will + // hash the current plde object, and indicate that this + // is a special label. + if (rgxp.test(label) === true) { + newFunctionObject.specialLabel = true; + newFunctionObject.pldeHash.push(state.plde[c1]); + } + } + + newFunctionObject.label = label; + } else { + newFunctionObject.label = false; + } + + functions.push(newFunctionObject); + } + } + + // The callback that will be called whenever a constant changes (gets + // updated via a slider or a text input). + function onUpdatePlot(event) { + if (generateData() === true) { + updatePlot(); + } + } + + function generateData() { + var c0, c1, c3, functionObj, seriesObj, dataPoints, paramValues, x, y, + start, end, step, numNotUndefined; + + paramValues = state.getAllParameterValues(); + + dataSeries = []; + + for (c0 = 0; c0 < functions.length; c0 += 1) { + functionObj = functions[c0]; + + try { + start = xrange.min.apply(window, paramValues); + } catch (err) { + logme('ERROR: Could not determine xrange start.'); + logme('Error message: "' + err.message + '".'); + + $('#' + gstId).html('
' + 'ERROR IN XML: Could not determine xrange start from defined function.' + '
'); + $('#' + gstId).append('
' + 'Error message: "' + err.message + '".' + '
'); + + return false; + } + try { + end = xrange.max.apply(window, paramValues); + } catch (err) { + logme('ERROR: Could not determine xrange end.'); + logme('Error message: "' + err.message + '".'); + + $('#' + gstId).html('
' + 'ERROR IN XML: Could not determine xrange end from defined function.' + '
'); + $('#' + gstId).append('
' + 'Error message: "' + err.message + '".' + '
'); + + return false; + } + + seriesObj = {}; + dataPoints = []; + + // For counting number of points added. In the end we will + // compare this number to 'numPoints' specified in the config + // JSON. + c1 = 0; + + step = (end - start) / (numPoints - 1); + + // Generate the data points. + for (x = start; x <= end; x += step) { + + // Push the 'x' variable to the end of the parameter array. + paramValues.push(x); + + // We call the user defined function, passing all of the + // available parameter values. Inside this function they + // will be accessible by their names. + try { + y = functionObj.func.apply(window, paramValues); + } catch (err) { + logme('ERROR: Could not generate data.'); + logme('Error message: "' + err.message + '".'); + + $('#' + gstId).html('
' + 'ERROR IN XML: Could not generate data from defined function.' + '
'); + $('#' + gstId).append('
' + 'Error message: "' + err.message + '".' + '
'); + + return false; + } + + // Return the paramValues array to how it was before we + // added 'x' variable to the end of it. + paramValues.pop(); + + // Add the generated point to the data points set. + dataPoints.push([x, y]); + + c1 += 1; + + } + + // If the last point did not get included because of rounding + // of floating-point number addition, then we will include it + // manually. + if (c1 != numPoints) { + x = end; + paramValues.push(x); + try { + y = functionObj.func.apply(window, paramValues); + } catch (err) { + logme('ERROR: Could not generate data.'); + logme('Error message: "' + err.message + '".'); + + $('#' + gstId).html('
' + 'ERROR IN XML: Could not generate data from function.' + '
'); + $('#' + gstId).append('
' + 'Error message: "' + err.message + '".' + '
'); + + return false; + } + paramValues.pop(); + dataPoints.push([x, y]); + } + + // Put the entire data points set into the series object. + seriesObj.data = dataPoints; + + // See if user defined a specific color for this function. + if (functionObj.hasOwnProperty('color') === true) { + seriesObj.color = functionObj.color; + } + + // See if a user defined a label for this function. + if (functionObj.label !== false) { + if (functionObj.specialLabel === true) { + (function (c1) { + var tempLabel; + + tempLabel = functionObj.label; + + while (c1 < functionObj.pldeHash.length) { + tempLabel = tempLabel.replace( + functionObj.pldeHash[c1].elId, + functionObj.pldeHash[c1].func.apply( + window, + state.getAllParameterValues() + ) + ); + + c1 += 1; + } + + seriesObj.label = tempLabel; + }(0)); + } else { + seriesObj.label = functionObj.label; + } + } + + // Should the data points be connected by a line? + seriesObj.lines = { + 'show': functionObj.line + }; + + if (functionObj.hasOwnProperty('fillArea') === true) { + seriesObj.lines.fill = functionObj.fillArea; + } + + // Should each data point be represented by a point on the + // graph? + seriesObj.points = { + 'show': functionObj.dot + }; + + seriesObj.bars = { + 'show': functionObj.bars, + 'barWidth': graphBarWidth + }; + + if (graphBarAlign !== null) { + seriesObj.bars.align = graphBarAlign; + } + + if (functionObj.hasOwnProperty('pointSize')) { + seriesObj.points.radius = functionObj.pointSize; + } + + // Add the newly created series object to the series set which + // will be plotted by Flot. + dataSeries.push(seriesObj); + } + + if (graphBarAlign === null) { + for (c0 = 0; c0 < numPoints; c0 += 1) { + // Number of points that have a value other than 'undefined' (undefined). 
+ numNotUndefined = 0; + + for (c1 = 0; c1 < dataSeries.length; c1 += 1) { + if (dataSeries[c1].bars.show === false) { + continue; + } + + if (isFinite(parseInt(dataSeries[c1].data[c0][1])) === true) { + numNotUndefined += 1; + } + } + + c3 = 0; + for (c1 = 0; c1 < dataSeries.length; c1 += 1) { + if (dataSeries[c1].bars.show === false) { + continue; + } + + dataSeries[c1].data[c0][0] -= graphBarWidth * (0.5 * numNotUndefined - c3); + + if (isFinite(parseInt(dataSeries[c1].data[c0][1])) === true) { + c3 += 1; + } + } + } + } + + for (c0 = 0; c0 < asymptotes.length; c0 += 1) { + + // If the user defined a label for this asympote, then the + // property 'label' will be a string (in the other case it is + // a boolean value 'false'). We will create an empty data set, + // and add to it a label. This solution is a bit _wrong_ , but + // it will have to do for now. Flot JS does not provide a way + // to add labels to markings, and we use markings to generate + // asymptotes. + if (asymptotes[c0].label !== false) { + dataSeries.push({ + 'data': [], + 'label': asymptotes[c0].label, + 'color': asymptotes[c0].color + }); + } + + } + + return true; + } // End-of: function generateData + + function updatePlot() { + var paramValues, plotObj; + + paramValues = state.getAllParameterValues(); + + if (xaxis.tickFormatter !== null) { + xaxis.ticks = null; + } + + if (yaxis.tickFormatter !== null) { + yaxis.ticks = null; + } + + // Tell Flot to draw the graph to our specification. + plotObj = $.plot( + plotDiv, + dataSeries, + { + 'xaxis': xaxis, + 'yaxis': yaxis, + 'legend': { + + // To show the legend or not. Note, even if 'show' is + // 'true', the legend will only show if labels are + // provided for at least one of the series that are + // going to be plotted. + 'show': true, + + // A floating point number in the range [0, 1]. The + // smaller the number, the more transparent will the + // legend background become. 
+ 'backgroundOpacity': 0 + + }, + 'grid': { + 'markings': generateMarkings() + } + } + ); + + updateMovingLabels(); + + // The first time that the graph gets added to the page, the legend + // is created from scratch. When it appears, MathJax works some + // magic, and all of the specially marked TeX gets rendered nicely. + // The next time when we update the graph, no such thing happens. + // We must ask MathJax to typeset the legend again (well, we will + // ask it to look at our entire graph DIV), the next time it's + // worker queue is available. + MathJax.Hub.Queue([ + 'Typeset', + MathJax.Hub, + plotDiv.attr('id') + ]); + + return; + + function updateMovingLabels() { + var c1, labelCoord, pointOffset; + + for (c1 = 0; c1 < movingLabels.length; c1 += 1) { + if (movingLabels[c1].el === null) { + movingLabels[c1].el = $( + '
' + + movingLabels[c1].labelText + + '
' + ); + movingLabels[c1].el.css('position', 'absolute'); + movingLabels[c1].el.css('color', movingLabels[c1].fontColor); + movingLabels[c1].el.css('font-weight', movingLabels[c1].fontWeight); + movingLabels[c1].el.appendTo(plotDiv); + + movingLabels[c1].elWidth = movingLabels[c1].el.width(); + movingLabels[c1].elHeight = movingLabels[c1].el.height(); + } else { + movingLabels[c1].el.detach(); + movingLabels[c1].el.appendTo(plotDiv); + } + + labelCoord = movingLabels[c1].func.apply(window, paramValues); + + pointOffset = plotObj.pointOffset({'x': labelCoord.x, 'y': labelCoord.y}); + + movingLabels[c1].el.css('left', pointOffset.left - 0.5 * movingLabels[c1].elWidth); + movingLabels[c1].el.css('top', pointOffset.top - 0.5 * movingLabels[c1].elHeight); + } + } + + // Generate markings to represent asymptotes defined by the user. + // See the following function for more details: + // + // function processAsymptote() + // + function generateMarkings() { + var c1, asymptote, markings, val; + + markings = []; + + for (c1 = 0; c1 < asymptotes.length; c1 += 1) { + asymptote = asymptotes[c1]; + + try { + val = asymptote.func.apply(window, paramValues); + } catch (err) { + logme('ERROR: Could not generate value from asymptote function.'); + logme('Error message: ', err.message); + + continue; + } + + if (asymptote.type === 'x') { + markings.push({ + 'color': asymptote.color, + 'lineWidth': 2, + 'xaxis': { + 'from': val, + 'to': val + } + }); + } else { + markings.push({ + 'color': asymptote.color, + 'lineWidth': 2, + 'yaxis': { + 'from': val, + 'to': val + } + }); + + } + } + + return markings; + } + } + + function xAxisTickFormatter(val, axis) { + if (xTicksNames.hasOwnProperty(val.toFixed(axis.tickDecimals)) === true) { + return xTicksNames[val.toFixed(axis.tickDecimals)]; + } + + return ''; + } + + function yAxisTickFormatter(val, axis) { + if (yTicksNames.hasOwnProperty(val.toFixed(axis.tickDecimals)) === true) { + return yTicksNames[val.toFixed(axis.tickDecimals)]; + } 
+ + return ''; + } + } + + +}); + +// End of wrapper for RequireJS. As you can see, we are passing +// namespaced Require JS variables to an anonymous function. Within +// it, you can use the standard requirejs(), require(), and define() +// functions as if they were in the global namespace. +}(RequireJS.requirejs, RequireJS.require, RequireJS.define)); // End-of: (function (requirejs, require, define) diff --git a/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/gst.js b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/gst.js new file mode 100644 index 0000000000..73252455d0 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/gst.js @@ -0,0 +1,22 @@ +/* + * We will add a function that will be called for all GraphicalSliderTool + * xmodule module instances. It must be available globally by design of + * xmodule. + */ +window.GraphicalSliderTool = function (el) { + // All the work will be performed by the GstMain module. We will get access + // to it, and all it's dependencies, via Require JS. Currently Require JS + // is namespaced and is available via a global object RequireJS. + RequireJS.require(['GstMain'], function (GstMain) { + // The GstMain module expects the DOM ID of a Graphical Slider Tool + // element. Since we are given a
element which might in + // theory contain multiple graphical_slider_tool
elements (each + // with a unique DOM ID), we will iterate over all children, and for + // each match, we will call GstMain module. + $(el).children('.graphical_slider_tool').each(function (index, value) { + JavascriptLoader.executeModuleScripts($(value), function(){ + GstMain($(value).attr('id')); + }); + }); + }); +}; diff --git a/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/gst_main.js b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/gst_main.js new file mode 100644 index 0000000000..3d9f511ca9 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/gst_main.js @@ -0,0 +1,84 @@ +// Wrapper for RequireJS. It will make the standard requirejs(), require(), and +// define() functions from Require JS available inside the anonymous function. +(function (requirejs, require, define) { + +define( + 'GstMain', + + // Even though it is not explicitly in this module, we have to specify + // 'GeneralMethods' as a dependency. It expands some of the core JS objects + // with additional useful methods that are used in other modules. + ['State', 'GeneralMethods', 'Sliders', 'Inputs', 'Graph', 'ElOutput', 'GLabelElOutput', 'logme'], + function (State, GeneralMethods, Sliders, Inputs, Graph, ElOutput, GLabelElOutput, logme) { + + return GstMain; + + function GstMain(gstId) { + var config, gstClass, state; + + if ($('#' + gstId).attr('data-processed') !== 'processed') { + $('#' + gstId).attr('data-processed', 'processed'); + } else { + // logme('MESSAGE: Already processed GST with ID ' + gstId + '. Skipping.'); + + return; + } + + // Get the JSON configuration, parse it, and store as an object. + try { + config = JSON.parse($('#' + gstId + '_json').html()).root; + } catch (err) { + logme('ERROR: could not parse config JSON.'); + logme('$("#" + gstId + "_json").html() = ', $('#' + gstId + '_json').html()); + logme('JSON.parse(...) 
= ', JSON.parse($('#' + gstId + '_json').html())); + logme('config = ', config); + + return; + } + + // Get the class name of the GST. All elements are assigned a class + // name that is based on the class name of the GST. For example, inputs + // are assigned a class name '{GST class name}_input'. + if (typeof config['@class'] !== 'string') { + logme('ERROR: Could not get the class name of GST.'); + logme('config["@class"] = ', config['@class']); + + return; + } + gstClass = config['@class']; + + // Parse the configuration settings for parameters, and store them in a + // state object. + state = State(gstId, config); + + // It is possible that something goes wrong while extracting parameters + // from the JSON config object. In this case, we will not continue. + if (state === undefined) { + logme('ERROR: The state object was not initialized properly.'); + + return; + } + + // Create the sliders and the text inputs, attaching them to + // appropriate parameters. + Sliders(gstId, state); + Inputs(gstId, gstClass, state); + + // Configure functions that output to an element instead of the graph. + ElOutput(config, state); + + // Configure functions that output to an element instead of the graph + // label. + GLabelElOutput(config, state); + + // Configure and display the graph. Attach event for the graph to be + // updated on any change of a slider or a text input. + Graph(gstId, config, state); + } +}); + +// End of wrapper for RequireJS. As you can see, we are passing +// namespaced Require JS variables to an anonymous function. Within +// it, you can use the standard requirejs(), require(), and define() +// functions as if they were in the global namespace. 
+}(RequireJS.requirejs, RequireJS.require, RequireJS.define)); // End-of: (function (requirejs, require, define) diff --git a/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/inputs.js b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/inputs.js new file mode 100644 index 0000000000..a04ed113ec --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/inputs.js @@ -0,0 +1,88 @@ +// Wrapper for RequireJS. It will make the standard requirejs(), require(), and +// define() functions from Require JS available inside the anonymous function. +(function (requirejs, require, define) { + +define('Inputs', ['logme'], function (logme) { + return Inputs; + + function Inputs(gstId, gstClass, state) { + var c1, paramName, allParamNames; + + allParamNames = state.getAllParameterNames(); + + for (c1 = 0; c1 < allParamNames.length; c1 += 1) { + $('#' + gstId).find('.' + gstClass + '_input').each(function (index, value) { + var inputDiv, paramName; + + paramName = allParamNames[c1]; + inputDiv = $(value); + + if (paramName === inputDiv.data('var')) { + createInput(inputDiv, paramName); + } + }); + } + + return; + + function createInput(inputDiv, paramName) { + var paramObj; + + paramObj = state.getParamObj(paramName); + + // Check that the retrieval went OK. + if (paramObj === undefined) { + logme('ERROR: Could not get a paramObj for parameter "' + paramName + '".'); + + return; + } + + // Bind a function to the 'change' event. Whenever the user changes + // the value of this text input, and presses 'enter' (or clicks + // somewhere else on the page), this event will be triggered, and + // our callback will be called. + inputDiv.bind('change', inputOnChange); + + inputDiv.val(paramObj.value); + + // Lets style the input element nicely. We will use the button() + // widget for this since there is no native widget for the text + // input. 
+ inputDiv.button().css({ + 'font': 'inherit', + 'color': 'inherit', + 'text-align': 'left', + 'outline': 'none', + 'cursor': 'text', + 'height': '15px' + }); + + // Tell the parameter object from state that we are attaching a + // text input to it. Next time the parameter will be updated with + // a new value, tis input will also be updated. + paramObj.inputDivs.push(inputDiv); + + return; + + // Update the 'state' - i.e. set the value of the parameter this + // input is attached to to a new value. + // + // This will cause the plot to be redrawn each time after the user + // changes the value in the input. Note that he has to either press + // 'Enter', or click somewhere else on the page in order for the + // 'change' event to be tiggered. + function inputOnChange(event) { + var inputDiv; + + inputDiv = $(this); + state.setParameterValue(paramName, inputDiv.val(), inputDiv); + } + } + } +}); + +// End of wrapper for RequireJS. As you can see, we are passing +// namespaced Require JS variables to an anonymous function. Within +// it, you can use the standard requirejs(), require(), and define() +// functions as if they were in the global namespace. 
+}(RequireJS.requirejs, RequireJS.require, RequireJS.define)); // End-of: (function (requirejs, require, define) diff --git a/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/jstat-1.0.0.min.js b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/jstat-1.0.0.min.js new file mode 100644 index 0000000000..7f9cd4a124 --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/jstat-1.0.0.min.js @@ -0,0 +1,236 @@ +function jstat(){} +j=jstat;(function(){var initializing=false,fnTest=/xyz/.test(function(){xyz;})?/\b_super\b/:/.*/;this.Class=function(){};Class.extend=function(prop){var _super=this.prototype;initializing=true;var prototype=new this();initializing=false;for(var name in prop){prototype[name]=typeof prop[name]=="function"&&typeof _super[name]=="function"&&fnTest.test(prop[name])?(function(name,fn){return function(){var tmp=this._super;this._super=_super[name];var ret=fn.apply(this,arguments);this._super=tmp;return ret;};})(name,prop[name]):prop[name];} +function Class(){if(!initializing&&this.init) +this.init.apply(this,arguments);} +Class.prototype=prototype;Class.constructor=Class;Class.extend=arguments.callee;return Class;};})();jstat.ONE_SQRT_2PI=0.3989422804014327;jstat.LN_SQRT_2PI=0.9189385332046727417803297;jstat.LN_SQRT_PId2=0.225791352644727432363097614947;jstat.DBL_MIN=2.22507e-308;jstat.DBL_EPSILON=2.220446049250313e-16;jstat.SQRT_32=5.656854249492380195206754896838;jstat.TWO_PI=6.283185307179586;jstat.DBL_MIN_EXP=-999;jstat.SQRT_2dPI=0.79788456080287;jstat.LN_SQRT_PI=0.5723649429247;jstat.seq=function(min,max,length){var r=new Range(min,max,length);return r.getPoints();} +jstat.dnorm=function(x,mean,sd,log){if(mean==null)mean=0;if(sd==null)sd=1;if(log==null)log=false;var n=new NormalDistribution(mean,sd);if(!isNaN(x)){return n._pdf(x,log);}else if(x.length){var res=[];for(var i=0;i
');$('#'+hash).dialog({modal:false,width:475,height:475,resizable:true,resize:function(){$.plot($('#graph-'+hash),[series],flotOpt);},open:function(event,ui){var id='#graph-'+hash;$.plot($('#graph-'+hash),[series],flotOpt);}})} +jstat.log10=function(arg){return Math.log(arg)/Math.LN10;} +jstat.toSigFig=function(num,n){if(num==0){return 0;} +var d=Math.ceil(jstat.log10(num<0?-num:num));var power=n-parseInt(d);var magnitude=Math.pow(10,power);var shifted=Math.round(num*magnitude);return shifted/magnitude;} +jstat.trunc=function(x){return(x>0)?Math.floor(x):Math.ceil(x);} +jstat.isFinite=function(x){return(!isNaN(x)&&(x!=Number.POSITIVE_INFINITY)&&(x!=Number.NEGATIVE_INFINITY));} +jstat.dopois_raw=function(x,lambda,give_log){if(lambda==0){if(x==0){return(give_log)?0.0:1.0;} +return(give_log)?Number.NEGATIVE_INFINITY:0.0;} +if(!jstat.isFinite(lambda))return(give_log)?Number.NEGATIVE_INFINITY:0.0;if(x<0)return(give_log)?Number.NEGATIVE_INFINITY:0.0;if(x<=lambda*jstat.DBL_MIN){return(give_log)?-lambda:Math.exp(-lambda);} +if(lambda0.1*(x+np)){v=(x-np)/(x+np);s=(x-np)*v;ej=2*x*v;v=v*v;for(j=1;;j++){ej*=v;s1=s+ej/((j<<1)+1);if(s1==s) +return(s1);s=s1;}} +return(x*Math.log(x/np)+np-x);} +jstat.stirlerr=function(n){var S0=0.083333333333333333333;var S1=0.00277777777777777777778;var S2=0.00079365079365079365079365;var S3=0.000595238095238095238095238;var S4=0.0008417508417508417508417508;var 
sferr_halves=[0.0,0.1534264097200273452913848,0.0810614667953272582196702,0.0548141210519176538961390,0.0413406959554092940938221,0.03316287351993628748511048,0.02767792568499833914878929,0.02374616365629749597132920,0.02079067210376509311152277,0.01848845053267318523077934,0.01664469118982119216319487,0.01513497322191737887351255,0.01387612882307074799874573,0.01281046524292022692424986,0.01189670994589177009505572,0.01110455975820691732662991,0.010411265261972096497478567,0.009799416126158803298389475,0.009255462182712732917728637,0.008768700134139385462952823,0.008330563433362871256469318,0.007934114564314020547248100,0.007573675487951840794972024,0.007244554301320383179543912,0.006942840107209529865664152,0.006665247032707682442354394,0.006408994188004207068439631,0.006171712263039457647532867,0.005951370112758847735624416,0.005746216513010115682023589,0.005554733551962801371038690];var nn;if(n<=15.0){nn=n+n;if(nn==parseInt(nn))return(sferr_halves[parseInt(nn)]);return(jstat.lgamma(n+1.0)-(n+0.5)*Math.log(n)+n-jstat.LN_SQRT_2PI);} +nn=n*n;if(n>500)return((S0-S1/nn)/n);if(n>80)return((S0-(S1-S2/nn)/nn)/n);if(n>35)return((S0-(S1-(S2-S3/nn)/nn)/nn)/n);return((S0-(S1-(S2-(S3-S4/nn)/nn)/nn)/nn)/n);} +jstat.lgamma=function(x){function lgammafn_sign(x,sgn){var ans,y,sinpiy;var xmax=2.5327372760800758e+305;var dxrel=1.490116119384765696e-8;if(sgn!=null)sgn=1;if(isNaN(x))return x;if(x<0&&(Math.floor(-x)%2.0)==0) +if(sgn!=null)sgn=-1;if(x<=0&&x==jstat.trunc(x)){console.warn("Negative integer argument in lgammafn_sign");return Number.POSITIVE_INFINITY;} +y=Math.abs(x);if(y<=10)return Math.log(Math.abs(jstat.gamma(x)));if(y>xmax){console.warn("Illegal arguement passed to lgammafn_sign");return Number.POSITIVE_INFINITY;} +if(x>0){if(x>1e17){return(x*(Math.log(x)-1.0));}else if(x>4934720.0){return(jstat.LN_SQRT_2PI+(x-0.5)*Math.log(x)-x);}else{return jstat.LN_SQRT_2PI+(x-0.5)*Math.log(x)-x+jstat.lgammacor(x);}} +sinpiy=Math.abs(Math.sin(Math.PI*y));if(sinpiy==0){throw"Should 
never happen!!";} +ans=jstat.LN_SQRT_PId2+(x-0.5)*Math.log(y)-x-Math.log(sinpiy)-jstat.lgammacor(y);if(Math.abs((x-jstat.trunc(x-0.5))*ans/x)=jstat.DBL_MIN){res=1.0/y;}else{return(Number.POSITIVE_INFINITY);}}else if(y<12.0){yi=y;if(y<1.0){z=y;y+=1.0;}else{n=parseInt(y)-1;y-=parseFloat(n);z=y-1.0;} +xnum=0.0;xden=1.0;for(i=0;i<8;++i){xnum=(xnum+p[i])*z;xden=xden*z+q[i];} +res=xnum/xden+1.0;if(yiy){for(i=0;i=xmax){throw"Underflow error in lgammacor";}else if(xMAXIT){console.warn("a or b too big, or MAXIT too small in betacf: "+a+", "+b+", "+x+", "+h);return h;} +if(isNaN(h)){console.warn(a+", "+b+", "+x);} +return h;} +var bt;if(x<0.0||x>1.0){throw"bad x in routine incompleteBeta";} +if(x==0.0||x==1.0){bt=0.0;}else{bt=Math.exp(jstat.lgamma(a+b)-jstat.lgamma(a)-jstat.lgamma(b)+a*Math.log(x)+b*Math.log(1.0-x));} +if(x<(a+1.0)/(a+b+2.0)){return bt*betacf(a,b,x)/a;}else{return 1.0-bt*betacf(b,a,1.0-x)/b;}} +jstat.chebyshev=function(x,a,n){var b0,b1,b2,twox;var i;if(n<1||n>1000)return Number.NaN;if(x<-1.1||x>1.1)return Number.NaN;twox=x*2;b2=b1=0;b0=0;for(i=1;i<=n;i++){b2=b1;b1=b0;b0=twox*b1-b2+a[n-i];} +return(b0-b2)*0.5;} +jstat.fmin2=function(x,y){return(x1){return Math.log(1+x);} +for(var i=1;i0.697)return Math.exp(x)-1;if(a>1e-8){y=Math.exp(x)-1;}else{y=(x/2+1)*x;} +y-=(1+y)*(jstat.log1p(y)-x);return y;} +jstat.logBeta=function(a,b){var corr,p,q;p=q=a;if(bq)q=b;if(p<0){console.warn('Both arguements must be >= 0');return Number.NaN;} +else if(p==0){return Number.POSITIVE_INFINITY;} +else if(!jstat.isFinite(q)){return Number.NEGATIVE_INFINITY;} +if(p>=10){corr=jstat.lgammacor(p)+jstat.lgammacor(q)-jstat.lgammacor(p+q);return Math.log(q)*-0.5+jstat.LN_SQRT_2PI+corr ++(p-0.5)*Math.log(p/(p+q))+q*jstat.log1p(-p/(p+q));} +else if(q>=10){corr=jstat.lgammacor(q)-jstat.lgammacor(p+q);return jstat.lgamma(p)+corr+p-p*Math.log(p+q) ++(q-0.5)*jstat.log1p(-p/(p+q));} +else +return Math.log(jstat.gamma(p)*(jstat.gamma(q)/jstat.gamma(p+q)));} 
+jstat.dbinom_raw=function(x,n,p,q,give_log){if(give_log==null)give_log=false;var lf,lc;if(p==0){if(x==0){return(give_log)?0.0:1.0;}else{return(give_log)?Number.NEGATIVE_INFINITY:0.0;}} +if(q==0){if(x==n){return(give_log)?0.0:1.0;}else{return(give_log)?Number.NEGATIVE_INFINITY:0.0;}} +if(x==0){if(n==0)return(give_log)?0.0:1.0;lc=(p<0.1)?-jstat.bd0(n,n*q)-n*p:n*Math.log(q);return(give_log)?lc:Math.exp(lc);} +if(x==n){lc=(q<0.1)?-jstat.bd0(n,n*p)-n*q:n*Math.log(p);return(give_log)?lc:Math.exp(lc);} +if(x<0||x>n)return(give_log)?Number.NEGATIVE_INFINITY:0.0;lc=jstat.stirlerr(n)-jstat.stirlerr(x)-jstat.stirlerr(n-x)-jstat.bd0(x,n*p)-jstat.bd0(n-x,n*q);lf=Math.log(jstat.TWO_PI)+Math.log(x)+jstat.log1p(-x/n);return(give_log)?lc-0.5*lf:Math.exp(lc-0.5*lf);} +jstat.max=function(values){var max=Number.NEGATIVE_INFINITY;for(var i=0;imax){max=values[i];}} +return max;} +var Range=Class.extend({init:function(min,max,numPoints){this._minimum=parseFloat(min);this._maximum=parseFloat(max);this._numPoints=parseFloat(numPoints);},getMinimum:function(){return this._minimum;},getMaximum:function(){return this._maximum;},getNumPoints:function(){return this._numPoints;},getPoints:function(){var results=[];var x=this._minimum;var step=(this._maximum-this._minimum)/(this._numPoints-1);for(var i=0;ieps){xsq=x*x;xnum=a[4]*xsq;xden=xsq;for(i=0;i<3;++i){xnum=(xnum+a[i])*xsq;xden=(xden+b[i])*xsq;}}else{xnum=xden=0.0;} +temp=x*(xnum+a[3])/(xden+b[3]);if(lower)cum=0.5+temp;if(upper)ccum=0.5-temp;if(log_p){if(lower)cum=Math.log(cum);if(upper)ccum=Math.log(ccum);}}else if(y<=jstat.SQRT_32){xnum=c[8]*y;xden=y;for(i=0;i<7;++i){xnum=(xnum+c[i])*y;xden=(xden+d[i])*y;} +temp=(xnum+c[7])/(xden+d[7]);xsq=jstat.trunc(x*16)/16;del=(x-xsq)*(x+xsq);if(log_p){cum=(-xsq*xsq*0.5)+(-del*0.5)+Math.log(temp);if((lower&&x>0.)||(upper&&x<=0.)) +ccum=jstat.log1p(-Math.exp(-xsq*xsq*0.5)*Math.exp(-del*0.5)*temp);} +else{cum=Math.exp(-xsq*xsq*0.5)*Math.exp(-del*0.5)*temp;ccum=1.0-cum;} 
+if(x>0.0){temp=cum;if(lower){cum=ccum;} +ccum=temp;}} +else if((log_p&&y<1e170)||(lower&&-37.51930.)||(upper&&x<=0.)) +ccum=jstat.log1p(-Math.exp(-xsq*xsq*0.5)*Math.exp(-del*0.5)*temp);} +else{cum=Math.exp(-xsq*xsq*0.5)*Math.exp(-del*0.5)*temp;ccum=1.0-cum;} +if(x>0.0){temp=cum;if(lower){cum=ccum;} +ccum=temp;}}else{if(x>0){cum=(log_p)?0.0:1.0;ccum=(log_p)?Number.NEGATIVE_INFINITY:0.0;}else{cum=(log_p)?Number.NEGATIVE_INFINITY:0.0;ccum=(log_p)?0.0:1.0;}} +return[cum,ccum];} +var p,cp;var mu=this._mean;var sigma=this._sigma;var R_DT_0,R_DT_1;if(lower_tail){if(log_p){R_DT_0=Number.NEGATIVE_INFINITY;R_DT_1=0.0;}else{R_DT_0=0.0;R_DT_1=1.0;}}else{if(log_p){R_DT_0=0.0;R_DT_1=Number.NEGATIVE_INFINITY;}else{R_DT_0=1.0;R_DT_1=0.0;}} +if(!jstat.isFinite(x)&&mu==x)return Number.NaN;if(sigma<=0){if(sigma<0){console.warn("Sigma is less than 0");return Number.NaN;} +return(x0){var nd=new NormalDistribution(meanlog,sdlog);return nd._cdf(Math.log(x),lower_tail,log_p);} +if(lower_tail){return(log_p)?Number.NEGATIVE_INFINITY:0.0;}else{return(log_p)?0.0:1.0;}},getLocation:function(){return this._location;},getScale:function(){return this._scale;},getMean:function(){return Math.exp((this._location+this._scale)/2);},getVariance:function(){var ans=(Math.exp(this._scale)-1)*Math.exp(2*this._location+this._scale);return ans;}});var GammaDistribution=ContinuousDistribution.extend({init:function(shape,scale){this._super('Gamma');this._shape=parseFloat(shape);this._scale=parseFloat(scale);this._string="Gamma ("+this._shape.toFixed(2)+", "+this._scale.toFixed(2)+")";},_pdf:function(x,give_log){var pr;var shape=this._shape;var scale=this._scale;if(give_log==null){give_log=false;} +if(shape<0||scale<=0){throw"Illegal argument in _pdf";} +if(x<0){return(give_log)?Number.NEGATIVE_INFINITY:0.0;} +if(shape==0){return(x==0)?Number.POSITIVE_INFINITY:(give_log)?Number.NEGATIVE_INFINITY:0.0;} +if(x==0){if(shape<1)return 
Number.POSITIVE_INFINITY;if(shape>1)return(give_log)?Number.NEGATIVE_INFINITY:0.0;return(give_log)?-Math.log(scale):1/scale;} +if(shape<1){pr=jstat.dopois_raw(shape,x/scale,give_log);return give_log?pr+Math.log(shape/x):pr*shape/x;} +pr=jstat.dopois_raw(shape-1,x/scale,give_log);return give_log?pr-Math.log(scale):pr/scale;},_cdf:function(x,lower_tail,log_p){function USE_PNORM(){pn1=Math.sqrt(alph)*3.0*(Math.pow(x/alph,1.0/3.0)+1.0/(9.0*alph)-1.0);var norm_dist=new NormalDistribution(0.0,1.0);return norm_dist._cdf(pn1,lower_tail,log_p);} +if(lower_tail==null)lower_tail=true;if(log_p==null)log_p=false;var alph=this._shape;var scale=this._scale;var xbig=1.0e+8;var xlarge=1.0e+37;var alphlimit=1e5;var pn1,pn2,pn3,pn4,pn5,pn6,arg,a,b,c,an,osum,sum,n,pearson;if(alph<=0.||scale<=0.){console.warn('Invalid gamma params in _cdf');return Number.NaN;} +x/=scale;if(isNaN(x))return x;if(x<=0.0){if(lower_tail){return(log_p)?Number.NEGATIVE_INFINITY:0.0;}else{return(log_p)?0.0:1.0;}} +if(alph>alphlimit){return USE_PNORM();} +if(x>xbig*alph){if(x>jstat.DBL_MAX*alph){if(lower_tail){return(log_p)?0.0:1.0;}else{return(log_p)?Number.NEGATIVE_INFINITY:0.0;}}else{return USE_PNORM();}} +if(x<=1.0||xjstat.DBL_EPSILON*sum);}else{pearson=0;arg=alph*Math.log(x)-x-jstat.lgamma(alph);a=1.-alph;b=a+x+1.;pn1=1.;pn2=x;pn3=x+1.;pn4=x*b;sum=pn3/pn4;for(n=1;;n++){a+=1.;b+=2.;an=a*n;pn5=b*pn3-an*pn1;pn6=b*pn4-an*pn2;if(Math.abs(pn6)>0.){osum=sum;sum=pn5/pn6;if(Math.abs(osum-sum)<=jstat.DBL_EPSILON*jstat.fmin2(1.0,sum)) +break;} +pn1=pn3;pn2=pn4;pn3=pn5;pn4=pn6;if(Math.abs(pn5)>=xlarge){pn1/=xlarge;pn2/=xlarge;pn3/=xlarge;pn4/=xlarge;}}} +arg+=Math.log(sum);lower_tail=(lower_tail==pearson);if(log_p&&lower_tail) +return(arg);if(lower_tail){return Math.exp(arg);}else{if(log_p){return(arg>-Math.LN2)?Math.log(-jstat.expm1(arg)):jstat.log1p(-Math.exp(arg));}else{return-jstat.expm1(arg);}}},getShape:function(){return this._shape;},getScale:function(){return this._scale;},getMean:function(){return 
this._shape*this._scale;},getVariance:function(){return this._shape*Math.pow(this._scale,2);}});var BetaDistribution=ContinuousDistribution.extend({init:function(alpha,beta){this._super('Beta');this._alpha=parseFloat(alpha);this._beta=parseFloat(beta);this._string="Beta ("+this._alpha.toFixed(2)+", "+this._beta.toFixed(2)+")";},_pdf:function(x,give_log){if(give_log==null)give_log=false;var a=this._alpha;var b=this._beta;var lval;if(a<=0||b<=0){console.warn('Illegal arguments in _pdf');return Number.NaN;} +if(x<0||x>1){return(give_log)?Number.NEGATIVE_INFINITY:0.0;} +if(x==0){if(a>1){return(give_log)?Number.NEGATIVE_INFINITY:0.0;} +if(a<1){return Number.POSITIVE_INFINITY;} +return(give_log)?Math.log(b):b;} +if(x==1){if(b>1){return(give_log)?Number.NEGATIVE_INFINITY:0.0;} +if(b<1){return Number.POSITIVE_INFINITY;} +return(give_log)?Math.log(a):a;} +if(a<=2||b<=2){lval=(a-1)*Math.log(x)+(b-1)*jstat.log1p(-x)-jstat.logBeta(a,b);}else{lval=Math.log(a+b-1)+jstat.dbinom_raw(a-1,a+b-2,x,1-x,true);} +return(give_log)?lval:Math.exp(lval);},_cdf:function(x,lower_tail,log_p){if(lower_tail==null)lower_tail=true;if(log_p==null)log_p=false;var pin=this._alpha;var qin=this._beta;if(pin<=0||qin<=0){console.warn('Invalid argument in _cdf');return Number.NaN;} +if(x<=0){if(lower_tail){return(log_p)?Number.NEGATIVE_INFINITY:0.0;}else{return(log_p)?0.1:1.0;}} +if(x>=1){if(lower_tail){return(log_p)?0.1:1.0;}else{return(log_p)?Number.NEGATIVE_INFINITY:0.0;}} +return jstat.incompleteBeta(pin,qin,x);},getAlpha:function(){return this._alpha;},getBeta:function(){return this._beta;},getMean:function(){return this._alpha/(this._alpha+this._beta);},getVariance:function(){var ans=(this._alpha*this._beta)/(Math.pow(this._alpha+this._beta,2)*(this._alpha+this._beta+1));return ans;}});var StudentTDistribution=ContinuousDistribution.extend({init:function(degreesOfFreedom,mu){this._super('StudentT');this._dof=parseFloat(degreesOfFreedom);if(mu!=null){this._mu=parseFloat(mu);this._string="StudentT 
("+this._dof.toFixed(2)+", "+this._mu.toFixed(2)+")";}else{this._mu=0.0;this._string="StudentT ("+this._dof.toFixed(2)+")";}},_pdf:function(x,give_log){if(give_log==null)give_log=false;if(this._mu==null){return this._dt(x,give_log);}else{var y=this._dnt(x,give_log);if(y>1){console.warn('x:'+x+', y: '+y);} +return y;}},_cdf:function(x,lower_tail,give_log){if(lower_tail==null)lower_tail=true;if(give_log==null)give_log=false;if(this._mu==null){return this._pt(x,lower_tail,give_log);}else{return this._pnt(x,lower_tail,give_log);}},_dt:function(x,give_log){var t,u;var n=this._dof;if(n<=0){console.warn('Invalid parameters in _dt');return Number.NaN;} +if(!jstat.isFinite(x)){return(give_log)?Number.NEGATIVE_INFINITY:0.0;} +if(!jstat.isFinite(n)){var norm=new NormalDistribution(0.0,1.0);return norm.density(x,give_log);} +t=-jstat.bd0(n/2.0,(n+1)/2.0)+jstat.stirlerr((n+1)/2.0)-jstat.stirlerr(n/2.0);if(x*x>0.2*n) +u=Math.log(1+x*x/n)*n/2;else +u=-jstat.bd0(n/2.0,(n+x*x)/2.0)+x*x/2.0;var p1=jstat.TWO_PI*(1+x*x/n);var p2=t-u;return(give_log)?-0.5*Math.log(p1)+p2:Math.exp(p2)/Math.sqrt(p1);},_dnt:function(x,give_log){if(give_log==null)give_log=false;var df=this._dof;var ncp=this._mu;var u;if(df<=0.0){console.warn("Illegal arguments _dnf");return Number.NaN;} +if(ncp==0.0){return this._dt(x,give_log);} +if(!jstat.isFinite(x)){if(give_log){return Number.NEGATIVE_INFINITY;}else{return 0.0;}} +if(!isFinite(df)||df>1e8){var dist=new NormalDistribution(ncp,1.);return dist.density(x,give_log);} +if(Math.abs(x)>Math.sqrt(df*jstat.DBL_EPSILON)){var newT=new StudentTDistribution(df+2,ncp);u=Math.log(df)-Math.log(Math.abs(x))+ +Math.log(Math.abs(newT._pnt(x*Math.sqrt((df+2)/df),true,false)- +this._pnt(x,true,false)));} +else{u=jstat.lgamma((df+1)/2)-jstat.lgamma(df/2) +-.5*(Math.log(Math.PI)+Math.log(df)+ncp*ncp);} +return(give_log?u:Math.exp(u));},_pt:function(x,lower_tail,log_p){if(lower_tail==null)lower_tail=true;if(log_p==null)log_p=false;var val,nx;var n=this._dof;var 
DT_0,DT_1;if(lower_tail){if(log_p){DT_0=Number.NEGATIVE_INFINITY;DT_1=1.;}else{DT_0=0.;DT_1=1.;}}else{if(log_p){DT_0=0.;DT_1=Number.NEGATIVE_INFINITY;}else{DT_0=1.;DT_1=0.;}} +if(n<=0.0){console.warn("Invalid T distribution _pt");return Number.NaN;} +var norm=new NormalDistribution(0,1);if(!jstat.isFinite(x)){return(x<0)?DT_0:DT_1;} +if(!jstat.isFinite(n)){return norm._cdf(x,lower_tail,log_p);} +if(n>4e5){val=1./(4.*n);return norm._cdf(x*(1.-val)/sqrt(1.+x*x*2.*val),lower_tail,log_p);} +nx=1+(x/n)*x;if(nx>1e100){var lval;lval=-0.5*n*(2*Math.log(Math.abs(x))-Math.log(n)) +-jstat.logBeta(0.5*n,0.5)-Math.log(0.5*n);val=log_p?lval:Math.exp(lval);}else{if(n>x*x){var beta=new BetaDistribution(0.5,n/2.);return beta._cdf(x*x/(n+x*x),false,log_p);}else{beta=new BetaDistribution(n/2.,0.5);return beta._cdf(1./nx,true,log_p);}} +if(x<=0.) +lower_tail=!lower_tail;if(log_p){if(lower_tail)return jstat.log1p(-0.5*Math.exp(val));else return val-M_LN2;} +else{val/=2.;if(lower_tail){return(0.5-val+0.5);}else{return val;}}},_pnt:function(t,lower_tail,log_p){var dof=this._dof;var ncp=this._mu;var DT_0,DT_1;if(lower_tail){if(log_p){DT_0=Number.NEGATIVE_INFINITY;DT_1=1.;}else{DT_0=0.;DT_1=1.;}}else{if(log_p){DT_0=0.;DT_1=Number.NEGATIVE_INFINITY;}else{DT_0=1.;DT_1=0.;}} +var albeta,a,b,del,errbd,lambda,rxb,tt,x;var geven,godd,p,q,s,tnc,xeven,xodd;var it,negdel;var ITRMAX=1000;var ERRMAX=1.e-7;if(dof<=0.0){return Number.NaN;}else if(dof==0.0){return this._pt(t);} +if(!jstat.isFinite(t)){return(t<0)?DT_0:DT_1;} +if(t>=0.){negdel=false;tt=t;del=ncp;}else{if(ncp>=40&&(!log_p||!lower_tail)){return DT_0;} +negdel=true;tt=-t;del=-ncp;} +if(dof>4e5||del*del>2*Math.LN2*(-(jstat.DBL_MIN_EXP))){s=1./(4.*dof);var norm=new NormalDistribution(del,Math.sqrt(1.+tt*tt*2.*s));var result=norm._cdf(tt*(1.-s),lower_tail!=negdel,log_p);return result;} +x=t*t;rxb=dof/(x+dof);x=x/(x+dof);if(x>0.){lambda=del*del;p=.5*Math.exp(-.5*lambda);if(p==0.){console.warn("underflow in _pnt");return DT_0;} 
+q=jstat.SQRT_2dPI*p*del;s=.5-p;if(s<1e-7){s=-0.5*jstat.expm1(-0.5*lambda);} +a=.5;b=.5*dof;rxb=Math.pow(rxb,b);albeta=jstat.LN_SQRT_PI+jstat.lgamma(b)-jstat.lgamma(.5+b);xodd=jstat.incompleteBeta(a,b,x);godd=2.*rxb*Math.exp(a*Math.log(x)-albeta);tnc=b*x;xeven=(tnc1)break;errbd=2.*s*(xodd-godd);if(Math.abs(errbd)1-1e-10&&lower_tail){console.warn("precision error _pnt");} +var res=jstat.fmin2(tnc,1.);if(lower_tail){if(log_p){return Math.log(res);}else{return res;}}else{if(log_p){return jstat.log1p(-(res));}else{return(0.5-(res)+0.5);}}},getDegreesOfFreedom:function(){return this._dof;},getNonCentralityParameter:function(){return this._mu;},getMean:function(){if(this._dof>1){var ans=(1/2)*Math.log(this._dof/2)+jstat.lgamma((this._dof-1)/2)-jstat.lgamma(this._dof/2) +return Math.exp(ans)*this._mu;}else{return Number.NaN;}},getVariance:function(){if(this._dof>2){var ans=this._dof*(1+this._mu*this._mu)/(this._dof-2)-(((this._mu*this._mu*this._dof)/2)*Math.pow(Math.exp(jstat.lgamma((this._dof-1)/2)-jstat.lgamma(this._dof/2)),2));return ans;}else{return Number.NaN;}}});var Plot=Class.extend({init:function(id,options){this._container='#'+String(id);this._plots=[];this._flotObj=null;this._locked=false;if(options!=null){this._options=options;}else{this._options={};}},getContainer:function(){return this._container;},getGraph:function(){return this._flotObj;},setData:function(data){this._plots=data;},clear:function(){this._plots=[];},showLegend:function(){this._options.legend={show:true} +this.render();},hideLegend:function(){this._options.legend={show:false} +this.render();},render:function(){this._flotObj=null;this._flotObj=$.plot($(this._container),this._plots,this._options);}});var 
DistributionPlot=Plot.extend({init:function(id,distribution,range,options){this._super(id,options);this._showPDF=true;this._showCDF=false;this._pdfValues=[];this._cdfValues=[];this._maxY=1;this._plotType='line';this._fill=false;this._distribution=distribution;if(range!=null&&Range.validate(range)){this._range=range;}else{this._range=this._distribution.getRange();} +if(this._distribution!=null){this._maxY=this._generateValues();}else{this._options.xaxis={min:range.getMinimum(),max:range.getMaximum()} +this._options.yaxis={max:1}} +this.render();},setHover:function(bool){if(bool){if(this._options.grid==null){this._options.grid={hoverable:true,mouseActiveRadius:25}}else{this._options.grid.hoverable=true,this._options.grid.mouseActiveRadius=25} +function showTooltip(x,y,contents,color){$('
'+contents+'
').css({position:'absolute',display:'none',top:y+15,'font-size':'small',left:x+5,border:'1px solid '+color[1],color:color[2],padding:'5px','background-color':color[0],opacity:0.80}).appendTo("body").show();} +var previousPoint=null;$(this._container).bind("plothover",function(event,pos,item){$("#x").text(pos.x.toFixed(2));$("#y").text(pos.y.toFixed(2));if(item){if(previousPoint!=item.datapoint){previousPoint=item.datapoint;$("#jstat_tooltip").remove();var x=jstat.toSigFig(item.datapoint[0],2),y=jstat.toSigFig(item.datapoint[1],2);var text=null;var color=item.series.color;if(item.series.label=='PDF'){text="P("+x+") = "+y;color=["#fee","#fdd","#C05F5F"];}else{text="F("+x+") = "+y;color=["#eef","#ddf","#4A4AC0"];} +showTooltip(item.pageX,item.pageY,text,color);}} +else{$("#jstat_tooltip").remove();previousPoint=null;}});$(this._container).bind("mouseleave",function(){if($('#jstat_tooltip').is(':visible')){$('#jstat_tooltip').remove();previousPoint=null;}});}else{if(this._options.grid==null){this._options.grid={hoverable:false}}else{this._options.grid.hoverable=false} +$(this._container).unbind("plothover");} +this.render();},setType:function(type){this._plotType=type;var lines={};var points={};if(this._plotType=='line'){lines.show=true;points.show=false;}else if(this._plotType=='points'){lines.show=false;points.show=true;}else if(this._plotType=='both'){lines.show=true;points.show=true;} +if(this._options.series==null){this._options.series={lines:lines,points:points}}else{if(this._options.series.lines==null){this._options.series.lines=lines;}else{this._options.series.lines.show=lines.show;} +if(this._options.series.points==null){this._options.series.points=points;}else{this._options.series.points.show=points.show;}} +this.render();},setFill:function(bool){this._fill=bool;if(this._options.series==null){this._options.series={lines:{fill:bool}}}else{if(this._options.series.lines==null){this._options.series.lines={fill:bool}}else{this._options.series.lines.fill=bool;}} 
+this.render();},clear:function(){this._super();this._distribution=null;this._pdfValues=[];this._cdfValues=[];this.render();},_generateValues:function(){this._cdfValues=[];this._pdfValues=[];var xs=this._range.getPoints();this._options.xaxis={min:xs[0],max:xs[xs.length-1]} +var pdfs=this._distribution.density(this._range);var cdfs=this._distribution.cumulativeDensity(this._range);for(var i=0;i 1) { + logme('ERROR: Found more than one slider for the parameter "' + paramName + '".'); + logme('sliderDiv.length = ', sliderDiv.length); + } // else { + // logme('MESSAGE: Did not find a slider for the parameter "' + paramName + '".'); + // } + } + + function createSlider(sliderDiv, paramName) { + var paramObj; + + paramObj = state.getParamObj(paramName); + + // Check that the retrieval went OK. + if (paramObj === undefined) { + logme('ERROR: Could not get a paramObj for parameter "' + paramName + '".'); + + return; + } + + // Create a jQuery UI slider from the slider DIV. We will set + // starting parameters, and will also attach a handler to update + // the 'state' on the 'slide' event. + sliderDiv.slider({ + 'min': paramObj.min, + 'max': paramObj.max, + 'value': paramObj.value, + 'step': paramObj.step + }); + + // Tell the parameter object stored in state that we have a slider + // that is attached to it. Next time when the parameter changes, it + // will also update the value of this slider. + paramObj.sliderDiv = sliderDiv; + + // Atach callbacks to update the slider's parameter. + paramObj.sliderDiv.on('slide', sliderOnSlide); + paramObj.sliderDiv.on('slidechange', sliderOnChange); + + return; + + // Update the 'state' - i.e. set the value of the parameter this + // slider is attached to to a new value. + // + // This will cause the plot to be redrawn each time after the user + // drags the slider handle and releases it. 
+ function sliderOnSlide(event, ui) { + // Last parameter passed to setParameterValue() will be 'true' + // so that the function knows we are a slider, and it can + // change the our value back in the case when the new value is + // invalid for some reason. + if (state.setParameterValue(paramName, ui.value, sliderDiv, true, 'slide') === undefined) { + logme('ERROR: Could not update the parameter named "' + paramName + '" with the value "' + ui.value + '".'); + } + } + + function sliderOnChange(event, ui) { + if (state.setParameterValue(paramName, ui.value, sliderDiv, true, 'change') === undefined) { + logme('ERROR: Could not update the parameter named "' + paramName + '" with the value "' + ui.value + '".'); + } + } + } + } +}); + +// End of wrapper for RequireJS. As you can see, we are passing +// namespaced Require JS variables to an anonymous function. Within +// it, you can use the standard requirejs(), require(), and define() +// functions as if they were in the global namespace. +}(RequireJS.requirejs, RequireJS.require, RequireJS.define)); // End-of: (function (requirejs, require, define) diff --git a/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/state.js b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/state.js new file mode 100644 index 0000000000..91a78f62ff --- /dev/null +++ b/common/lib/xmodule/xmodule/js/src/graphical_slider_tool/state.js @@ -0,0 +1,395 @@ +// Wrapper for RequireJS. It will make the standard requirejs(), require(), and +// define() functions from Require JS available inside the anonymous function. +(function (requirejs, require, define) { + +define('State', ['logme'], function (logme) { + var stateInst; + + // Since there will be (can be) multiple GST on a page, and each will have + // a separate state, we will create a factory constructor function. The + // constructor will expect the ID of the DIV with the GST contents, and the + // configuration object (parsed from a JSON string). 
It will return an + // object containing methods to set and get the private state properties. + + stateInst = 0; + + // This module defines and returns a factory constructor. + return State; + + function State(gstId, config) { + var parameters, allParameterNames, allParameterValues, + plotDiv, dynamicEl, dynamicElByElId; + + dynamicEl = []; + dynamicElByElId = {}; + + stateInst += 1; + // logme('MESSAGE: Creating state instance # ' + stateInst + '.'); + + // Initially, there are no parameters to track. So, we will instantiate + // an empty object. + // + // As we parse the JSON config object, we will add parameters as + // named properties. For example + // + // parameters.a = {...}; + // + // will be created for the parameter 'a'. + parameters = {}; + + // Check that the required parameters config object is available. + if ($.isPlainObject(config.parameters) === false) { + logme('ERROR: Expected config.parameters to be an object. It is not.'); + logme('config.parameters = ', config.parameters); + + return; + } + + // If config.parameters.param is an array, pass it to the processor + // element by element. + if ($.isArray(config.parameters.param) === true) { + (function (c1) { + while (c1 < config.parameters.param.length) { + processParameter(config.parameters.param[c1]); + c1 += 1; + } + }(0)); + } + + // If config.parameters.param is an object, pass this object to the + // processor directly. + else if ($.isPlainObject(config.parameters.param) === true) { + processParameter(config.parameters.param); + } + + // If config.parameters.param is some other type, report an error and + // do not continue. + else { + logme('ERROR: config.parameters.param is of an unsupported type.'); + logme('config.parameters.param = ', config.parameters.param); + + return; + } + + // Instead of building these arrays every time when some component + // requests them, we will create them in the beginning, and then update + // each element individually when some parameter's value changes. 
+ // + // Then we can just return the required array, instead of iterating + // over all of the properties of the 'parameters' object, and + // extracting their names/values one by one. + allParameterNames = []; + allParameterValues = []; + + // Populate 'allParameterNames', and 'allParameterValues' with data. + generateHelperArrays(); + + // The constructor will return an object with methods to operate on + // it's private properties. + return { + 'getParameterValue': getParameterValue, + 'setParameterValue': setParameterValue, + + 'getParamObj': getParamObj, + + 'getAllParameterNames': getAllParameterNames, + 'getAllParameterValues': getAllParameterValues, + + 'bindUpdatePlotEvent': bindUpdatePlotEvent, + 'addDynamicEl': addDynamicEl, + + // plde is an abbreviation for Plot Label Dynamic Elements. + plde: [] + }; + + function getAllParameterNames() { + return allParameterNames; + } + + function getAllParameterValues() { + return allParameterValues; + } + + function getParamObj(paramName) { + if (parameters.hasOwnProperty(paramName) === false) { + logme('ERROR: Object parameters does not have a property named "' + paramName + '".'); + + return; + } + + return parameters[paramName]; + } + + function bindUpdatePlotEvent(newPlotDiv, callback) { + plotDiv = newPlotDiv; + + plotDiv.bind('update_plot', callback); + } + + function addDynamicEl(el, func, elId, updateOnEvent) { + var newLength; + + newLength = dynamicEl.push({ + 'el': el, + 'func': func, + 'elId': elId, + 'updateOnEvent': updateOnEvent + }); + + if (typeof dynamicElByElId[elId] !== 'undefined') { + logme( + 'ERROR: Duplicate dynamic element ID "' + elId + '" found.' + ); + } else { + dynamicElByElId[elId] = dynamicEl[newLength - 1]; + } + } + + function getParameterValue(paramName) { + + // If the name of the constant is not tracked by state, return an + // 'undefined' value. 
+ if (parameters.hasOwnProperty(paramName) === false) { + logme('ERROR: Object parameters does not have a property named "' + paramName + '".'); + + return; + } + + return parameters[paramname].value; + } + + // #################################################################### + // + // Function: setParameterValue(paramName, paramValue, element) + // -------------------------------------------------- + // + // + // This function can be called from a callback, registered by a slider + // or a text input, when specific events ('slide' or 'change') are + // triggered. + // + // The 'paramName' is the name of the parameter in 'parameters' object + // whose value must be updated to the new value of 'paramValue'. + // + // Before we update the value, we must check that: + // + // 1.) the parameter named as 'paramName' actually exists in the + // 'parameters' object; + // 2.) the value 'paramValue' is a valid floating-point number, and + // it lies within the range specified by the 'min' and 'max' + // properties of the stored parameter object. + // + // If 'paramName' and 'paramValue' turn out to be valid, we will update + // the stored value in the parameter with the new value, and also + // update all of the text inputs and the slider that correspond to this + // parameter (if any), so that they reflect the new parameter's value. + // Finally, the helper array 'allParameterValues' will also be updated + // to reflect the change. + // + // If something went wrong (for example the new value is outside the + // allowed range), then we will reset the 'element' to display the + // original value. + // + // #################################################################### + function setParameterValue(paramName, paramValue, element, slider, updateOnEvent) { + var paramValueNum, c1; + + // If a parameter with the name specified by the 'paramName' + // parameter is not tracked by state, do not do anything. 
+ if (parameters.hasOwnProperty(paramName) === false) { + logme('ERROR: Object parameters does not have a property named "' + paramName + '".'); + + return; + } + + // Try to convert the passed value to a valid floating-point + // number. + paramValueNum = parseFloat(paramValue); + + // We are interested only in valid float values. NaN, -INF, + // +INF we will disregard. + if (isFinite(paramValueNum) === false) { + logme('ERROR: New parameter value is not a floating-point number.'); + logme('paramValue = ', paramValue); + + return; + } + + if (paramValueNum < parameters[paramName].min) { + paramValueNum = parameters[paramName].min; + } else if (paramValueNum > parameters[paramName].max) { + paramValueNum = parameters[paramName].max; + } + + parameters[paramName].value = paramValueNum; + + // Update all text inputs with the new parameter's value. + for (c1 = 0; c1 < parameters[paramName].inputDivs.length; c1 += 1) { + parameters[paramName].inputDivs[c1].val(paramValueNum); + } + + // Update the single slider with the new parameter's value. + if ((slider === false) && (parameters[paramName].sliderDiv !== null)) { + parameters[paramName].sliderDiv.slider('value', paramValueNum); + } + + // Update the helper array with the new parameter's value. + allParameterValues[parameters[paramName].helperArrayIndex] = paramValueNum; + + for (c1 = 0; c1 < dynamicEl.length; c1++) { + if ( + ((updateOnEvent !== undefined) && (dynamicEl[c1].updateOnEvent === updateOnEvent)) || + (updateOnEvent === undefined) + ) { + // If we have a DOM element, call the function "paste" the answer into the DIV. + if (dynamicEl[c1].el !== null) { + dynamicEl[c1].el.html(dynamicEl[c1].func.apply(window, allParameterValues)); + } + // If we DO NOT have an element, simply call the function. The function can then + // manipulate all the DOM elements it wants, without the fear of them being overwritten + // by us afterwards. 
+ else { + dynamicEl[c1].func.apply(window, allParameterValues); + } + } + } + + // If we have a plot DIV to work with, tell to update. + if (plotDiv !== undefined) { + plotDiv.trigger('update_plot'); + } + + return true; + } // End-of: function setParameterValue + + // #################################################################### + // + // Function: processParameter(obj) + // ------------------------------- + // + // + // This function will be run once for each instance of a GST when + // parsing the JSON config object. + // + // 'newParamObj' must be empty from the start for each invocation of + // this function, that's why we will declare it locally. + // + // We will parse the passed object 'obj' and populate the 'newParamObj' + // object with required properties. + // + // Since there will be many properties that are of type floating-point + // number, we will have a separate function for parsing them. + // + // processParameter() will fail right away if 'obj' does not have a + // '@var' property which represents the name of the parameter we want + // to process. + // + // If, after all of the properties have been processed, we reached the + // end of the function successfully, the 'newParamObj' will be added to + // the 'parameters' object (that is defined in the scope of State() + // function) as a property named as the name of the parameter. + // + // If at least one of the properties from 'obj' does not get correctly + // parsed, then the parameter represented by 'obj' will be disregarded. + // It will not be available to user-defined plotting functions, and + // things will most likely break. We will notify the user about this. + // + // #################################################################### + function processParameter(obj) { + var paramName, newParamObj; + + if (typeof obj['@var'] !== 'string') { + logme('ERROR: Expected obj["@var"] to be a string. 
It is not.'); + logme('obj["@var"] = ', obj['@var']); + + return; + } + + paramName = obj['@var']; + newParamObj = {}; + + if ( + (processFloat('@min', 'min') === false) || + (processFloat('@max', 'max') === false) || + (processFloat('@step', 'step') === false) || + (processFloat('@initial', 'value') === false) + ) { + logme('ERROR: A required property is missing. Not creating parameter "' + paramName + '"'); + + return; + } + + // Pointers to text input and slider DIV elements that this + // parameter will be attached to. Initially there are none. When we + // will create text inputs and sliders, we will update these + // properties. + newParamObj.inputDivs = []; + newParamObj.sliderDiv = null; + + // Everything went well, so save the new parameter object. + parameters[paramName] = newParamObj; + + return; + + function processFloat(attrName, newAttrName) { + var attrValue; + + if (typeof obj[attrName] !== 'string') { + logme('ERROR: Expected obj["' + attrName + '"] to be a string. It is not.'); + logme('obj["' + attrName + '"] = ', obj[attrName]); + + return false; + } else { + attrValue = parseFloat(obj[attrName]); + + if (isFinite(attrValue) === false) { + logme('ERROR: Expected obj["' + attrName + '"] to be a valid floating-point number. It is not.'); + logme('obj["' + attrName + '"] = ', obj[attrName]); + + return false; + } + } + + newParamObj[newAttrName] = attrValue; + + return true; + } // End-of: function processFloat + } // End-of: function processParameter + + // #################################################################### + // + // Function: generateHelperArrays() + // ------------------------------- + // + // + // Populate 'allParameterNames' and 'allParameterValues' with data. + // Link each parameter object with the corresponding helper array via + // an index 'helperArrayIndex'. It will be the same for both of the + // arrays. 
+ // + // NOTE: It is important to remember to update these helper arrays + // whenever a new parameter is added (or one is removed), or when a + // parameter's value changes. + // + // #################################################################### + function generateHelperArrays() { + var paramName, c1; + + c1 = 0; + for (paramName in parameters) { + allParameterNames.push(paramName); + allParameterValues.push(parameters[paramName].value); + + parameters[paramName].helperArrayIndex = c1; + + c1 += 1; + } + } + } // End-of: function State +}); + +// End of wrapper for RequireJS. As you can see, we are passing +// namespaced Require JS variables to an anonymous function. Within +// it, you can use the standard requirejs(), require(), and define() +// functions as if they were in the global namespace. +}(RequireJS.requirejs, RequireJS.require, RequireJS.define)); // End-of: (function (requirejs, require, define) diff --git a/common/lib/xmodule/xmodule/js/src/selfassessment/display.coffee b/common/lib/xmodule/xmodule/js/src/selfassessment/display.coffee deleted file mode 100644 index 951eb42fce..0000000000 --- a/common/lib/xmodule/xmodule/js/src/selfassessment/display.coffee +++ /dev/null @@ -1,133 +0,0 @@ -class @SelfAssessment - constructor: (element) -> - @el = $(element).find('section.self-assessment') - @id = @el.data('id') - @ajax_url = @el.data('ajax-url') - @state = @el.data('state') - @allow_reset = @el.data('allow_reset') - # valid states: 'initial', 'assessing', 'request_hint', 'done' - - # Where to put the rubric once we load it - @errors_area = @$('.error') - @answer_area = @$('textarea.answer') - - @rubric_wrapper = @$('.rubric-wrapper') - @hint_wrapper = @$('.hint-wrapper') - @message_wrapper = @$('.message-wrapper') - @submit_button = @$('.submit-button') - @reset_button = @$('.reset-button') - @reset_button.click @reset - - @find_assessment_elements() - @find_hint_elements() - - @rebind() - - # locally scoped jquery. 
- $: (selector) -> - $(selector, @el) - - rebind: () => - # rebind to the appropriate function for the current state - @submit_button.unbind('click') - @submit_button.show() - @reset_button.hide() - @hint_area.attr('disabled', false) - if @state == 'initial' - @answer_area.attr("disabled", false) - @submit_button.prop('value', 'Submit') - @submit_button.click @save_answer - else if @state == 'assessing' - @answer_area.attr("disabled", true) - @submit_button.prop('value', 'Submit assessment') - @submit_button.click @save_assessment - else if @state == 'request_hint' - @answer_area.attr("disabled", true) - @submit_button.prop('value', 'Submit hint') - @submit_button.click @save_hint - else if @state == 'done' - @answer_area.attr("disabled", true) - @hint_area.attr('disabled', true) - @submit_button.hide() - if @allow_reset - @reset_button.show() - else - @reset_button.hide() - - - find_assessment_elements: -> - @assessment = @$('select.assessment') - - find_hint_elements: -> - @hint_area = @$('textarea.hint') - - save_answer: (event) => - event.preventDefault() - if @state == 'initial' - data = {'student_answer' : @answer_area.val()} - $.postWithPrefix "#{@ajax_url}/save_answer", data, (response) => - if response.success - @rubric_wrapper.html(response.rubric_html) - @state = 'assessing' - @find_assessment_elements() - @rebind() - else - @errors_area.html(response.error) - else - @errors_area.html('Problem state got out of sync. 
Try reloading the page.') - - save_assessment: (event) => - event.preventDefault() - if @state == 'assessing' - data = {'assessment' : @assessment.find(':selected').text()} - $.postWithPrefix "#{@ajax_url}/save_assessment", data, (response) => - if response.success - @state = response.state - - if @state == 'request_hint' - @hint_wrapper.html(response.hint_html) - @find_hint_elements() - else if @state == 'done' - @message_wrapper.html(response.message_html) - @allow_reset = response.allow_reset - - @rebind() - else - @errors_area.html(response.error) - else - @errors_area.html('Problem state got out of sync. Try reloading the page.') - - - save_hint: (event) => - event.preventDefault() - if @state == 'request_hint' - data = {'hint' : @hint_area.val()} - - $.postWithPrefix "#{@ajax_url}/save_hint", data, (response) => - if response.success - @message_wrapper.html(response.message_html) - @state = 'done' - @allow_reset = response.allow_reset - @rebind() - else - @errors_area.html(response.error) - else - @errors_area.html('Problem state got out of sync. Try reloading the page.') - - - reset: (event) => - event.preventDefault() - if @state == 'done' - $.postWithPrefix "#{@ajax_url}/reset", {}, (response) => - if response.success - @answer_area.html('') - @rubric_wrapper.html('') - @hint_wrapper.html('') - @message_wrapper.html('') - @state = 'initial' - @rebind() - @reset_button.hide() - else - @errors_area.html(response.error) - else - @errors_area.html('Problem state got out of sync. 
Try reloading the page.') diff --git a/common/lib/xmodule/xmodule/js/src/video/display.coffee b/common/lib/xmodule/xmodule/js/src/video/display.coffee index 6587f05899..a170075b68 100644 --- a/common/lib/xmodule/xmodule/js/src/video/display.coffee +++ b/common/lib/xmodule/xmodule/js/src/video/display.coffee @@ -2,6 +2,8 @@ class @Video constructor: (element) -> @el = $(element).find('.video') @id = @el.attr('id').replace(/video_/, '') + @start = @el.data('start') + @end = @el.data('end') @caption_data_dir = @el.data('caption-data-dir') @show_captions = @el.data('show-captions') == "true" window.player = null diff --git a/common/lib/xmodule/xmodule/js/src/video/display/video_player.coffee b/common/lib/xmodule/xmodule/js/src/video/display/video_player.coffee index 8829e25dac..ec52d15874 100644 --- a/common/lib/xmodule/xmodule/js/src/video/display/video_player.coffee +++ b/common/lib/xmodule/xmodule/js/src/video/display/video_player.coffee @@ -36,14 +36,21 @@ class @VideoPlayer extends Subview @volumeControl = new VideoVolumeControl el: @$('.secondary-controls') @speedControl = new VideoSpeedControl el: @$('.secondary-controls'), speeds: @video.speeds, currentSpeed: @currentSpeed() @progressSlider = new VideoProgressSlider el: @$('.slider') + @playerVars = + controls: 0 + wmode: 'transparent' + rel: 0 + showinfo: 0 + enablejsapi: 1 + modestbranding: 1 + if @video.start + @playerVars.start = @video.start + if @video.end + # work in AS3, not HMLT5. 
but iframe use AS3 + @playerVars.end = @video.end + @player = new YT.Player @video.id, - playerVars: - controls: 0 - wmode: 'transparent' - rel: 0 - showinfo: 0 - enablejsapi: 1 - modestbranding: 1 + playerVars: @playerVars videoId: @video.youtubeId() events: onReady: @onReady diff --git a/common/lib/xmodule/xmodule/modulestore/__init__.py b/common/lib/xmodule/xmodule/modulestore/__init__.py index fa8cf8d3d7..f86a6e9600 100644 --- a/common/lib/xmodule/xmodule/modulestore/__init__.py +++ b/common/lib/xmodule/xmodule/modulestore/__init__.py @@ -339,9 +339,15 @@ class ModuleStore(object): ''' raise NotImplementedError - def get_parent_locations(self, location): - '''Find all locations that are the parents of this location. Needed - for path_to_location(). + def get_course(self, course_id): + ''' + Look for a specific course id. Returns the course descriptor, or None if not found. + ''' + raise NotImplementedError + + def get_parent_locations(self, location, course_id): + '''Find all locations that are the parents of this location in this + course. Needed for path_to_location(). returns an iterable of things that can be passed to Location. ''' @@ -399,3 +405,10 @@ class ModuleStoreBase(ModuleStore): errorlog = self._get_errorlog(location) return errorlog.errors + + def get_course(self, course_id): + """Default impl--linear search through course list""" + for c in self.get_courses(): + if c.id == course_id: + return c + return None diff --git a/common/lib/xmodule/xmodule/modulestore/mongo.py b/common/lib/xmodule/xmodule/modulestore/mongo.py index baa4e7870c..4c7ef3c050 100644 --- a/common/lib/xmodule/xmodule/modulestore/mongo.py +++ b/common/lib/xmodule/xmodule/modulestore/mongo.py @@ -309,9 +309,9 @@ class MongoModuleStore(ModuleStoreBase): self._update_single_item(location, {'metadata': metadata}) - def get_parent_locations(self, location): - '''Find all locations that are the parents of this location. Needed - for path_to_location(). 
+ def get_parent_locations(self, location, course_id): + '''Find all locations that are the parents of this location in this + course. Needed for path_to_location(). If there is no data at location in this modulestore, raise ItemNotFoundError. diff --git a/common/lib/xmodule/xmodule/modulestore/search.py b/common/lib/xmodule/xmodule/modulestore/search.py index f9901e8bfe..4a5ece6854 100644 --- a/common/lib/xmodule/xmodule/modulestore/search.py +++ b/common/lib/xmodule/xmodule/modulestore/search.py @@ -64,7 +64,7 @@ def path_to_location(modulestore, course_id, location): # isn't found so we don't have to do it explicitly. Call this # first to make sure the location is there (even if it's a course, and # we would otherwise immediately exit). - parents = modulestore.get_parent_locations(loc) + parents = modulestore.get_parent_locations(loc, course_id) # print 'Processing loc={0}, path={1}'.format(loc, path) if loc.category == "course": diff --git a/common/lib/xmodule/xmodule/modulestore/tests/test_modulestore.py b/common/lib/xmodule/xmodule/modulestore/tests/test_modulestore.py index c1d1d50a53..64816581ce 100644 --- a/common/lib/xmodule/xmodule/modulestore/tests/test_modulestore.py +++ b/common/lib/xmodule/xmodule/modulestore/tests/test_modulestore.py @@ -23,12 +23,3 @@ def check_path_to_location(modulestore): for location in not_found: assert_raises(ItemNotFoundError, path_to_location, modulestore, course_id, location) - # Since our test files are valid, there shouldn't be any - # elements with no path to them. But we can look for them in - # another course. 
- no_path = ( - "i4x://edX/simple/video/Lost_Video", - ) - for location in no_path: - assert_raises(NoPathToItem, path_to_location, modulestore, course_id, location) - diff --git a/common/lib/xmodule/xmodule/modulestore/xml.py b/common/lib/xmodule/xmodule/modulestore/xml.py index 6f8430917d..04f3a94d1b 100644 --- a/common/lib/xmodule/xmodule/modulestore/xml.py +++ b/common/lib/xmodule/xmodule/modulestore/xml.py @@ -152,7 +152,7 @@ class ImportSystem(XMLParsingSystem, MakoDescriptorSystem): make_name_unique(xml_data) descriptor = XModuleDescriptor.load_from_xml( - etree.tostring(xml_data), self, self.org, + etree.tostring(xml_data, encoding='unicode'), self, self.org, self.course, xmlstore.default_class) except Exception as err: print err, self.load_error_modules @@ -275,14 +275,16 @@ class XMLModuleStore(ModuleStoreBase): class_ = getattr(import_module(module_path), class_name) self.default_class = class_ - self.parent_tracker = ParentTracker() + self.parent_trackers = defaultdict(ParentTracker) # If we are specifically asked for missing courses, that should # be an error. If we are asked for "all" courses, find the ones - # that have a course.xml + # that have a course.xml. We sort the dirs in alpha order so we always + # read things in the same order (OS differences in load order have + # bitten us in the past.) 
if course_dirs is None: - course_dirs = [d for d in os.listdir(self.data_dir) if - os.path.exists(self.data_dir / d / "course.xml")] + course_dirs = sorted([d for d in os.listdir(self.data_dir) if + os.path.exists(self.data_dir / d / "course.xml")]) for course_dir in course_dirs: self.try_load_course(course_dir) @@ -307,7 +309,7 @@ class XMLModuleStore(ModuleStoreBase): if course_descriptor is not None: self.courses[course_dir] = course_descriptor self._location_errors[course_descriptor.location] = errorlog - self.parent_tracker.make_known(course_descriptor.location) + self.parent_trackers[course_descriptor.id].make_known(course_descriptor.location) else: # Didn't load course. Instead, save the errors elsewhere. self.errored_courses[course_dir] = errorlog @@ -432,11 +434,11 @@ class XMLModuleStore(ModuleStoreBase): course_dir, policy, tracker, - self.parent_tracker, + self.parent_trackers[course_id], self.load_error_modules, ) - course_descriptor = system.process_xml(etree.tostring(course_data)) + course_descriptor = system.process_xml(etree.tostring(course_data, encoding='unicode')) # NOTE: The descriptors end up loading somewhat bottom up, which # breaks metadata inheritance via get_children(). Instead @@ -541,9 +543,9 @@ class XMLModuleStore(ModuleStoreBase): """ raise NotImplementedError("XMLModuleStores are read-only") - def get_parent_locations(self, location): - '''Find all locations that are the parents of this location. Needed - for path_to_location(). + def get_parent_locations(self, location, course_id): + '''Find all locations that are the parents of this location in this + course. Needed for path_to_location(). If there is no data at location in this modulestore, raise ItemNotFoundError. @@ -552,7 +554,7 @@ class XMLModuleStore(ModuleStoreBase): be empty if there are no parents. 
''' location = Location.ensure_fully_specified(location) - if not self.parent_tracker.is_known(location): - raise ItemNotFoundError(location) + if not self.parent_trackers[course_id].is_known(location): + raise ItemNotFoundError("{0} not in {1}".format(location, course_id)) - return self.parent_tracker.parents(location) + return self.parent_trackers[course_id].parents(location) diff --git a/common/lib/xmodule/xmodule/open_ended_module.py b/common/lib/xmodule/xmodule/open_ended_module.py new file mode 100644 index 0000000000..11f96c9848 --- /dev/null +++ b/common/lib/xmodule/xmodule/open_ended_module.py @@ -0,0 +1,660 @@ +""" +A Self Assessment module that allows students to write open-ended responses, +submit, then see a rubric and rate themselves. Persists student supplied +hints, answers, and assessment judgment (currently only correct/incorrect). +Parses xml definition file--see below for exact format. +""" + +import copy +from fs.errors import ResourceNotFoundError +import itertools +import json +import logging +from lxml import etree +from lxml.html import rewrite_links +from path import path +import os +import sys +import hashlib +import capa.xqueue_interface as xqueue_interface + +from pkg_resources import resource_string + +from .capa_module import only_one, ComplexEncoder +from .editing_module import EditingDescriptor +from .html_checker import check_html +from progress import Progress +from .stringify import stringify_children +from .xml_module import XmlDescriptor +from xmodule.modulestore import Location +from capa.util import * +import openendedchild + +from mitxmako.shortcuts import render_to_string +from numpy import median + +from datetime import datetime + +from combined_open_ended_rubric import CombinedOpenEndedRubric + +log = logging.getLogger("mitx.courseware") + +class OpenEndedModule(openendedchild.OpenEndedChild): + """ + The open ended module supports all external open ended grader problems. + Sample XML file: + + + Enter essay here. 
+ This is the answer. + {"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"} + + + """ + + def setup_response(self, system, location, definition, descriptor): + """ + Sets up the response type. + @param system: Modulesystem object + @param location: The location of the problem + @param definition: The xml definition of the problem + @param descriptor: The OpenEndedDescriptor associated with this + @return: None + """ + oeparam = definition['oeparam'] + + self.url = definition.get('url', None) + self.queue_name = definition.get('queuename', self.DEFAULT_QUEUE) + self.message_queue_name = definition.get('message-queuename', self.DEFAULT_MESSAGE_QUEUE) + + #This is needed to attach feedback to specific responses later + self.submission_id = None + self.grader_id = None + + if oeparam is None: + raise ValueError("No oeparam found in problem xml.") + if self.prompt is None: + raise ValueError("No prompt found in problem xml.") + if self.rubric is None: + raise ValueError("No rubric found in problem xml.") + + self._parse(oeparam, self.prompt, self.rubric, system) + + if self.created == True and self.state == self.ASSESSING: + self.created = False + self.send_to_grader(self.latest_answer(), system) + self.created = False + + def _parse(self, oeparam, prompt, rubric, system): + ''' + Parse OpenEndedResponse XML: + self.initial_display + self.payload - dict containing keys -- + 'grader' : path to grader settings file, 'problem_id' : id of the problem + + self.answer - What to display when show answer is clicked + ''' + # Note that OpenEndedResponse is agnostic to the specific contents of grader_payload + prompt_string = stringify_children(prompt) + rubric_string = stringify_children(rubric) + self.prompt = prompt_string + self.rubric = rubric_string + + grader_payload = oeparam.find('grader_payload') + grader_payload = grader_payload.text if grader_payload is not None else '' + + #Update grader payload with student id. 
If grader payload not json, error. + try: + parsed_grader_payload = json.loads(grader_payload) + # NOTE: self.system.location is valid because the capa_module + # __init__ adds it (easiest way to get problem location into + # response types) + except TypeError, ValueError: + log.exception("Grader payload %r is not a json object!", grader_payload) + + self.initial_display = find_with_default(oeparam, 'initial_display', '') + self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.') + + parsed_grader_payload.update({ + 'location': system.location.url(), + 'course_id': system.course_id, + 'prompt': prompt_string, + 'rubric': rubric_string, + 'initial_display': self.initial_display, + 'answer': self.answer, + }) + updated_grader_payload = json.dumps(parsed_grader_payload) + + self.payload = {'grader_payload': updated_grader_payload} + + def skip_post_assessment(self, get, system): + """ + Ajax function that allows one to skip the post assessment phase + @param get: AJAX dictionary + @param system: ModuleSystem + @return: Success indicator + """ + self.state = self.DONE + return {'success': True} + + def message_post(self, get, system): + """ + Handles a student message post (a reaction to the grade they received from an open ended grader type) + Returns a boolean success/fail and an error message + """ + + event_info = dict() + event_info['problem_id'] = system.location.url() + event_info['student_id'] = system.anonymous_student_id + event_info['survey_responses'] = get + + survey_responses = event_info['survey_responses'] + for tag in ['feedback', 'submission_id', 'grader_id', 'score']: + if tag not in survey_responses: + return {'success': False, 'msg': "Could not find needed tag {0}".format(tag)} + try: + submission_id = int(survey_responses['submission_id']) + grader_id = int(survey_responses['grader_id']) + feedback = str(survey_responses['feedback'].encode('ascii', 'ignore')) + score = int(survey_responses['score']) + except: + error_message 
= ("Could not parse submission id, grader id, " + "or feedback from message_post ajax call. Here is the message data: {0}".format( + survey_responses)) + log.exception(error_message) + return {'success': False, 'msg': "There was an error saving your feedback. Please contact course staff."} + + qinterface = system.xqueue['interface'] + qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat) + anonymous_student_id = system.anonymous_student_id + queuekey = xqueue_interface.make_hashkey(str(system.seed) + qtime + + anonymous_student_id + + str(len(self.history))) + + xheader = xqueue_interface.make_xheader( + lms_callback_url=system.xqueue['callback_url'], + lms_key=queuekey, + queue_name=self.message_queue_name + ) + + student_info = {'anonymous_student_id': anonymous_student_id, + 'submission_time': qtime, + } + contents = { + 'feedback': feedback, + 'submission_id': submission_id, + 'grader_id': grader_id, + 'score': score, + 'student_info': json.dumps(student_info), + } + + (error, msg) = qinterface.send_to_queue(header=xheader, + body=json.dumps(contents)) + + #Convert error to a success value + success = True + if error: + success = False + + self.state = self.DONE + + return {'success': success, 'msg': "Successfully submitted your feedback."} + + def send_to_grader(self, submission, system): + """ + Send a given submission to the grader, via the xqueue + @param submission: The student submission to send to the grader + @param system: Modulesystem + @return: Boolean true (not useful right now) + """ + + # Prepare xqueue request + #------------------------------------------------------------ + + qinterface = system.xqueue['interface'] + qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat) + + anonymous_student_id = system.anonymous_student_id + + # Generate header + queuekey = xqueue_interface.make_hashkey(str(system.seed) + qtime + + anonymous_student_id + + str(len(self.history))) + + xheader = 
xqueue_interface.make_xheader(lms_callback_url=system.xqueue['callback_url'], + lms_key=queuekey, + queue_name=self.queue_name) + + contents = self.payload.copy() + + # Metadata related to the student submission revealed to the external grader + student_info = {'anonymous_student_id': anonymous_student_id, + 'submission_time': qtime, + } + + #Update contents with student response and student info + contents.update({ + 'student_info': json.dumps(student_info), + 'student_response': submission, + 'max_score': self.max_score(), + }) + + # Submit request. When successful, 'msg' is the prior length of the queue + (error, msg) = qinterface.send_to_queue(header=xheader, + body=json.dumps(contents)) + + # State associated with the queueing request + queuestate = {'key': queuekey, + 'time': qtime, } + return True + + def _update_score(self, score_msg, queuekey, system): + """ + Called by xqueue to update the score + @param score_msg: The message from xqueue + @param queuekey: The key sent by xqueue + @param system: Modulesystem + @return: Boolean True (not useful currently) + """ + new_score_msg = self._parse_score_msg(score_msg) + if not new_score_msg['valid']: + score_msg['feedback'] = 'Invalid grader reply. Please contact the course staff.' + + self.record_latest_score(new_score_msg['score']) + self.record_latest_post_assessment(score_msg) + self.state = self.POST_ASSESSMENT + + return True + + + def get_answers(self): + """ + Gets and shows the answer for this problem. + @return: Answer html + """ + anshtml = '
{0}
'.format(self.answer) + return {self.answer_id: anshtml} + + def get_initial_display(self): + """ + Gets and shows the initial display for the input box. + @return: Initial display html + """ + return {self.answer_id: self.initial_display} + + def _convert_longform_feedback_to_html(self, response_items): + """ + Take in a dictionary, and return html strings for display to student. + Input: + response_items: Dictionary with keys success, feedback. + if success is True, feedback should be a dictionary, with keys for + types of feedback, and the corresponding feedback values. + if success is False, feedback is actually an error string. + + NOTE: this will need to change when we integrate peer grading, because + that will have more complex feedback. + + Output: + String -- html that can be displayincorrect-icon.pnged to the student. + """ + + # We want to display available feedback in a particular order. + # This dictionary specifies which goes first--lower first. + priorities = {# These go at the start of the feedback + 'spelling': 0, + 'grammar': 1, + # needs to be after all the other feedback + 'markup_text': 3} + + default_priority = 2 + + def get_priority(elt): + """ + Args: + elt: a tuple of feedback-type, feedback + Returns: + the priority for this feedback type + """ + return priorities.get(elt[0], default_priority) + + def encode_values(feedback_type, value): + feedback_type = str(feedback_type).encode('ascii', 'ignore') + if not isinstance(value, basestring): + value = str(value) + value = value.encode('ascii', 'ignore') + return feedback_type, value + + def format_feedback(feedback_type, value): + feedback_type, value = encode_values(feedback_type, value) + feedback = """ +
+ {value} +
+ """.format(feedback_type=feedback_type, value=value) + return feedback + + def format_feedback_hidden(feedback_type, value): + feedback_type, value = encode_values(feedback_type, value) + feedback = """ + + """.format(feedback_type=feedback_type, value=value) + return feedback + + # TODO (vshnayder): design and document the details of this format so + # that we can do proper escaping here (e.g. are the graders allowed to + # include HTML?) + + for tag in ['success', 'feedback', 'submission_id', 'grader_id']: + if tag not in response_items: + return format_feedback('errors', 'Error getting feedback') + + feedback_items = response_items['feedback'] + try: + feedback = json.loads(feedback_items) + except (TypeError, ValueError): + log.exception("feedback_items have invalid json %r", feedback_items) + return format_feedback('errors', 'Could not parse feedback') + + if response_items['success']: + if len(feedback) == 0: + return format_feedback('errors', 'No feedback available') + + feedback_lst = sorted(feedback.items(), key=get_priority) + feedback_list_part1 = u"\n".join(format_feedback(k, v) for k, v in feedback_lst) + else: + feedback_list_part1 = format_feedback('errors', response_items['feedback']) + + feedback_list_part2 = (u"\n".join([format_feedback_hidden(feedback_type, value) + for feedback_type, value in response_items.items() + if feedback_type in ['submission_id', 'grader_id']])) + + return u"\n".join([feedback_list_part1, feedback_list_part2]) + + def _format_feedback(self, response_items): + """ + Input: + Dictionary called feedback. Must contain keys seen below. 
+ Output: + Return error message or feedback template + """ + + log.debug(response_items) + rubric_feedback="" + feedback = self._convert_longform_feedback_to_html(response_items) + if response_items['rubric_scores_complete']==True: + rubric_feedback = CombinedOpenEndedRubric.render_rubric(response_items['rubric_xml']) + + if not response_items['success']: + return system.render_template("open_ended_error.html", + {'errors': feedback}) + + feedback_template = render_to_string("open_ended_feedback.html", { + 'grader_type': response_items['grader_type'], + 'score': "{0} / {1}".format(response_items['score'], self.max_score()), + 'feedback': feedback, + 'rubric_feedback' : rubric_feedback + }) + + return feedback_template + + + def _parse_score_msg(self, score_msg, join_feedback=True): + """ + Grader reply is a JSON-dump of the following dict + { 'correct': True/False, + 'score': Numeric value (floating point is okay) to assign to answer + 'msg': grader_msg + 'feedback' : feedback from grader + } + + Returns (valid_score_msg, correct, score, msg): + valid_score_msg: Flag indicating valid score_msg format (Boolean) + correct: Correctness of submission (Boolean) + score: Points to be assigned (numeric, can be float) + """ + fail = {'valid': False, 'score': 0, 'feedback': ''} + try: + score_result = json.loads(score_msg) + except (TypeError, ValueError): + error_message = ("External grader message should be a JSON-serialized dict." + " Received score_msg = {0}".format(score_msg)) + log.error(error_message) + fail['feedback'] = error_message + return fail + + if not isinstance(score_result, dict): + error_message = ("External grader message should be a JSON-serialized dict." 
+ " Received score_result = {0}".format(score_result)) + log.error(error_message) + fail['feedback'] = error_message + return fail + + for tag in ['score', 'feedback', 'grader_type', 'success', 'grader_id', 'submission_id']: + if tag not in score_result: + error_message = ("External grader message is missing required tag: {0}" + .format(tag)) + log.error(error_message) + fail['feedback'] = error_message + return fail + #This is to support peer grading + if isinstance(score_result['score'], list): + feedback_items = [] + for i in xrange(0, len(score_result['score'])): + new_score_result = { + 'score': score_result['score'][i], + 'feedback': score_result['feedback'][i], + 'grader_type': score_result['grader_type'], + 'success': score_result['success'], + 'grader_id': score_result['grader_id'][i], + 'submission_id': score_result['submission_id'], + 'rubric_scores_complete' : score_result['rubric_scores_complete'], + 'rubric_xml' : score_result['rubric_xml'], + } + feedback_items.append(self._format_feedback(new_score_result)) + if join_feedback: + feedback = "".join(feedback_items) + else: + feedback = feedback_items + score = int(median(score_result['score'])) + else: + #This is for instructor and ML grading + feedback = self._format_feedback(score_result) + score = score_result['score'] + + self.submission_id = score_result['submission_id'] + self.grader_id = score_result['grader_id'] + + return {'valid': True, 'score': score, 'feedback': feedback} + + def latest_post_assessment(self, short_feedback=False, join_feedback=True): + """ + Gets the latest feedback, parses, and returns + @param short_feedback: If the long feedback is wanted or not + @return: Returns formatted feedback + """ + if not self.history: + return "" + + feedback_dict = self._parse_score_msg(self.history[-1].get('post_assessment', ""), join_feedback=join_feedback) + if not short_feedback: + return feedback_dict['feedback'] if feedback_dict['valid'] else '' + if feedback_dict['valid']: + 
short_feedback = self._convert_longform_feedback_to_html( + json.loads(self.history[-1].get('post_assessment', ""))) + return short_feedback if feedback_dict['valid'] else '' + + def format_feedback_with_evaluation(self, feedback): + """ + Renders a given html feedback into an evaluation template + @param feedback: HTML feedback + @return: Rendered html + """ + context = {'msg': feedback, 'id': "1", 'rows': 50, 'cols': 50} + html = render_to_string('open_ended_evaluation.html', context) + return html + + def handle_ajax(self, dispatch, get, system): + ''' + This is called by courseware.module_render, to handle an AJAX call. + "get" is request.POST. + + Returns a json dictionary: + { 'progress_changed' : True/False, + 'progress' : 'none'/'in_progress'/'done', + } + ''' + handlers = { + 'save_answer': self.save_answer, + 'score_update': self.update_score, + 'save_post_assessment': self.message_post, + 'skip_post_assessment': self.skip_post_assessment, + 'check_for_score': self.check_for_score, + } + + if dispatch not in handlers: + return 'Error' + + before = self.get_progress() + d = handlers[dispatch](get, system) + after = self.get_progress() + d.update({ + 'progress_changed': after != before, + 'progress_status': Progress.to_js_status_str(after), + }) + return json.dumps(d, cls=ComplexEncoder) + + def check_for_score(self, get, system): + """ + Checks to see if a score has been received yet. + @param get: AJAX get dictionary + @param system: Modulesystem (needed to align with other ajax functions) + @return: Returns the current state + """ + state = self.state + return {'state': state} + + def save_answer(self, get, system): + """ + Saves a student answer + @param get: AJAX get dictionary + @param system: modulesystem + @return: Success indicator + """ + if self.attempts > self.max_attempts: + # If too many attempts, prevent student from saving answer and + # seeing rubric. 
In normal use, students shouldn't see this because + # they won't see the reset button once they're out of attempts. + return { + 'success': False, + 'error': 'Too many attempts.' + } + + if self.state != self.INITIAL: + return self.out_of_sync_error(get) + + # add new history element with answer and empty score and hint. + self.new_history_entry(get['student_answer']) + self.send_to_grader(get['student_answer'], system) + self.change_state(self.ASSESSING) + + return {'success': True, } + + def update_score(self, get, system): + """ + Updates the current score via ajax. Called by xqueue. + Input: AJAX get dictionary, modulesystem + Output: None + """ + queuekey = get['queuekey'] + score_msg = get['xqueue_body'] + #TODO: Remove need for cmap + self._update_score(score_msg, queuekey, system) + + return dict() # No AJAX return is needed + + def get_html(self, system): + """ + Gets the HTML for this problem and renders it + Input: Modulesystem object + Output: Rendered HTML + """ + #set context variables and render template + if self.state != self.INITIAL: + latest = self.latest_answer() + previous_answer = latest if latest is not None else self.initial_display + post_assessment = self.latest_post_assessment() + score = self.latest_score() + correct = 'correct' if self.is_submission_correct(score) else 'incorrect' + else: + post_assessment = "" + correct = "" + previous_answer = self.initial_display + + context = { + 'prompt': self.prompt, + 'previous_answer': previous_answer, + 'state': self.state, + 'allow_reset': self._allow_reset(), + 'rows': 30, + 'cols': 80, + 'id': 'open_ended', + 'msg': post_assessment, + 'child_type': 'openended', + 'correct': correct, + } + log.debug(context) + html = system.render_template('open_ended.html', context) + return html + + +class OpenEndedDescriptor(XmlDescriptor, EditingDescriptor): + """ + Module for adding open ended response questions to courses + """ + mako_template = "widgets/html-edit.html" + module_class = OpenEndedModule 
+ filename_extension = "xml" + + stores_state = True + has_score = True + template_dir_name = "openended" + + js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]} + js_module_name = "HTMLEditingDescriptor" + + @classmethod + def definition_from_xml(cls, xml_object, system): + """ + Pull out the open ended parameters into a dictionary. + + Returns: + { + 'oeparam': 'some-html' + } + """ + for child in ['openendedparam']: + if len(xml_object.xpath(child)) != 1: + raise ValueError("Open Ended definition must include exactly one '{0}' tag".format(child)) + + def parse(k): + """Assumes that xml_object has child k""" + return xml_object.xpath(k)[0] + + return {'oeparam': parse('openendedparam'), } + + + def definition_to_xml(self, resource_fs): + '''Return an xml element representing this definition.''' + elt = etree.Element('openended') + + def add_child(k): + child_str = '<{tag}>{body}'.format(tag=k, body=self.definition[k]) + child_node = etree.fromstring(child_str) + elt.append(child_node) + + for child in ['openendedparam']: + add_child(child) + + return elt + + diff --git a/common/lib/xmodule/xmodule/openendedchild.py b/common/lib/xmodule/xmodule/openendedchild.py new file mode 100644 index 0000000000..2ba9528237 --- /dev/null +++ b/common/lib/xmodule/xmodule/openendedchild.py @@ -0,0 +1,263 @@ +import copy +from fs.errors import ResourceNotFoundError +import itertools +import json +import logging +from lxml import etree +from lxml.html import rewrite_links +from path import path +import os +import sys +import hashlib +import capa.xqueue_interface as xqueue_interface + +from pkg_resources import resource_string + +from .capa_module import only_one, ComplexEncoder +from .editing_module import EditingDescriptor +from .html_checker import check_html +from progress import Progress +from .stringify import stringify_children +from .xml_module import XmlDescriptor +from xmodule.modulestore import Location +from capa.util import * + +from datetime import 
datetime + +log = logging.getLogger("mitx.courseware") + +# Set the default number of max attempts. Should be 1 for production +# Set higher for debugging/testing +# attempts specified in xml definition overrides this. +MAX_ATTEMPTS = 1 + +# Set maximum available number of points. +# Overriden by max_score specified in xml. +MAX_SCORE = 1 + +class OpenEndedChild(): + """ + States: + + initial (prompt, textbox shown) + | + assessing (read-only textbox, rubric + assessment input shown for self assessment, response queued for open ended) + | + post_assessment (read-only textbox, read-only rubric and assessment, hint input box shown) + | + done (submitted msg, green checkmark, everything else read-only. If attempts < max, shows + a reset button that goes back to initial state. Saves previous + submissions too.) + """ + + DEFAULT_QUEUE = 'open-ended' + DEFAULT_MESSAGE_QUEUE = 'open-ended-message' + max_inputfields = 1 + + STATE_VERSION = 1 + + # states + INITIAL = 'initial' + ASSESSING = 'assessing' + POST_ASSESSMENT = 'post_assessment' + DONE = 'done' + + #This is used to tell students where they are at in the module + HUMAN_NAMES = { + 'initial': 'Started', + 'assessing': 'Being scored', + 'post_assessment': 'Scoring finished', + 'done': 'Problem complete', + } + + def __init__(self, system, location, definition, descriptor, static_data, + instance_state=None, shared_state=None, **kwargs): + # Load instance state + if instance_state is not None: + instance_state = json.loads(instance_state) + else: + instance_state = {} + + # History is a list of tuples of (answer, score, hint), where hint may be + # None for any element, and score and hint can be None for the last (current) + # element. 
+ # Scores are on scale from 0 to max_score + self.history = instance_state.get('history', []) + + self.state = instance_state.get('state', self.INITIAL) + + self.created = instance_state.get('created', False) + + self.attempts = instance_state.get('attempts', 0) + self.max_attempts = static_data['max_attempts'] + + self.prompt = static_data['prompt'] + self.rubric = static_data['rubric'] + + # Used for progress / grading. Currently get credit just for + # completion (doesn't matter if you self-assessed correct/incorrect). + self._max_score = static_data['max_score'] + + self.setup_response(system, location, definition, descriptor) + + def setup_response(self, system, location, definition, descriptor): + """ + Needs to be implemented by the inheritors of this module. Sets up additional fields used by the child modules. + @param system: Modulesystem + @param location: Module location + @param definition: XML definition + @param descriptor: Descriptor of the module + @return: None + """ + pass + + def latest_answer(self): + """None if not available""" + if not self.history: + return "" + return self.history[-1].get('answer', "") + + def latest_score(self): + """None if not available""" + if not self.history: + return None + return self.history[-1].get('score') + + def latest_post_assessment(self): + """None if not available""" + if not self.history: + return "" + return self.history[-1].get('post_assessment', "") + + def new_history_entry(self, answer): + """ + Adds a new entry to the history dictionary + @param answer: The student supplied answer + @return: None + """ + self.history.append({'answer': answer}) + + def record_latest_score(self, score): + """Assumes that state is right, so we're adding a score to the latest + history element""" + self.history[-1]['score'] = score + + def record_latest_post_assessment(self, post_assessment): + """Assumes that state is right, so we're adding a score to the latest + history element""" + self.history[-1]['post_assessment'] 
= post_assessment + + def change_state(self, new_state): + """ + A centralized place for state changes--allows for hooks. If the + current state matches the old state, don't run any hooks. + """ + if self.state == new_state: + return + + self.state = new_state + + if self.state == self.DONE: + self.attempts += 1 + + def get_instance_state(self): + """ + Get the current score and state + """ + + state = { + 'version': self.STATE_VERSION, + 'history': self.history, + 'state': self.state, + 'max_score': self._max_score, + 'attempts': self.attempts, + 'created': False, + } + return json.dumps(state) + + def _allow_reset(self): + """Can the module be reset?""" + return (self.state == self.DONE and self.attempts < self.max_attempts) + + def max_score(self): + """ + Return max_score + """ + return self._max_score + + def get_score(self): + """ + Returns the last score in the list + """ + score = self.latest_score() + return {'score': score if score is not None else 0, + 'total': self._max_score} + + def reset(self, system): + """ + If resetting is allowed, reset the state. + + Returns {'success': bool, 'error': msg} + (error only present if not success) + """ + self.change_state(self.INITIAL) + return {'success': True} + + def get_progress(self): + ''' + For now, just return last score / max_score + ''' + if self._max_score > 0: + try: + return Progress(self.get_score()['score'], self._max_score) + except Exception as err: + log.exception("Got bad progress") + return None + return None + + def out_of_sync_error(self, get, msg=''): + """ + return dict out-of-sync error message, and also log. + """ + log.warning("Assessment module state out sync. state: %r, get: %r. %s", + self.state, get, msg) + return {'success': False, + 'error': 'The problem state got out-of-sync'} + + def get_html(self): + """ + Needs to be implemented by inheritors. Renders the HTML that students see. + @return: + """ + pass + + def handle_ajax(self): + """ + Needs to be implemented by child modules. 
Handles AJAX events. + @return: + """ + pass + + def is_submission_correct(self, score): + """ + Checks to see if a given score makes the answer correct. Very naive right now (>66% is correct) + @param score: Numeric score. + @return: Boolean correct. + """ + correct = False + if(isinstance(score, (int, long, float, complex))): + score_ratio = int(score) / float(self.max_score()) + correct = (score_ratio >= 0.66) + return correct + + def is_last_response_correct(self): + """ + Checks to see if the last response in the module is correct. + @return: 'correct' if correct, otherwise 'incorrect' + """ + score = self.get_score()['score'] + correctness = 'correct' if self.is_submission_correct(score) else 'incorrect' + return correctness + + + diff --git a/common/lib/xmodule/xmodule/raw_module.py b/common/lib/xmodule/xmodule/raw_module.py index 5ff16098ac..efdd2e7ba0 100644 --- a/common/lib/xmodule/xmodule/raw_module.py +++ b/common/lib/xmodule/xmodule/raw_module.py @@ -13,7 +13,7 @@ class RawDescriptor(XmlDescriptor, XMLEditingDescriptor): """ @classmethod def definition_from_xml(cls, xml_object, system): - return {'data': etree.tostring(xml_object, pretty_print=True)} + return {'data': etree.tostring(xml_object, pretty_print=True,encoding='unicode')} def definition_to_xml(self, resource_fs): try: diff --git a/common/lib/xmodule/xmodule/self_assessment_module.py b/common/lib/xmodule/xmodule/self_assessment_module.py index 2edf5467b2..940b61c557 100644 --- a/common/lib/xmodule/xmodule/self_assessment_module.py +++ b/common/lib/xmodule/xmodule/self_assessment_module.py @@ -1,189 +1,92 @@ -""" -A Self Assessment module that allows students to write open-ended responses, -submit, then see a rubric and rate themselves. Persists student supplied -hints, answers, and assessment judgment (currently only correct/incorrect). -Parses xml definition file--see below for exact format. 
-""" - import copy from fs.errors import ResourceNotFoundError +import itertools +import json import logging -import os -import sys from lxml import etree from lxml.html import rewrite_links from path import path -import json -from progress import Progress +import os +import sys from pkg_resources import resource_string from .capa_module import only_one, ComplexEncoder from .editing_module import EditingDescriptor from .html_checker import check_html +from progress import Progress from .stringify import stringify_children from .x_module import XModule from .xml_module import XmlDescriptor from xmodule.modulestore import Location +import openendedchild + +from combined_open_ended_rubric import CombinedOpenEndedRubric log = logging.getLogger("mitx.courseware") -# Set the default number of max attempts. Should be 1 for production -# Set higher for debugging/testing -# attempts specified in xml definition overrides this. -MAX_ATTEMPTS = 1 - -# Set maximum available number of points. -# Overriden by max_score specified in xml. -MAX_SCORE = 1 - -class SelfAssessmentModule(XModule): +class SelfAssessmentModule(openendedchild.OpenEndedChild): """ - States: + A Self Assessment module that allows students to write open-ended responses, + submit, then see a rubric and rate themselves. Persists student supplied + hints, answers, and assessment judgment (currently only correct/incorrect). + Parses xml definition file--see below for exact format. - initial (prompt, textbox shown) - | - assessing (read-only textbox, rubric + assessment input shown) - | - request_hint (read-only textbox, read-only rubric and assessment, hint input box shown) - | - done (submitted msg, green checkmark, everything else read-only. If attempts < max, shows - a reset button that goes back to initial state. Saves previous - submissions too.) + Sample XML format: + + + What hint about this problem would you give to someone? + + + Save Succcesful. Thanks for participating! 
+ + """ - # states - INITIAL = 'initial' - ASSESSING = 'assessing' - REQUEST_HINT = 'request_hint' - DONE = 'done' - - js = {'coffee': [resource_string(__name__, 'js/src/selfassessment/display.coffee')]} - js_module_name = "SelfAssessment" - - def __init__(self, system, location, definition, descriptor, - instance_state=None, shared_state=None, **kwargs): - XModule.__init__(self, system, location, definition, descriptor, - instance_state, shared_state, **kwargs) - + def setup_response(self, system, location, definition, descriptor): """ - Definition file should have 4 blocks -- prompt, rubric, submitmessage, hintprompt, - and two optional attributes: - attempts, which should be an integer that defaults to 1. - If it's > 1, the student will be able to re-submit after they see - the rubric. - max_score, which should be an integer that defaults to 1. - It defines the maximum number of points a student can get. Assumed to be integer scale - from 0 to max_score, with an interval of 1. - - Note: all the submissions are stored. - - Sample file: - - - - Insert prompt text here. (arbitrary html) - - - Insert grading rubric here. (arbitrary html) - - - Please enter a hint below: (arbitrary html) - - - Thanks for submitting! (arbitrary html) - - + Sets up the module + @param system: Modulesystem + @param location: location, to let the module know where it is. + @param definition: XML definition of the module. + @param descriptor: SelfAssessmentDescriptor + @return: None """ - - # Load instance state - if instance_state is not None: - instance_state = json.loads(instance_state) - else: - instance_state = {} - - # Note: score responses are on scale from 0 to max_score - self.student_answers = instance_state.get('student_answers', []) - self.scores = instance_state.get('scores', []) - self.hints = instance_state.get('hints', []) - - self.state = instance_state.get('state', 'initial') - - # Used for progress / grading. 
Currently get credit just for - # completion (doesn't matter if you self-assessed correct/incorrect). - - self._max_score = int(self.metadata.get('max_score', MAX_SCORE)) - - self.attempts = instance_state.get('attempts', 0) - - self.max_attempts = int(self.metadata.get('attempts', MAX_ATTEMPTS)) - - self.rubric = definition['rubric'] - self.prompt = definition['prompt'] self.submit_message = definition['submitmessage'] self.hint_prompt = definition['hintprompt'] + self.prompt = stringify_children(self.prompt) + self.rubric = stringify_children(self.rubric) - def _allow_reset(self): - """Can the module be reset?""" - return self.state == self.DONE and self.attempts < self.max_attempts - - def get_html(self): + def get_html(self, system): + """ + Gets context and renders HTML that represents the module + @param system: Modulesystem + @return: Rendered HTML + """ #set context variables and render template - if self.state != self.INITIAL and self.student_answers: - previous_answer = self.student_answers[-1] + if self.state != self.INITIAL: + latest = self.latest_answer() + previous_answer = latest if latest is not None else '' else: previous_answer = '' context = { 'prompt': self.prompt, 'previous_answer': previous_answer, - 'ajax_url': self.system.ajax_url, - 'initial_rubric': self.get_rubric_html(), - 'initial_hint': self.get_hint_html(), + 'ajax_url': system.ajax_url, + 'initial_rubric': self.get_rubric_html(system), + 'initial_hint': self.get_hint_html(system), 'initial_message': self.get_message_html(), 'state': self.state, 'allow_reset': self._allow_reset(), + 'child_type': 'selfassessment', } - html = self.system.render_template('self_assessment_prompt.html', context) - # cdodge: perform link substitutions for any references to course static content (e.g. 
images) - return rewrite_links(html, self.rewrite_content_links) - - def get_score(self): - """ - Returns dict with 'score' key - """ - return {'score': self.get_last_score()} - - def max_score(self): - """ - Return max_score - """ - return self._max_score - - def get_last_score(self): - """ - Returns the last score in the list - """ - last_score=0 - if(len(self.scores)>0): - last_score=self.scores[len(self.scores)-1] - return last_score - - def get_progress(self): - ''' - For now, just return last score / max_score - ''' - if self._max_score > 0: - try: - return Progress(self.get_last_score(), self._max_score) - except Exception as err: - log.exception("Got bad progress") - return None - return None + html = system.render_template('self_assessment_prompt.html', context) + return html - def handle_ajax(self, dispatch, get): + def handle_ajax(self, dispatch, get, system): """ This is called by courseware.module_render, to handle an AJAX call. "get" is request.POST. @@ -197,15 +100,14 @@ class SelfAssessmentModule(XModule): handlers = { 'save_answer': self.save_answer, 'save_assessment': self.save_assessment, - 'save_hint': self.save_hint, - 'reset': self.reset, + 'save_post_assessment': self.save_hint, } if dispatch not in handlers: return 'Error' before = self.get_progress() - d = handlers[dispatch](get) + d = handlers[dispatch](get, system) after = self.get_progress() d.update({ 'progress_changed': after != before, @@ -213,60 +115,54 @@ class SelfAssessmentModule(XModule): }) return json.dumps(d, cls=ComplexEncoder) - def out_of_sync_error(self, get, msg=''): - """ - return dict out-of-sync error message, and also log. - """ - log.warning("Assessment module state out sync. state: %r, get: %r. %s", - self.state, get, msg) - return {'success': False, - 'error': 'The problem state got out-of-sync'} - - def get_rubric_html(self): + def get_rubric_html(self, system): """ Return the appropriate version of the rubric, based on the state. 
""" if self.state == self.INITIAL: return '' + rubric_html = CombinedOpenEndedRubric.render_rubric(self.rubric) + # we'll render it - context = {'rubric': self.rubric, - 'max_score' : self._max_score, - } + context = {'rubric': rubric_html, + 'max_score': self._max_score, + } if self.state == self.ASSESSING: context['read_only'] = False - elif self.state in (self.REQUEST_HINT, self.DONE): + elif self.state in (self.POST_ASSESSMENT, self.DONE): context['read_only'] = True else: raise ValueError("Illegal state '%r'" % self.state) - return self.system.render_template('self_assessment_rubric.html', context) + return system.render_template('self_assessment_rubric.html', context) - def get_hint_html(self): + def get_hint_html(self, system): """ Return the appropriate version of the hint view, based on state. """ if self.state in (self.INITIAL, self.ASSESSING): return '' - if self.state == self.DONE and len(self.hints) > 0: + if self.state == self.DONE: # display the previous hint - hint = self.hints[-1] + latest = self.latest_post_assessment() + hint = latest if latest is not None else '' else: hint = '' context = {'hint_prompt': self.hint_prompt, 'hint': hint} - if self.state == self.REQUEST_HINT: + if self.state == self.POST_ASSESSMENT: context['read_only'] = False elif self.state == self.DONE: context['read_only'] = True else: raise ValueError("Illegal state '%r'" % self.state) - return self.system.render_template('self_assessment_hint.html', context) + return system.render_template('self_assessment_hint.html', context) def get_message_html(self): """ @@ -278,9 +174,17 @@ class SelfAssessmentModule(XModule): return """
{0}
""".format(self.submit_message) - def save_answer(self, get): + def save_answer(self, get, system): """ After the answer is submitted, show the rubric. + + Args: + get: the GET dictionary passed to the ajax request. Should contain + a key 'student_answer' + + Returns: + Dictionary with keys 'success' and either 'error' (if not success), + or 'rubric_html' (if success). """ # Check to see if attempts are less than max if self.attempts > self.max_attempts: @@ -295,15 +199,16 @@ class SelfAssessmentModule(XModule): if self.state != self.INITIAL: return self.out_of_sync_error(get) - self.student_answers.append(get['student_answer']) - self.state = self.ASSESSING + # add new history element with answer and empty score and hint. + self.new_history_entry(get['student_answer']) + self.change_state(self.ASSESSING) return { 'success': True, - 'rubric_html': self.get_rubric_html() - } + 'rubric_html': self.get_rubric_html(system) + } - def save_assessment(self, get): + def save_assessment(self, get, system): """ Save the assessment. If the student said they're right, don't ask for a hint, and go straight to the done state. Otherwise, do ask for a hint. 
@@ -318,34 +223,30 @@ class SelfAssessmentModule(XModule): 'message_html' only if success is true """ - n_answers = len(self.student_answers) - n_scores = len(self.scores) - if (self.state != self.ASSESSING or n_answers != n_scores + 1): - msg = "%d answers, %d scores" % (n_answers, n_scores) - return self.out_of_sync_error(get, msg) + if self.state != self.ASSESSING: + return self.out_of_sync_error(get) try: score = int(get['assessment']) - except: + except ValueError: return {'success': False, 'error': "Non-integer score value"} - self.scores.append(score) + self.record_latest_score(score) - d = {'success': True,} + d = {'success': True, } if score == self.max_score(): - self.state = self.DONE + self.change_state(self.DONE) d['message_html'] = self.get_message_html() d['allow_reset'] = self._allow_reset() else: - self.state = self.REQUEST_HINT - d['hint_html'] = self.get_hint_html() + self.change_state(self.POST_ASSESSMENT) + d['hint_html'] = self.get_hint_html(system) d['state'] = self.state return d - - def save_hint(self, get): + def save_hint(self, get, system): ''' Save the hint. Returns a dict { 'success': bool, @@ -355,68 +256,19 @@ class SelfAssessmentModule(XModule): with the error key only present if success is False and message_html only if True. ''' - if self.state != self.REQUEST_HINT: + if self.state != self.POST_ASSESSMENT: # Note: because we only ask for hints on wrong answers, may not have # the same number of hints and answers. return self.out_of_sync_error(get) - self.hints.append(get['hint'].lower()) - self.state = self.DONE - - # increment attempts - self.attempts = self.attempts + 1 - - # To the tracking logs! 
- event_info = { - 'selfassessment_id': self.location.url(), - 'state': { - 'student_answers': self.student_answers, - 'score': self.scores, - 'hints': self.hints, - } - } - self.system.track_function('save_hint', event_info) + self.record_latest_post_assessment(get['hint']) + self.change_state(self.DONE) return {'success': True, 'message_html': self.get_message_html(), 'allow_reset': self._allow_reset()} - def reset(self, get): - """ - If resetting is allowed, reset the state. - - Returns {'success': bool, 'error': msg} - (error only present if not success) - """ - if self.state != self.DONE: - return self.out_of_sync_error(get) - - if self.attempts > self.max_attempts: - return { - 'success': False, - 'error': 'Too many attempts.' - } - self.state = self.INITIAL - return {'success': True} - - - def get_instance_state(self): - """ - Get the current score and state - """ - - state = { - 'student_answers': self.student_answers, - 'hints': self.hints, - 'state': self.state, - 'scores': self.scores, - 'max_score': self._max_score, - 'attempts': self.attempts - } - return json.dumps(state) - - class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor): """ Module for adding self assessment questions to courses @@ -439,13 +291,11 @@ class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor): Returns: { - 'rubric': 'some-html', - 'prompt': 'some-html', 'submitmessage': 'some-html' 'hintprompt': 'some-html' } """ - expected_children = ['rubric', 'prompt', 'submitmessage', 'hintprompt'] + expected_children = ['submitmessage', 'hintprompt'] for child in expected_children: if len(xml_object.xpath(child)) != 1: raise ValueError("Self assessment definition must include exactly one '{0}' tag".format(child)) @@ -454,12 +304,9 @@ class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor): """Assumes that xml_object has child k""" return stringify_children(xml_object.xpath(k)[0]) - return {'rubric': parse('rubric'), - 'prompt': parse('prompt'), - 'submitmessage': 
parse('submitmessage'), + return {'submitmessage': parse('submitmessage'), 'hintprompt': parse('hintprompt'), - } - + } def definition_to_xml(self, resource_fs): '''Return an xml element representing this definition.''' @@ -470,7 +317,7 @@ class SelfAssessmentDescriptor(XmlDescriptor, EditingDescriptor): child_node = etree.fromstring(child_str) elt.append(child_node) - for child in ['rubric', 'prompt', 'submitmessage', 'hintprompt']: + for child in ['submitmessage', 'hintprompt']: add_child(child) return elt diff --git a/common/lib/xmodule/xmodule/seq_module.py b/common/lib/xmodule/xmodule/seq_module.py index b625646e66..c0c3ee3ca1 100644 --- a/common/lib/xmodule/xmodule/seq_module.py +++ b/common/lib/xmodule/xmodule/seq_module.py @@ -10,7 +10,7 @@ from xmodule.progress import Progress from xmodule.exceptions import NotFoundError from pkg_resources import resource_string -log = logging.getLogger("mitx.common.lib.seq_module") +log = logging.getLogger(__name__) # HACK: This shouldn't be hard-coded to two types # OBSOLETE: This obsoletes 'type' @@ -124,7 +124,7 @@ class SequenceDescriptor(MakoModuleDescriptor, XmlDescriptor): children = [] for child in xml_object: try: - children.append(system.process_xml(etree.tostring(child)).location.url()) + children.append(system.process_xml(etree.tostring(child, encoding='unicode')).location.url()) except: log.exception("Unable to load child when parsing Sequence. Continuing...") continue diff --git a/common/lib/xmodule/xmodule/stringify.py b/common/lib/xmodule/xmodule/stringify.py index 1e3fa91210..dab8ff0425 100644 --- a/common/lib/xmodule/xmodule/stringify.py +++ b/common/lib/xmodule/xmodule/stringify.py @@ -22,7 +22,7 @@ def stringify_children(node): # next element. 
parts = [node.text] for c in node.getchildren(): - parts.append(etree.tostring(c, with_tail=True)) + parts.append(etree.tostring(c, with_tail=True, encoding='unicode')) # filter removes possible Nones in texts and tails - return ''.join(filter(None, parts)) + return u''.join(filter(None, parts)) diff --git a/common/lib/xmodule/xmodule/template_module.py b/common/lib/xmodule/xmodule/template_module.py index 13eab038ec..d3fb0aab5e 100644 --- a/common/lib/xmodule/xmodule/template_module.py +++ b/common/lib/xmodule/xmodule/template_module.py @@ -58,7 +58,7 @@ class CustomTagDescriptor(RawDescriptor): params = dict(xmltree.items()) with system.resources_fs.open('custom_tags/{name}' .format(name=template_name)) as template: - return Template(template.read()).render(**params) + return Template(template.read().decode('utf-8')).render(**params) def __init__(self, system, definition, **kwargs): diff --git a/common/lib/xmodule/xmodule/tests/__init__.py b/common/lib/xmodule/xmodule/tests/__init__.py index ed64c45118..a07f1ddfaf 100644 --- a/common/lib/xmodule/xmodule/tests/__init__.py +++ b/common/lib/xmodule/xmodule/tests/__init__.py @@ -4,7 +4,7 @@ unittests for xmodule Run like this: rake test_common/lib/xmodule - + """ import unittest @@ -19,11 +19,12 @@ import xmodule from xmodule.x_module import ModuleSystem from mock import Mock -i4xs = ModuleSystem( +test_system = ModuleSystem( ajax_url='courses/course_id/modx/a_location', track_function=Mock(), get_module=Mock(), - render_template=Mock(), + # "render" to just the context... 
+ render_template=lambda template, context: str(context), replace_urls=Mock(), user=Mock(), filestore=Mock(), diff --git a/common/lib/xmodule/xmodule/tests/test_course_module.py b/common/lib/xmodule/xmodule/tests/test_course_module.py new file mode 100644 index 0000000000..63eaec1f61 --- /dev/null +++ b/common/lib/xmodule/xmodule/tests/test_course_module.py @@ -0,0 +1,90 @@ +import unittest +from time import strptime, gmtime +from fs.memoryfs import MemoryFS + +from mock import Mock, patch + +from xmodule.modulestore.xml import ImportSystem, XMLModuleStore + + +ORG = 'test_org' +COURSE = 'test_course' + +NOW = strptime('2013-01-01T01:00:00', '%Y-%m-%dT%H:%M:00') + + +class DummySystem(ImportSystem): + @patch('xmodule.modulestore.xml.OSFS', lambda dir: MemoryFS()) + def __init__(self, load_error_modules): + + xmlstore = XMLModuleStore("data_dir", course_dirs=[], + load_error_modules=load_error_modules) + course_id = "/".join([ORG, COURSE, 'test_run']) + course_dir = "test_dir" + policy = {} + error_tracker = Mock() + parent_tracker = Mock() + + super(DummySystem, self).__init__( + xmlstore, + course_id, + course_dir, + policy, + error_tracker, + parent_tracker, + load_error_modules=load_error_modules, + ) + + +class IsNewCourseTestCase(unittest.TestCase): + """Make sure the property is_new works on courses""" + @staticmethod + def get_dummy_course(start, is_new=None, load_error_modules=True): + """Get a dummy course""" + + system = DummySystem(load_error_modules) + is_new = '' if is_new is None else 'is_new="{0}"'.format(is_new).lower() + + start_xml = ''' + + + Two houses, ... 
+ + + '''.format(org=ORG, course=COURSE, start=start, is_new=is_new) + + return system.process_xml(start_xml) + + @patch('xmodule.course_module.time.gmtime') + def test_non_started_yet(self, gmtime_mock): + descriptor = self.get_dummy_course(start='2013-01-05T12:00') + gmtime_mock.return_value = NOW + assert(descriptor.is_new == True) + assert(descriptor.days_until_start == 4) + + @patch('xmodule.course_module.time.gmtime') + def test_already_started(self, gmtime_mock): + gmtime_mock.return_value = NOW + + descriptor = self.get_dummy_course(start='2012-12-02T12:00') + assert(descriptor.is_new == False) + assert(descriptor.days_until_start < 0) + + @patch('xmodule.course_module.time.gmtime') + def test_is_new_set(self, gmtime_mock): + gmtime_mock.return_value = NOW + + descriptor = self.get_dummy_course(start='2012-12-02T12:00', is_new=True) + assert(descriptor.is_new == True) + assert(descriptor.days_until_start < 0) + + descriptor = self.get_dummy_course(start='2013-02-02T12:00', is_new=False) + assert(descriptor.is_new == False) + assert(descriptor.days_until_start > 0) + + descriptor = self.get_dummy_course(start='2013-02-02T12:00', is_new=True) + assert(descriptor.is_new == True) + assert(descriptor.days_until_start > 0) diff --git a/common/lib/xmodule/xmodule/tests/test_export.py b/common/lib/xmodule/xmodule/tests/test_export.py index aeebc6da6b..f92d58db03 100644 --- a/common/lib/xmodule/xmodule/tests/test_export.py +++ b/common/lib/xmodule/xmodule/tests/test_export.py @@ -39,9 +39,12 @@ def strip_filenames(descriptor): class RoundTripTestCase(unittest.TestCase): - '''Check that our test courses roundtrip properly''' + ''' Check that our test courses roundtrip properly. + Same course imported , than exported, then imported again. + And we compare original import with second import (after export). + Thus we make sure that export and import work properly. 
+ ''' def check_export_roundtrip(self, data_dir, course_dir): - root_dir = path(mkdtemp()) print "Copying test course to temp dir {0}".format(root_dir) @@ -117,3 +120,11 @@ class RoundTripTestCase(unittest.TestCase): def test_selfassessment_roundtrip(self): #Test selfassessment xmodule to see if it exports correctly self.check_export_roundtrip(DATA_DIR,"self_assessment") + + def test_graphicslidertool_roundtrip(self): + #Test graphicslidertool xmodule to see if it exports correctly + self.check_export_roundtrip(DATA_DIR,"graphic_slider_tool") + + def test_exam_registration_roundtrip(self): + # Test exam_registration xmodule to see if it exports correctly + self.check_export_roundtrip(DATA_DIR,"test_exam_registration") diff --git a/common/lib/xmodule/xmodule/tests/test_import.py b/common/lib/xmodule/xmodule/tests/test_import.py index 77532959d7..90ec112f19 100644 --- a/common/lib/xmodule/xmodule/tests/test_import.py +++ b/common/lib/xmodule/xmodule/tests/test_import.py @@ -352,3 +352,19 @@ class ImportTestCase(unittest.TestCase): sa_sample = modulestore.get_instance(sa_id, location) #10 attempts is hard coded into SampleQuestion, which is the url_name of a selfassessment xml tag self.assertEqual(sa_sample.metadata['attempts'], '10') + + def test_graphicslidertool_import(self): + ''' + Check to see if definition_from_xml in gst_module.py + works properly. Pulls data from the graphic_slider_tool directory + in the test data directory. 
+ ''' + modulestore = XMLModuleStore(DATA_DIR, course_dirs=['graphic_slider_tool']) + + sa_id = "edX/gst_test/2012_Fall" + location = Location(["i4x", "edX", "gst_test", "graphical_slider_tool", "sample_gst"]) + gst_sample = modulestore.get_instance(sa_id, location) + render_string_from_sample_gst_xml = """ + \ +""".strip() + self.assertEqual(gst_sample.definition['render'], render_string_from_sample_gst_xml) diff --git a/common/lib/xmodule/xmodule/tests/test_progress.py b/common/lib/xmodule/xmodule/tests/test_progress.py index 94a0a19d7c..cb011cdc2b 100644 --- a/common/lib/xmodule/xmodule/tests/test_progress.py +++ b/common/lib/xmodule/xmodule/tests/test_progress.py @@ -5,7 +5,7 @@ import unittest from xmodule.progress import Progress from xmodule import x_module -from . import i4xs +from . import test_system class ProgressTest(unittest.TestCase): ''' Test that basic Progress objects work. A Progress represents a @@ -133,6 +133,6 @@ class ModuleProgressTest(unittest.TestCase): ''' def test_xmodule_default(self): '''Make sure default get_progress exists, returns None''' - xm = x_module.XModule(i4xs, 'a://b/c/d/e', None, {}) + xm = x_module.XModule(test_system, 'a://b/c/d/e', None, {}) p = xm.get_progress() self.assertEqual(p, None) diff --git a/common/lib/xmodule/xmodule/tests/test_self_assessment.py b/common/lib/xmodule/xmodule/tests/test_self_assessment.py new file mode 100644 index 0000000000..d89190b1e0 --- /dev/null +++ b/common/lib/xmodule/xmodule/tests/test_self_assessment.py @@ -0,0 +1,54 @@ +import json +from mock import Mock +import unittest + +from xmodule.self_assessment_module import SelfAssessmentModule +from xmodule.modulestore import Location + +from . 
import test_system + +class SelfAssessmentTest(unittest.TestCase): + + definition = {'rubric': 'A rubric', + 'prompt': 'Who?', + 'submitmessage': 'Shall we submit now?', + 'hintprompt': 'Consider this...', + } + + location = Location(["i4x", "edX", "sa_test", "selfassessment", + "SampleQuestion"]) + + metadata = {'attempts': '10'} + + descriptor = Mock() + + def test_import(self): + state = json.dumps({'student_answers': ["Answer 1", "answer 2", "answer 3"], + 'scores': [0, 1], + 'hints': ['o hai'], + 'state': SelfAssessmentModule.ASSESSING, + 'attempts': 2}) + + module = SelfAssessmentModule(test_system, self.location, + self.definition, self.descriptor, + state, {}, metadata=self.metadata) + + self.assertEqual(module.get_score()['score'], 0) + + self.assertTrue('answer 3' in module.get_html()) + self.assertFalse('answer 2' in module.get_html()) + + module.save_assessment({'assessment': '0'}) + self.assertEqual(module.state, module.REQUEST_HINT) + + module.save_hint({'hint': 'hint for ans 3'}) + self.assertEqual(module.state, module.DONE) + + d = module.reset({}) + self.assertTrue(d['success']) + self.assertEqual(module.state, module.INITIAL) + + # if we now assess as right, skip the REQUEST_HINT state + module.save_answer({'student_answer': 'answer 4'}) + module.save_assessment({'assessment': '1'}) + self.assertEqual(module.state, module.DONE) diff --git a/common/lib/xmodule/xmodule/timeparse.py b/common/lib/xmodule/xmodule/timeparse.py index 117105d085..36c0f725e5 100644 --- a/common/lib/xmodule/xmodule/timeparse.py +++ b/common/lib/xmodule/xmodule/timeparse.py @@ -7,8 +7,11 @@ TIME_FORMAT = "%Y-%m-%dT%H:%M" def parse_time(time_str): """ - Takes a time string in TIME_FORMAT, returns - it as a time_struct. Raises ValueError if the string is not in the right format. + Takes a time string in TIME_FORMAT + + Returns it as a time_struct. + + Raises ValueError if the string is not in the right format. 
""" return time.strptime(time_str, TIME_FORMAT) diff --git a/common/lib/xmodule/xmodule/video_module.py b/common/lib/xmodule/xmodule/video_module.py index 9a22950ca8..801e70fd06 100644 --- a/common/lib/xmodule/xmodule/video_module.py +++ b/common/lib/xmodule/xmodule/video_module.py @@ -7,6 +7,9 @@ from pkg_resources import resource_string, resource_listdir from xmodule.x_module import XModule from xmodule.raw_module import RawDescriptor +import datetime +import time + log = logging.getLogger(__name__) @@ -33,6 +36,7 @@ class VideoModule(XModule): self.show_captions = xmltree.get('show_captions', 'true') self.source = self._get_source(xmltree) self.track = self._get_track(xmltree) + self.start_time, self.end_time = self._get_timeframe(xmltree) if instance_state is not None: state = json.loads(instance_state) @@ -42,11 +46,11 @@ class VideoModule(XModule): def _get_source(self, xmltree): # find the first valid source return self._get_first_external(xmltree, 'source') - + def _get_track(self, xmltree): # find the first valid track return self._get_first_external(xmltree, 'track') - + def _get_first_external(self, xmltree, tag): """ Will return the first valid element @@ -61,6 +65,23 @@ class VideoModule(XModule): break return result + def _get_timeframe(self, xmltree): + """ Converts 'from' and 'to' parameters in video tag to seconds. + If there are no parameters, returns empty string. """ + + def parse_time(s): + """Converts s in '12:34:45' format to seconds. If s is + None, returns empty string""" + if s is None: + return '' + else: + x = time.strptime(s, '%H:%M:%S') + return datetime.timedelta(hours=x.tm_hour, + minutes=x.tm_min, + seconds=x.tm_sec).total_seconds() + + return parse_time(xmltree.get('from')), parse_time(xmltree.get('to')) + def handle_ajax(self, dispatch, get): ''' Handle ajax calls to this video. 
@@ -98,11 +119,13 @@ class VideoModule(XModule): 'id': self.location.html_id(), 'position': self.position, 'source': self.source, - 'track' : self.track, + 'track': self.track, 'display_name': self.display_name, # TODO (cpennington): This won't work when we move to data that isn't on the filesystem 'data_dir': self.metadata['data_dir'], - 'show_captions': self.show_captions + 'show_captions': self.show_captions, + 'start': self.start_time, + 'end': self.end_time }) diff --git a/common/lib/xmodule/xmodule/x_module.py b/common/lib/xmodule/xmodule/x_module.py index 2b2e709bcb..a1d9cdda9b 100644 --- a/common/lib/xmodule/xmodule/x_module.py +++ b/common/lib/xmodule/xmodule/x_module.py @@ -233,17 +233,17 @@ class XModule(HTMLSnippet): self._loaded_children = [c for c in children if c is not None] return self._loaded_children - + def get_children_locations(self): ''' Returns the locations of each of child modules. - + Overriding this changes the behavior of get_children and anything that uses get_children, such as get_display_items. - + This method will not instantiate the modules of the children unless absolutely necessary, so it is cheaper to call than get_children - + These children will be the same children returned by the descriptor unless descriptor.has_dynamic_children() is true. ''' @@ -288,8 +288,20 @@ class XModule(HTMLSnippet): return '{}' def get_score(self): - ''' Score the student received on the problem. - ''' + """ + Score the student received on the problem, or None if there is no + score. + + Returns: + dictionary + {'score': integer, from 0 to get_max_score(), + 'total': get_max_score()} + + NOTE (vshnayder): not sure if this was the intended return value, but + that's what it's doing now. I suspect that we really want it to just + return a number. Would need to change (at least) capa and + modx_dispatch to match if we did that. 
+ """ return None def max_score(self): @@ -319,7 +331,7 @@ class XModule(HTMLSnippet): get is a dictionary-like object ''' return "" - # cdodge: added to support dynamic substitutions of + # cdodge: added to support dynamic substitutions of # links for courseware assets (e.g. images). is passed through from lxml.html parser def rewrite_content_links(self, link): # see if we start with our format, e.g. 'xasset:' @@ -402,13 +414,17 @@ class XModuleDescriptor(Plugin, HTMLSnippet, ResourceTemplates): 'xqa_key', # TODO: This is used by the XMLModuleStore to provide for locations for # static files, and will need to be removed when that code is removed - 'data_dir' + 'data_dir', + # How many days early to show a course element to beta testers (float) + # intended to be set per-course, but can be overridden in for specific + # elements. Can be a float. + 'days_early_for_beta' ) # cdodge: this is a list of metadata names which are 'system' metadata # and should not be edited by an end-user system_metadata_fields = [ 'data_dir' ] - + # A list of descriptor attributes that must be equal for the descriptors to # be equal equality_attributes = ('definition', 'metadata', 'location', @@ -485,12 +501,26 @@ class XModuleDescriptor(Plugin, HTMLSnippet, ResourceTemplates): @property def start(self): """ - If self.metadata contains start, return it. Else return None. + If self.metadata contains a valid start time, return it as a time struct. + Else return None. """ if 'start' not in self.metadata: return None return self._try_parse_time('start') + @property + def days_early_for_beta(self): + """ + If self.metadata contains start, return the number, as a float. Else return None. 
+ """ + if 'days_early_for_beta' not in self.metadata: + return None + try: + return float(self.metadata['days_early_for_beta']) + except ValueError: + return None + + @property def own_metadata(self): """ @@ -562,18 +592,18 @@ class XModuleDescriptor(Plugin, HTMLSnippet, ResourceTemplates): self, metadata=self.metadata ) - - + + def has_dynamic_children(self): """ Returns True if this descriptor has dynamic children for a given student when the module is created. - + Returns False if the children of this descriptor are the same - children that the module will return for any student. + children that the module will return for any student. """ return False - + # ================================= JSON PARSING =========================== @staticmethod @@ -703,7 +733,8 @@ class XModuleDescriptor(Plugin, HTMLSnippet, ResourceTemplates): """ Parse an optional metadata key containing a time: if present, complain if it doesn't parse. - Return None if not present or invalid. + + Returns a time_struct, or None if metadata key is not present or is invalid. """ if key in self.metadata: try: @@ -797,7 +828,8 @@ class ModuleSystem(object): debug=False, xqueue=None, node_path="", - anonymous_student_id=''): + anonymous_student_id='', + course_id=None): ''' Create a closure around the system environment. @@ -832,6 +864,8 @@ class ModuleSystem(object): ajax results. 
anonymous_student_id - Used for tracking modules with student id + + course_id - the course_id containing this module ''' self.ajax_url = ajax_url self.xqueue = xqueue @@ -844,6 +878,7 @@ class ModuleSystem(object): self.replace_urls = replace_urls self.node_path = node_path self.anonymous_student_id = anonymous_student_id + self.course_id = course_id self.user_is_staff = user is not None and user.is_staff def get(self, attr): diff --git a/common/lib/xmodule/xmodule/xml_module.py b/common/lib/xmodule/xmodule/xml_module.py index ec755af4ef..9ad36f633d 100644 --- a/common/lib/xmodule/xmodule/xml_module.py +++ b/common/lib/xmodule/xmodule/xml_module.py @@ -1,19 +1,20 @@ -from xmodule.x_module import (XModuleDescriptor, policy_key) -from xmodule.modulestore import Location -from lxml import etree import json import copy import logging -import traceback -from collections import namedtuple -from fs.errors import ResourceNotFoundError import os import sys +from collections import namedtuple +from lxml import etree + +from xmodule.x_module import (XModuleDescriptor, policy_key) +from xmodule.modulestore import Location log = logging.getLogger(__name__) +# assume all XML files are persisted as utf-8. edx_xml_parser = etree.XMLParser(dtd_validation=False, load_dtd=False, - remove_comments=True, remove_blank_text=True) + remove_comments=True, remove_blank_text=True, + encoding='utf-8') def name_to_pathname(name): """ @@ -93,12 +94,18 @@ class XmlDescriptor(XModuleDescriptor): 'start', 'due', 'graded', 'display_name', 'url_name', 'hide_from_toc', 'ispublic', # if True, then course is listed for all users; see 'xqa_key', # for xqaa server access + # information about testcenter exams is a dict (of dicts), not a string, + # so it cannot be easily exportable as a course element's attribute. + 'testcenter_info', # VS[compat] Remove once unused. 
'name', 'slug') metadata_to_strip = ('data_dir', - # VS[compat] -- remove the below attrs once everything is in the CMS - 'course', 'org', 'url_name', 'filename') + # information about testcenter exams is a dict (of dicts), not a string, + # so it cannot be easily exportable as a course element's attribute. + 'testcenter_info', + # VS[compat] -- remove the below attrs once everything is in the CMS + 'course', 'org', 'url_name', 'filename') # A dictionary mapping xml attribute names AttrMaps that describe how # to import and export them @@ -366,7 +373,7 @@ class XmlDescriptor(XModuleDescriptor): filepath = self.__class__._format_filepath(self.category, url_path) resource_fs.makedir(os.path.dirname(filepath), allow_recreate=True) with resource_fs.open(filepath, 'w') as file: - file.write(etree.tostring(xml_object, pretty_print=True)) + file.write(etree.tostring(xml_object, pretty_print=True, encoding='utf-8')) # And return just a pointer with the category and filename. record_object = etree.Element(self.category) @@ -381,7 +388,7 @@ class XmlDescriptor(XModuleDescriptor): record_object.set('org', self.location.org) record_object.set('course', self.location.course) - return etree.tostring(record_object, pretty_print=True) + return etree.tostring(record_object, pretty_print=True, encoding='utf-8') def definition_to_xml(self, resource_fs): """ diff --git a/common/static/js/vendor/RequireJS.js b/common/static/js/vendor/RequireJS.js new file mode 100644 index 0000000000..a0526930ef --- /dev/null +++ b/common/static/js/vendor/RequireJS.js @@ -0,0 +1,57 @@ +/* + * This file is a wrapper for the Require JS file and module loader. Please see + * the discussion at: + * + * https://edx-wiki.atlassian.net/wiki/display/LMS/Integration+of+Require+JS+into+the+system + */ + +var RequireJS = function() { + +// Below is the unmodified minified version of Require JS. 
The latest can be +// found at: +// +// http://requirejs.org/docs/download.html + +/* + RequireJS 2.1.2 Copyright (c) 2010-2012, The Dojo Foundation All Rights Reserved. + Available via the MIT or new BSD license. + see: http://github.com/jrburke/requirejs for details +*/ +var requirejs,require,define; +(function(Y){function H(b){return"[object Function]"===L.call(b)}function I(b){return"[object Array]"===L.call(b)}function x(b,c){if(b){var d;for(d=0;dthis.depCount&&!this.defined){if(H(n)){if(this.events.error)try{e=j.execCb(c,n,b,e)}catch(d){a=d}else e=j.execCb(c,n,b,e);this.map.isDefine&&((b=this.module)&&void 0!==b.exports&&b.exports!==this.exports?e=b.exports:void 0===e&&this.usingExports&&(e=this.exports));if(a)return a.requireMap=this.map,a.requireModules=[this.map.id],a.requireType="define",C(this.error=a)}else e=n;this.exports=e;if(this.map.isDefine&& +!this.ignore&&(p[c]=e,l.onResourceLoad))l.onResourceLoad(j,this.map,this.depMaps);delete k[c];this.defined=!0}this.defining=!1;this.defined&&!this.defineEmitted&&(this.defineEmitted=!0,this.emit("defined",this.exports),this.defineEmitComplete=!0)}}else this.fetch()}},callPlugin:function(){var a=this.map,b=a.id,d=h(a.prefix);this.depMaps.push(d);s(d,"defined",t(this,function(e){var n,d;d=this.map.name;var v=this.map.parentMap?this.map.parentMap.name:null,f=j.makeRequire(a.parentMap,{enableBuildCallback:!0, +skipMap:!0});if(this.map.unnormalized){if(e.normalize&&(d=e.normalize(d,function(a){return c(a,v,!0)})||""),e=h(a.prefix+"!"+d,this.map.parentMap),s(e,"defined",t(this,function(a){this.init([],function(){return a},null,{enabled:!0,ignore:!0})})),d=i(k,e.id)){this.depMaps.push(e);if(this.events.error)d.on("error",t(this,function(a){this.emit("error",a)}));d.enable()}}else n=t(this,function(a){this.init([],function(){return a},null,{enabled:!0})}),n.error=t(this,function(a){this.inited=!0;this.error= +a;a.requireModules=[b];E(k,function(a){0===a.map.id.indexOf(b+"_unnormalized")&&delete 
k[a.map.id]});C(a)}),n.fromText=t(this,function(e,c){var d=a.name,u=h(d),v=O;c&&(e=c);v&&(O=!1);q(u);r(m.config,b)&&(m.config[d]=m.config[b]);try{l.exec(e)}catch(k){throw Error("fromText eval for "+d+" failed: "+k);}v&&(O=!0);this.depMaps.push(u);j.completeLoad(d);f([d],n)}),e.load(a.name,f,n,m)}));j.enable(d,this);this.pluginMaps[d.id]=d},enable:function(){this.enabling=this.enabled=!0;x(this.depMaps,t(this,function(a, +b){var c,e;if("string"===typeof a){a=h(a,this.map.isDefine?this.map:this.map.parentMap,!1,!this.skipMap);this.depMaps[b]=a;if(c=i(N,a.id)){this.depExports[b]=c(this);return}this.depCount+=1;s(a,"defined",t(this,function(a){this.defineDep(b,a);this.check()}));this.errback&&s(a,"error",this.errback)}c=a.id;e=k[c];!r(N,c)&&(e&&!e.enabled)&&j.enable(a,this)}));E(this.pluginMaps,t(this,function(a){var b=i(k,a.id);b&&!b.enabled&&j.enable(a,this)}));this.enabling=!1;this.check()},on:function(a,b){var c= +this.events[a];c||(c=this.events[a]=[]);c.push(b)},emit:function(a,b){x(this.events[a],function(a){a(b)});"error"===a&&delete this.events[a]}};j={config:m,contextName:b,registry:k,defined:p,urlFetched:S,defQueue:F,Module:W,makeModuleMap:h,nextTick:l.nextTick,configure:function(a){a.baseUrl&&"/"!==a.baseUrl.charAt(a.baseUrl.length-1)&&(a.baseUrl+="/");var b=m.pkgs,c=m.shim,e={paths:!0,config:!0,map:!0};E(a,function(a,b){e[b]?"map"===b?Q(m[b],a,!0,!0):Q(m[b],a,!0):m[b]=a});a.shim&&(E(a.shim,function(a, +b){I(a)&&(a={deps:a});if((a.exports||a.init)&&!a.exportsFn)a.exportsFn=j.makeShimExports(a);c[b]=a}),m.shim=c);a.packages&&(x(a.packages,function(a){a="string"===typeof a?{name:a}:a;b[a.name]={name:a.name,location:a.location||a.name,main:(a.main||"main").replace(ga,"").replace(aa,"")}}),m.pkgs=b);E(k,function(a,b){!a.inited&&!a.map.unnormalized&&(a.map=h(b))});if(a.deps||a.callback)j.require(a.deps||[],a.callback)},makeShimExports:function(a){return function(){var b;a.init&&(b=a.init.apply(Y,arguments)); +return 
b||a.exports&&Z(a.exports)}},makeRequire:function(a,d){function f(e,c,u){var i,m;d.enableBuildCallback&&(c&&H(c))&&(c.__requireJsBuild=!0);if("string"===typeof e){if(H(c))return C(J("requireargs","Invalid require call"),u);if(a&&r(N,e))return N[e](k[a.id]);if(l.get)return l.get(j,e,a);i=h(e,a,!1,!0);i=i.id;return!r(p,i)?C(J("notloaded",'Module name "'+i+'" has not been loaded yet for context: '+b+(a?"":". Use require([])"))):p[i]}K();j.nextTick(function(){K();m=q(h(null,a));m.skipMap=d.skipMap; +m.init(e,c,u,{enabled:!0});B()});return f}d=d||{};Q(f,{isBrowser:z,toUrl:function(b){var d=b.lastIndexOf("."),g=null;-1!==d&&(g=b.substring(d,b.length),b=b.substring(0,d));return j.nameToUrl(c(b,a&&a.id,!0),g)},defined:function(b){return r(p,h(b,a,!1,!0).id)},specified:function(b){b=h(b,a,!1,!0).id;return r(p,b)||r(k,b)}});a||(f.undef=function(b){w();var c=h(b,a,!0),d=i(k,b);delete p[b];delete S[c.url];delete X[b];d&&(d.events.defined&&(X[b]=d.events),delete k[b])});return f},enable:function(a){i(k, +a.id)&&q(a).enable()},completeLoad:function(a){var b,c,d=i(m.shim,a)||{},h=d.exports;for(w();F.length;){c=F.shift();if(null===c[0]){c[0]=a;if(b)break;b=!0}else c[0]===a&&(b=!0);D(c)}c=i(k,a);if(!b&&!r(p,a)&&c&&!c.inited){if(m.enforceDefine&&(!h||!Z(h)))return y(a)?void 0:C(J("nodefine","No define call for "+a,null,[a]));D([a,d.deps||[],d.exportsFn])}B()},nameToUrl:function(a,b){var c,d,h,f,j,k;if(l.jsExtRegExp.test(a))f=a+(b||"");else{c=m.paths;d=m.pkgs;f=a.split("/");for(j=f.length;0f.attachEvent.toString().indexOf("[native code"))&&!V?(O=!0,f.attachEvent("onreadystatechange", +b.onScriptLoad)):(f.addEventListener("load",b.onScriptLoad,!1),f.addEventListener("error",b.onScriptError,!1)),f.src=d,K=f,D?A.insertBefore(f,D):A.appendChild(f),K=null,f;$&&(importScripts(d),b.completeLoad(c))};z&&M(document.getElementsByTagName("script"),function(b){A||(A=b.parentNode);if(s=b.getAttribute("data-main"))return 
q.baseUrl||(G=s.split("/"),ba=G.pop(),ca=G.length?G.join("/")+"/":"./",q.baseUrl=ca,s=ba),s=s.replace(aa,""),q.deps=q.deps?q.deps.concat(s):[s],!0});define=function(b,c,d){var i, +f;"string"!==typeof b&&(d=c,c=b,b=null);I(c)||(d=c,c=[]);!c.length&&H(d)&&d.length&&(d.toString().replace(ia,"").replace(ja,function(b,d){c.push(d)}),c=(1===d.length?["require"]:["require","exports","module"]).concat(c));if(O){if(!(i=K))P&&"interactive"===P.readyState||M(document.getElementsByTagName("script"),function(b){if("interactive"===b.readyState)return P=b}),i=P;i&&(b||(b=i.getAttribute("data-requiremodule")),f=B[i.getAttribute("data-requirecontext")])}(f?f.defQueue:R).push([b,c,d])};define.amd= +{jQuery:!0};l.exec=function(b){return eval(b)};l(q)}})(this); + +// The object which will be globally available via RequireJS variable. +return { + 'requirejs': requirejs, + 'require': require, + 'define': define +}; +}(); // End-of: var RequireJS = function() diff --git a/common/templates/mathjax_include.html b/common/templates/mathjax_include.html index 31a5358ece..803f2145a4 100644 --- a/common/templates/mathjax_include.html +++ b/common/templates/mathjax_include.html @@ -33,4 +33,4 @@ - + diff --git a/common/test/data/full/about/faq.html b/common/test/data/full/about/faq.html index a5e54c9f15..a173e46753 100644 --- a/common/test/data/full/about/faq.html +++ b/common/test/data/full/about/faq.html @@ -6,7 +6,7 @@

No - anyone and everyone is welcome to take this course.

  • What textbook should I buy? -

    Although the lectures are designed to be self-contained, we recommend (but do not require) that students refer to the book Worlds Together, Worlds Apart: A History of the World: From 1000 CE to the Present (W W Norton, 3rd edition) -- Volume II, which was written specifically for this course.

    +

    Although the lectures are designed to be self-contained, we recommend (but do not require) that students refer to the book Worlds Together, Worlds Apart: A History of the World: From 1000 CE to the Present (W W Norton, 3rd edition) — Volume II, which was written specifically for this course.

  • Does Harvard award credentials or reports regarding my work in this course?

    Princeton does not award credentials or issue reports for student work in this course. However, Coursera could maintain a record of your score on the assessments and, with your permission, verify that score for authorized parties.

    diff --git a/common/test/data/full/chapter/Overview.xml b/common/test/data/full/chapter/Overview.xml index a11a11a1e0..8ad44b366c 100644 --- a/common/test/data/full/chapter/Overview.xml +++ b/common/test/data/full/chapter/Overview.xml @@ -2,7 +2,7 @@
  • OCW Problem 1-3 - Reverse engineer a black-box resistor network

  • -

    Since the course has students from a diverse set of backgrounds, the first week's tutorials includes several extra segments, worked out with greater detail, to help bring everyone up to speed.

    +

    Since the course has students from a diverse set of backgrounds, the first week's tutorials includes several extra segments, worked out with greater detail, to help bring everyone up to speed. Gratuitous ≥ entity.

    diff --git a/common/test/data/full/html/html_5555.html b/common/test/data/full/html/html_5555.html index 44a015faa1..b8352b0b4f 100644 --- a/common/test/data/full/html/html_5555.html +++ b/common/test/data/full/html/html_5555.html @@ -1 +1 @@ - Lab Introduction or Interactive Lab Usage Handout for information on how to do the lab + Lab Introduction or Interactive Lab Usage Handout for information on how to do the lab. diff --git a/common/test/data/full/html/linearity_clarify.html b/common/test/data/full/html/linearity_clarify.html index 555f394c88..a349129ff8 100644 --- a/common/test/data/full/html/linearity_clarify.html +++ b/common/test/data/full/html/linearity_clarify.html @@ -34,6 +34,6 @@ the Thevenin or Norton theorems to summarize the behavior at a pair of exposed terminals.

    - Sorry for the confusion of words -- natural language is like + Sorry for the confusion of words — natural language is like that!

    diff --git a/common/test/data/full/html/linearity_clarify.xml b/common/test/data/full/html/linearity_clarify.xml index 066b22a110..400316a63e 100644 --- a/common/test/data/full/html/linearity_clarify.xml +++ b/common/test/data/full/html/linearity_clarify.xml @@ -34,6 +34,6 @@ the Thevenin or Norton theorems to summarize the behavior at a pair of exposed terminals.

    - Sorry for the confusion of words -- natural language is like + Sorry for the confusion of words — natural language is like that!

    diff --git a/common/test/data/full/html/schematic_tutorial.html b/common/test/data/full/html/schematic_tutorial.html index 991201ab15..fb0ecdaa95 100644 --- a/common/test/data/full/html/schematic_tutorial.html +++ b/common/test/data/full/html/schematic_tutorial.html @@ -9,14 +9,14 @@ the right of the diagram area) and drag it onto the diagram. Release the mouse when the component is in the correct position. - + Move a component Click to select a component in the diagram (it will turn green) and then drag it to its new location. You can use shift-click to add a component to the current selection. Or you can click somewhere in the diagram that is not on top of a component and drag out a selection -rectangle -- components intersecting the rectangle will be added to +rectangle — components intersecting the rectangle will be added to the current selection. @@ -63,7 +63,7 @@ engineeering notation: Add a wire Wires start at connection points, the open circles that appear at the terminals of components or the ends of wires. -Click on a connection point to start a wire -- a green wire +Click on a connection point to start a wire — a green wire will appear with one end anchored at the starting point. Drag the mouse and release the mouse button when the other end of the wire is positioned as you wish. Once a wire has diff --git a/common/test/data/full/html/units_hint.html b/common/test/data/full/html/units_hint.html index 02648b31e7..72e251a034 100644 --- a/common/test/data/full/html/units_hint.html +++ b/common/test/data/full/html/units_hint.html @@ -1,4 +1,4 @@ -Hint +Hint…

    Be careful of units here. Make sure you notice multipliers such -as u, k, m, M. +as u (or μ), k, m, M. diff --git a/common/test/data/full/info/updates.html b/common/test/data/full/info/updates.html index 6531ed417d..2604dc5d9e 100644 --- a/common/test/data/full/info/updates.html +++ b/common/test/data/full/info/updates.html @@ -9,8 +9,9 @@
  • May 2

      -
    • We have opened the show-answer button on the midterm.
    • -
    • There was a four hour outage in posting ability on the discussion board Monday night. It has been fixed. We apologise for the inconvenience.
    • + +
    • We have opened the show-answer button on the midterm…
    • +
    • There was a four hour outage in posting ability on the discussion board Monday night… It has been fixed. We apologise for the inconvenience.
  • April 30

    diff --git a/common/test/data/full/problem/Circuit_Sandbox.xml b/common/test/data/full/problem/Circuit_Sandbox.xml index 89625f447b..1582f3ff0b 100644 --- a/common/test/data/full/problem/Circuit_Sandbox.xml +++ b/common/test/data/full/problem/Circuit_Sandbox.xml @@ -1,6 +1,6 @@ -

    Here's a sandbox where you can experiment with all the components +

    Here's a sandbox where you can experiment with all the components we'll discuss in 6.002x. If you click on CHECK below, your diagram -will be saved on the server and you can return at some later time. +will be saved on the server and you can return at some later time…

    correct = ['correct']
    diff --git a/common/test/data/full/problem/H1P3_Poor_Workmanship.xml b/common/test/data/full/problem/H1P3_Poor_Workmanship.xml index cf9db4053a..f32b9eb271 100644 --- a/common/test/data/full/problem/H1P3_Poor_Workmanship.xml +++ b/common/test/data/full/problem/H1P3_Poor_Workmanship.xml @@ -78,7 +78,8 @@ So the total heating power in Joe's shop was:
    -No wonder Joe was cold. + +No wonder Joe was cold… diff --git a/common/test/data/full/problem/Lab_0_Using_the_Tools.xml b/common/test/data/full/problem/Lab_0_Using_the_Tools.xml index b5f593c294..c270773da3 100644 --- a/common/test/data/full/problem/Lab_0_Using_the_Tools.xml +++ b/common/test/data/full/problem/Lab_0_Using_the_Tools.xml @@ -94,7 +94,7 @@ scope probes to nodes A, B and C and edit their properties so that the plots will be different colors. Now run a transient analysis for 5ms. Move the mouse over the plot until the marker (a vertical dashed line that follows the mouse when it's over the plot) is at approximately -1.25ms. Please report the measured voltages for nodes A, B and C. +1.25ms. Please report the measured voltages for nodes A, B and C…
    diff --git a/common/test/data/full/problem/Sample_Algebraic_Problem.xml b/common/test/data/full/problem/Sample_Algebraic_Problem.xml index 7bea1cc92e..85b9a2fcc4 100644 --- a/common/test/data/full/problem/Sample_Algebraic_Problem.xml +++ b/common/test/data/full/problem/Sample_Algebraic_Problem.xml @@ -6,7 +6,7 @@ z = "A*x^2 + sqrt(y)" Enter the algebraic expression \(A x^2 + \sqrt{y}\) in the box below. The entry is case sensitive. The product must be indicated with an asterisk, and the exponentation with a caret, so you must write -"A*x^2 + sqrt(y)". +"A*x^2 + sqrt(y)"… diff --git a/common/test/data/full/problem/Sample_Numeric_Problem.xml b/common/test/data/full/problem/Sample_Numeric_Problem.xml index f41881a028..fef9b4648c 100644 --- a/common/test/data/full/problem/Sample_Numeric_Problem.xml +++ b/common/test/data/full/problem/Sample_Numeric_Problem.xml @@ -1,6 +1,6 @@ Enter the numerical value of the expression \(x + y\) where -\(x = 3\) and \(y = 5\). +\(x = 3\) and \(y = 5\)… diff --git a/common/test/data/full/problem/choiceresponse_demo.xml b/common/test/data/full/problem/choiceresponse_demo.xml index f7d1fcf16c..7af7939d74 100644 --- a/common/test/data/full/problem/choiceresponse_demo.xml +++ b/common/test/data/full/problem/choiceresponse_demo.xml @@ -1,19 +1,20 @@ -

    Consider a hypothetical magnetic field pointing out of your computer screen. Now imagine an electron traveling from right to leftin the plane of your screen. A diagram of this situation is show below.

    +

    Consider a hypothetical magnetic field pointing out of your computer screen. Now imagine an electron traveling from right to left in the plane of your screen. A diagram of this situation is show below…

    a. The magnitude of the force experienced by the electron is proportional the product of which of the following? (Select all that apply.)

    -Magnetic field strength -Electric field strength -Electric charge of the electron -Radius of the electron -Mass of the electron -Velocity of the electron + +Magnetic field strength… +Electric field strength… +Electric charge of the electron… +Radius of the electron… +Mass of the electron… +Velocity of the electron… diff --git a/common/test/data/full/problem/codeinput_demo.xml b/common/test/data/full/problem/codeinput_demo.xml index 03d8fd8c31..a6662cb69c 100644 --- a/common/test/data/full/problem/codeinput_demo.xml +++ b/common/test/data/full/problem/codeinput_demo.xml @@ -2,7 +2,8 @@

    - Part 1: Function Types + + Part 1: Function Types…

    For each of the following functions, specify the type of its output. You can assume each function is called with an appropriate argument, as specified by its docstring.

    diff --git a/common/test/data/full/sequential/Administrivia_and_Circuit_Elements.xml b/common/test/data/full/sequential/Administrivia_and_Circuit_Elements.xml index 5c4c65f12d..26f8f5a08d 100644 --- a/common/test/data/full/sequential/Administrivia_and_Circuit_Elements.xml +++ b/common/test/data/full/sequential/Administrivia_and_Circuit_Elements.xml @@ -3,12 +3,13 @@ - S1E4 has been removed. + + S1E4 has been removed… diff --git a/common/test/data/full/vertical/vertical_89.xml b/common/test/data/full/vertical/vertical_89.xml index da15a6751a..c2b68b6bc2 100644 --- a/common/test/data/full/vertical/vertical_89.xml +++ b/common/test/data/full/vertical/vertical_89.xml @@ -1,6 +1,7 @@ -

    + +

    Inline content…

    + +and + + + ... + ... + ... + + + + + + +Without weighting, Problem 1 is worth 25% of the assignment, and Problem 2 is worth 75% of the assignment. + +Weighting for the problems can be set in the policy.json file. + + "problem/problem1": { + "weight": 2 + }, + "problem/problem2": { + "weight": 2 + }, + +With the above weighting, Problems 1 and 2 are each worth 50% of the assignment. + +Please note: When problems have weight, the point value is automatically included in the display name *except* when “weight”: 1.When “weight”: 1, no visual change occurs in the display name, leaving the point value open to interpretation to the student. + + + ## Section Weighting Once each section has a percentage score, we must total those sections into a diff --git a/doc/development.md b/doc/development.md index b4ac52d202..56415b691e 100644 --- a/doc/development.md +++ b/doc/development.md @@ -19,6 +19,11 @@ Use the MacPorts package `mongodb` or the Homebrew formula `mongodb` ## Initializing Mongodb +First start up the mongo daemon. E.g. to start it up in the background +using a config file: + + mongod --config /usr/local/etc/mongod.conf & + Check out the course data directories that you want to work with into the `GITHUB_REPO_ROOT` (by default, `../data`). Then run the following command: @@ -37,8 +42,12 @@ This runs all the tests (long, uses collectstatic): If if you aren't changing static files, can run `rake test` once, then run - rake fasttest_{lms,cms} + rake fasttest_lms +or + + rake fasttest_cms + xmodule can be tested independently, with this: rake test_common/lib/xmodule @@ -67,6 +76,15 @@ To run a single nose test: Very handy: if you uncomment the `--pdb` argument in `NOSE_ARGS` in `lms/envs/test.py`, it will drop you into pdb on error. This lets you go up and down the stack and see what the values of the variables are. Check out http://docs.python.org/library/pdb.html +## Testing using queue servers + +When testing problems that use a queue server on AWS (e.g. 
sandbox-xqueue.edx.org), you'll need to run your server on your public IP, like so. + +`django-admin.py runserver --settings=lms.envs.dev --pythonpath=. 0.0.0.0:8000` + +When you connect to the LMS, you need to use the public ip. Use `ifconfig` to figure out the numnber, and connect e.g. to `http://18.3.4.5:8000/` + + ## Content development If you change course content, while running the LMS in dev mode, it is unnecessary to restart to refresh the modulestore. diff --git a/doc/remote_gradebook.md b/doc/remote_gradebook.md new file mode 100644 index 0000000000..3743e98753 --- /dev/null +++ b/doc/remote_gradebook.md @@ -0,0 +1,47 @@ +Grades can be pushed to a remote gradebook, and course enrollment membership can be pulled from a remote gradebook. This file documents how to setup such a remote gradebook, and what the API should be for writing new remote gradebook "xservers". + +1. Definitions + +An "xserver" is a web-based server that is part of the MITx eco system. There are a number of "xserver" programs, including one which does python code grading, an xqueue server, and graders for other coding languages. + +"Stellar" is the MIT on-campus gradebook system. + +2. Setup + +The remote gradebook xserver should be specified in the lms.envs configuration using + + MITX_FEATURES[REMOTE_GRADEBOOK_URL] + +Each course, in addition, should define the name of the gradebook being used. A class "section" may also be specified. This goes in the policy.json file, eg: + + "remote_gradebook": { + "name" : "STELLAR:/project/mitxdemosite", + "section" : "r01" + }, + +3. 
The API for the remote gradebook xserver is an almost RESTful service model, which only employs POSTs, to the xserver url, with form data for the fields: + + - submit: get-assignments, get-membership, post-grades, or get-sections + - gradebook: name of gradebook + - user: username of staff person initiating the request (for logging) + - section: (optional) name of section + +The return body content should be a JSON string, of the format {'msg': message, 'data': data}. The message is displayed in the instructor dashboard. + +The data is a list of dicts (associative arrays). Each dict should be key:value. + +## For submit=post-grades: + +A file is also posted, with the field name "datafile". This file is CSV format, with two columns, one being "External email" and the other being the name of the assignment (that column contains the grades for the assignment). + +## For submit=get-assignments + +data keys = "AssignmentName" + +## For submit=get-membership + +data keys = "email", "name", "section" + +## For submit=get-sections + +data keys = "SectionName" diff --git a/doc/testing.md b/doc/testing.md index ee54ae74d9..694a9e8231 100644 --- a/doc/testing.md +++ b/doc/testing.md @@ -1,17 +1,25 @@ # Testing -Testing is good. Here is some useful info about how we set up tests-- +Testing is good. Here is some useful info about how we set up tests. +More info is [on the wiki](https://edx-wiki.atlassian.net/wiki/display/ENG/Test+Engineering) -### Backend code: +## Backend code -- TODO +- The python unit tests can be run via rake tasks. +See development.md for more info on how to do this. -### Frontend code: +## Frontend code -We're using Jasmine to unit-testing the JavaScript files. All the specs are -written in CoffeeScript for the consistency. To access the test cases, start the -server in debug mode, navigate to `http://127.0.0.1:[port number]/_jasmine` to -see the test result. +### Jasmine + +We're using Jasmine to unit/integration test the JavaScript files. 
+More info [on the wiki](https://edx-wiki.atlassian.net/wiki/display/ENG/Jasmine) + +All the specs are written in CoffeeScript to be consistent with the code. +To access the test cases, start the server using the settings file **jasmine.py** using this command: + `rake django-admin[runserver,lms,jasmine,12345]` + +Then navigate to `http://localhost:12345/_jasmine/` to see the test results. All the JavaScript codes must have test coverage. Both CMS and LMS has its own test directory in `{cms,lms}/static/coffee/spec` If you haven't @@ -30,3 +38,31 @@ If you're finishing a feature that contains JavaScript code snippets and do not sure how to test, please feel free to open up a pull request and asking people for help. (However, the best way to do it would be writing your test first, then implement your feature - Test Driven Development.) + +### BDD style acceptance tests with Lettuce + +We're using Lettuce for end user acceptance testing of features. +More info [on the wiki](https://edx-wiki.atlassian.net/wiki/display/ENG/Lettuce+Acceptance+Testing) + +Lettuce is a port of Cucumber. We're using it to drive Splinter, which is a python wrapper to Selenium. +To execute the automated test scripts, you'll need to start up the django server separately, then launch the tests. +Do both use the settings file named **acceptance.py**. + +What this will do is to use a sqllite database named mitx_all/db/test_mitx.db. +That way it can be flushed etc. without messing up your dev db. +Note that this also means that you need to syncdb and migrate the db first before starting the server to initialize it if it does not yet exist. + +1. Set up the test database (only needs to be done once): + rm ../db/test_mitx.db + rake django-admin[syncdb,lms,acceptance,--noinput] + rake django-admin[migrate,lms,acceptance,--noinput] + +2. Start up the django server separately in a shell + rake lms[acceptance] + +3. Then in another shell, run the tests in different ways as below. 
Lettuce comes with a new django-admin command called _harvest_. See the [lettuce django docs](http://lettuce.it/recipes/django-lxml.html) for more details. +* All tests in a specified feature folder: `django-admin.py harvest --no-server --settings=lms.envs.acceptance --pythonpath=. lms/djangoapps/portal/features/` +* Only the specified feature's scenarios: `django-admin.py harvest --no-server --settings=lms.envs.acceptance --pythonpath=. lms/djangoapps/courseware/features/high-level-tabs.feature` + +4. Troubleshooting +* If you get an error msg that says something about harvest not being a command, you probably are missing a requirement. Pip install (test-requirements.txt) and/or brew install as needed. \ No newline at end of file diff --git a/doc/xml-format.md b/doc/xml-format.md index 46082b90ad..8138de4d7e 100644 --- a/doc/xml-format.md +++ b/doc/xml-format.md @@ -257,6 +257,7 @@ Supported fields at the course level: * "tabs" -- have custom tabs in the courseware. See below for details on config. * "discussion_blackouts" -- An array of time intervals during which you want to disable a student's ability to create or edit posts in the forum. Moderators, Community TAs, and Admins are unaffected. You might use this during exam periods, but please be aware that the forum is often a very good place to catch mistakes and clarify points to students. The better long term solution would be to have better flagging/moderation mechanisms, but this is the hammer we have today. Format by example: [["2012-10-29T04:00", "2012-11-03T04:00"], ["2012-12-30T04:00", "2013-01-02T04:00"]] * "show_calculator" (value "Yes" if desired) +* "days_early_for_beta" -- number of days (floating point ok) early that students in the beta-testers group get to see course content. Can also be specified for any other course element, and overrides values set at higher levels. 
* TODO: there are others ### Grading policy file contents @@ -418,6 +419,10 @@ If you want to customize the courseware tabs displayed for your course, specify * "external_link". Parameters "name", "link". * "textbooks". No parameters--generates tab names from book titles. * "progress". Parameter "name". +* "static_tab". Parameters "name", 'url_slug'--will look for tab contents in + 'tabs/{course_url_name}/{tab url_slug}.html' +* "staff_grading". No parameters. If specified, displays the staff grading tab for instructors. + # Tips for content developers @@ -429,9 +434,7 @@ before the week 1 material to make it easy to find in the file. * Come up with a consistent pattern for url_names, so that it's easy to know where to look for any piece of content. It will also help to come up with a standard way of splitting your content files. As a point of departure, we suggest splitting chapters, sequences, html, and problems into separate files. -* A heads up: our content management system will allow you to develop content through a web browser, but will be backed by this same xml at first. Once that happens, every element will be in its own file to make access and updates faster. - -* Prefer the most "semantic" name for containers: e.g., use problemset rather than vertical for a problem set. That way, if we decide to display problem sets differently, we don't have to change the xml. +* Prefer the most "semantic" name for containers: e.g., use problemset rather than sequential for a problem set. That way, if we decide to display problem sets differently, we don't have to change the xml. # Other file locations (info and about) diff --git a/docs/source/capa.rst b/docs/source/capa.rst index f83d89f52d..345855af5e 100644 --- a/docs/source/capa.rst +++ b/docs/source/capa.rst @@ -1,9 +1,15 @@ ******************************************* Capa module ******************************************* +Contents: .. module:: capa +.. 
toctree:: + :maxdepth: 2 + + chem.rst + Calc ==== diff --git a/docs/source/chem.rst b/docs/source/chem.rst new file mode 100644 index 0000000000..26e01a3238 --- /dev/null +++ b/docs/source/chem.rst @@ -0,0 +1,69 @@ +******************************************* +Chem module +******************************************* + +.. module:: chem + +Miller +====== + +.. automodule:: capa.chem.miller + :members: + :show-inheritance: + +UI part and inputtypes +---------------------- +Miller module is used in the system in crystallography problems. +Crystallography is a class in :mod:`capa` inputtypes module. +It uses *crystallography.html* for rendering and **crystallography.js** +for UI part. + +Documentation from **crystallography.js**:: + + For a crystallographic problem of the type + + Given a plane definition via miller indexes, specify it by plotting points on the edges + of a 3D cube. Additionally, select the correct Bravais cubic lattice type depending on the + physical crystal mentioned in the problem. + + we create a graph which contains a cube, and a 3D Cartesian coordinate system. The interface + will allow to plot 3 points anywhere along the edges of the cube, and select which type of + Bravais lattice should be displayed along with the basic cube outline. + + When 3 points are successfully plotted, an intersection of the resulting plane (defined by + the 3 plotted points), and the cube, will be automatically displayed for clarity. + + After lotting the three points, it is possible to continue plotting additional points. By + doing so, the point that was plotted first (from the three that already exist), will be + removed, and the new point will be added. The intersection of the resulting new plane and + the cube will be redrawn. + + The UI has been designed in such a way, that the user is able to determine which point will + be removed next (if adding a new point). This is achieved via filling the to-be-removed point + with a different color. 
+ + + +Chemcalc +======== + +.. automodule:: capa.chem.chemcalc + :members: + :show-inheritance: + +Chemtools +========= + +.. automodule:: capa.chem.chemtools + :members: + :show-inheritance: + + +Tests +===== + +.. automodule:: capa.chem.tests + :members: + :show-inheritance: + + diff --git a/docs/source/graphical_slider_tool.rst b/docs/source/graphical_slider_tool.rst new file mode 100644 index 0000000000..37b17136e8 --- /dev/null +++ b/docs/source/graphical_slider_tool.rst @@ -0,0 +1,563 @@ +********************************************* +Xml format of graphical slider tool [xmodule] +********************************************* + +.. module:: xml_format_gst + + +Format description +================== + +Graphical slider tool (GST) main tag is:: + + BODY + +``graphical_slider_tool`` tag must have two children tags: ``render`` +and ``configuration``. + + +Render tag +---------- + +Render tag can contain usual html tags mixed with some GST specific tags:: + + - represents jQuery slider for changing a parameter's value + - represents a text input field for changing a parameter's value + - represents Flot JS plot element + +Also GST will track all elements inside ```` where ``id`` +attribute is set, and a corresponding parameter referencing that ``id`` is present +in the configuration section below. These will be referred to as dynamic elements. + +The contents of the section will be shown to the user after +all occurrences of:: + + + + + +have been converted to actual sliders, text inputs, and a plot graph. +Everything in square brackets is optional. After initialization, all +text input fields, sliders, and dynamic elements will be set to the initial +values of the parameters that they are assigned to. + +``{parameter name}`` specifies the parameter to which the slider or text +input will be attached to. + +[style="{CSS statements}"] specifies valid CSS styling. It will be passed +directly to the browser without any parsing. 
+ +There is a one-to-one relationship between a slider and a parameter. +I.e. for one parameter you can put only one ```` in the +```` section. However, you don't have to specify a slider - they +are optional. + +There is a many-to-one relationship between text inputs and a +parameter. I.e. for one parameter you can put many '' elements in +the ```` section. However, you don't have to specify a text +input - they are optional. + +You can put only one ```` in the ```` section. It is not +required. + + +Slider tag +.......... + +Slider tag must have ``var`` attribute and optional ``style`` attribute:: + + + +After processing, slider tags will be replaced by jQuery UI sliders with applied +``style`` attribute. + +``var`` attribute must correspond to a parameter. Parameters can be used in any +of the ``function`` tags in ``functions`` tag. By moving slider, value of +parameter ``a`` will change, and so result of function, that depends on parameter +``a``, will also change. + + +Textbox tag +........... + +Texbox tag must have ``var`` attribute and optional ``style`` attribute:: + + + +After processing, textbox tags will be replaced by html text inputs with applied +``style`` attribute. If you want a readonly text input, then you should use a +dynamic element instead (see section below "HTML tagsd with ID"). + +``var`` attribute must correspond to a parameter. Parameters can be used in any +of the ``function`` tags in ``functions`` tag. By changing the value on the text input, +value of parameter ``a`` will change, and so result of function, that depends on +parameter ``a``, will also change. + + +Plot tag +........ + +Plot tag may have optional ``style`` attribute:: + + + +After processing plot tags will be replaced by Flot JS plot with applied +``style`` attribute. + + +HTML tags with ID (dynamic elements) +.................................... + +Any HTML tag with ID, e.g. ```` can be used as a +place where result of function can be inserted. 
To insert function result to +an element, element ID must be included in ``function`` tag as ``el_id`` attribute +and ``output`` value must be ``"element"``:: + + + function add(a, b, precision) { + var x = Math.pow(10, precision || 2); + return (Math.round(a * x) + Math.round(b * x)) / x; + } + + return add(a, b, 5); + + + +Configuration tag +----------------- + +The configuration tag contains parameter settings, graph +settings, and function definitions which are to be plotted on the +graph and that use specified parameters. + +Configuration tag contains two mandatory tag ``functions`` and ``parameters`` and +may contain another ``plot`` tag. + + +Parameters tag +.............. + +``Parameters`` tag contains ``parameter`` tags. Each ``parameter`` tag must have +``var``, ``max``, ``min``, ``step`` and ``initial`` attributes:: + + + + + + +``var`` attribute links min, max, step and initial values to parameter name. + +``min`` attribute is the minimal value that a parameter can take. Slider and input +values can not go below it. + +``max`` attribute is the maximal value that a parameter can take. Slider and input +values can not go over it. + +``step`` attribute is value of slider step. When a slider increase or decreases +the specified parameter, it will do so by the amount specified with 'step' + +``initial`` attribute is the initial value that the specified parameter should be +set to. Sliders and inputs will initially show this value. + +The parameter's name is specified by the ``var`` property. All occurrences +of sliders and/or text inputs that specify a ``var`` property, will be +connected to this parameter - i.e. they will reflect the current +value of the parameter, and will be updated when the parameter +changes. 
+ +If at lest one of these attributes is not set, then the parameter +will not be used, slider's and/or text input elements that specify +this parameter will not be activated, and the specified functions +which use this parameter will not return a numeric value. This means +that neglecting to specify at least one of the attributes for some +parameter will have the result of the whole GST instance not working +properly. + + +Functions tag +............. + +For the GST to do something, you must defined at least one +function, which can use any of the specified parameter values. The +function expects to take the ``x`` value, do some calculations, and +return the ``y`` value. I.e. this is a 2D plot in Cartesian +coordinates. This is how the default function is meant to be used for +the graph. + +There are other special cases of functions. They are used mainly for +outputting to elements, plot labels, or for custom output. Because +the return a single value, and that value is meant for a single element, +these function are invoked only with the set of all of the parameters. +I.e. no ``x`` value is available inside them. They are useful for +showing the current value of a parameter, showing complex static +formulas where some parameter's value must change, and other useful +things. + +The different style of function is specified by the ``output`` attribute. + +Each function must be defined inside ``function`` tag in ``functions`` tag:: + + + + function add(a, b, precision) { + var x = Math.pow(10, precision || 2); + return (Math.round(a * x) + Math.round(b * x)) / x; + } + + return add(a, b, 5); + + + +The parameter names (along with their values, as provided from text +inputs and/or sliders), will be available inside all defined +functions. A defined function body string will be parsed internally +by the browser's JavaScript engine and converted to a true JS +function. 
+ +The function's parameter list will automatically be created and +populated, and will include the ``x`` (when ``output`` is not specified or +is set to ``"graph"``), and all of the specified parameter values (from sliders +and text inputs). This means that each of the defined functions will have +access to all of the parameter values. You don't have to use them, but +they will be there. + +Examples:: + + + return x; + + + + return (x + a) * Math.sin(x * b); + + + + function helperFunc(c1) { + return c1 * c1 - a; + } + return helperFunc(x + 10 * a * b) + Math.sin(a - x); + + +Required parameters:: + + function body: + + A string composing a normal JavaScript function + except that there is no function declaration + (along with parameters), and no closing bracket. + + So if you normally would have written your + JavaScript function like this: + + function myFunc(x, a, b) { + return x * a + b; + } + + here you must specify just the function body + (everything that goes between '{' and '}'). So, + you would specify the above function like so (the + bare-bone minimum): + + return x * a + b; + + VERY IMPORTANT: Because the function will be passed + to the browser as a single string, depending on implementation + specifics, the end-of-line characters can be stripped. This + means that single line JavaScript comments (starting with "//") + can lead to the effect that everything after the first such comment + will be treated as a comment. Therefore, it is absolutely + necessary that such single line comments are not used when + defining functions for GST. You can safely use the alternative + multiple line JavaScript comments (such comments start with "/*" + and end with "*/). + + VERY IMPORTANT: If you have a large function body, and decide to + split it into several lines, than you must wrap it in "CDATA" like + so: + + + + + +Optional parameters:: + + + color: Color name ('red', 'green', etc.) or in the form of + '#FFFF00'. 
If not specified, a default color (different + one for each graphed function) will be given by Flot JS. + line: A string - 'true' or 'false'. Should the data points be + connected by a line on the graph? Default is 'true'. + dot: A string - 'true' or 'false'. Should points be shown for + each data point on the graph? Default is 'false'. + bar: A string - 'true' or 'false'. When set to 'true', points + will be plotted as bars. + label: A string. If provided, will be shown in the legend, along + with the color that was used to plot the function. + output: 'element', 'none', 'plot_label', or 'graph'. If not defined, + function will be plotted (same as setting 'output' to 'graph'). + If defined, and other than 'graph', function will not be + plotted, but it's output will be inserted into the element + with ID specified by 'el_id' attribute. + el_id: Id of HTML element, defined in '' section. Value of + function will be inserted as content of this element. + disable_auto_return: By default, if JavaScript function string is written + without a "return" statement, the "return" will be + prepended to it. Set to "true" to disable this + functionality. This is done so that simple functions + can be defined in an easy fashion (for example, "a", + which will be translated into "return a"). + update_on: A string - 'change', or 'slide'. Default (if not set) is + 'slide'. This defines the event on which a given function is + called, and its result is inserted into an element. This + setting is relevant only when "output" is other than "graph". + +When specifying ``el_id``, it is essential to set "output" to one of + element - GST will invoke the function, and the return of it will be + inserted into a HTML element with id specified by ``el_id``. + none - GST will simply inoke the function. It is left to the instructor + who writes the JavaScript function body to update all necesary + HTML elements inside the function, before it exits. 
This is done + so that extra steps can be preformed after an HTML element has + been updated with a value. Note, that because the return value + from this function is not actually used, it will be tempting to + omit the "return" statement. However, in this case, the attribute + "disable_auto_return" must be set to "true" in order to prevent + GST from inserting a "return" statement automatically. + plot_label - GST will process all plot labels (which are strings), and + will replace the all instances of substrings specified by + ``el_id`` with the returned value of the function. This is + necessary if you want a label in the graph to have some changing + number. Because of the nature of Flot JS, it is impossible to + achieve the same effect by setting the "output" attribute + to "element", and including a HTML element in the label. + +The above values for "output" will tell GST that the function is meant for an +HTML element (not for graph), and that it should not get an 'x' parameter (along +with some value). + + +[Note on MathJax and labels] +............................ + +Independently of this module, will render all TeX code +within the ```` section into nice mathematical formulas. Just +remember to wrap it in one of:: + + \( and \) - for inline formulas (formulas surrounded by + standard text) + \[ and \] - if you want the formula to be a separate line + +It is possible to define a label in standard TeX notation. The JS +library MathJax will work on these labels also because they are +inserted on top of the plot as standard HTML (text within a DIV). + +If the label is dynamic, i.e. it will contain some text (numeric, or other) +that has to be updated on a parameter's change, then one can define +a special function to handle this. The "output" of such a function must be +set to "none", and the JavaScript code inside this function must update the +MathJax element by itself. 
Before exiting, MathJax typeset function should +be called so that the new text will be re-rendered by MathJax. For example, + + + ... + + + ... + + + + ... + + +Plot tag +........ + +``Plot`` tag inside ``configuration`` tag defines settings for plot output. + +Required parameters:: + + xrange: 2 functions that must return value. Value is constant (3.1415) + or depend on parameter from parameters section: + + return 0; + return 30; + + or + + return -a; + return a; + + + All functions will be calculated over domain between xrange:min + and xrange:max. Xrange depending on parameter is extremely + useful when domain(s) of your function(s) depends on parameter + (like circle, when parameter is radius and you want to allow + to change it). + +Optional parameters:: + + num_points: Number of data points to generated for the plot. If + this is not set, the number of points will be + calculated as width / 5. + + bar_width: If functions are present which are to be plotted as bars, + then this parameter specifies the width of the bars. A + numeric value for this parameter is expected. + + bar_align: If functions are present which are to be plotted as bars, + then this parameter specifies how to align the bars relative + to the tick. Available values are "left" and "center". + + xticks, + yticks: 3 floating point numbers separated by commas. This + specifies how many ticks are created, what number they + start at, and what number they end at. This is different + from the 'xrange' setting in that it has nothing to do + with the data points - it control what area of the + Cartesian space you will see. The first number is the + first tick's value, the second number is the step + between each tick, the third number is the value of the + last tick. If these configurations are not specified, + Flot will chose them for you based on the data points + set that he is currently plotting. Usually, this results + in a nice graph, however, sometimes you need to fine + grain the controls. 
For example, when you want to show + a fixed area of the Cartesian space, even when the data + set changes. On it's own, Flot will recalculate the + ticks, which will result in a different graph each time. + By specifying the xticks, yticks configurations, only + the plotted data will change - the axes (ticks) will + remain as you have defined them. + + xticks_names, yticks_names: + A JSON string which represents a mapping of xticks, yticks + values to some defined strings. If specified, the graph will + not have any xticks, yticks except those for which a string + value has been defined in the JSON string. Note that the + matching will be string-based and not numeric. I.e. if a tick + value was "3.70" before, then inside the JSON there should be + a mapping like {..., "3.70": "Some string", ...}. Example: + + + + + + + + + + xunits, + yunits: Units values to be set on axes. Use MathJax. Example: + \(cm\) + \(m\) + + moving_label: + A way to specify a label that should be positioned dynamically, + based on the values of some parameters, or some other factors. + It is similar to a , but it is only valid for a plot + because it is drawn relative to the plot coordinate system. + + Multiple "moving_label" configurations can be provided, each one + with a unique text and a unique set of functions that determine + it's dynamic positioning. + + Each "moving_label" can have a "color" attribute (CSS color notation), + and a "weight" attribute. "weight" can be one of "normal" or "bold", + and determines the styling of moving label's text. + + Each "moving_label" function should return an object with a 'x' + and 'y properties. Within those functions, all of the parameter + names along with their value are available. + + Example (note that "return" statement is missing; it will be automatically + inserted by GST): + + + + +

    Graphic slider tool: Bar graph example.

    + +

    We can request the API to plot us a bar graph.

    +
    +

    a

    + + +


    +

    b

    + + +
    + +
    + + + + + + + + 0.9) && (x<1.1)) || ((x>4.9) && (x<5.1))) { return Math.sin(a * 0.01 * Math.PI + 2.952 * x); } + else {return undefined;}]]> + + + 1.9) && (x<2.1)) || ((x>3.9) && (x<4.1))) { return Math.cos(b * 0.01 * Math.PI + 3.432 * x); } + else {return undefined;}]]> + + + 1.9) && (x<2.1)) || ((x>3.9) && (x<4.1))) { return Math.cos((b - 10 * a) * 0.01 * Math.PI + 3.432 * x); } + else {return undefined;}]]> + + + 1.9) && (x<2.1)) || ((x>3.9) && (x<4.1))) { return Math.cos((b + 7 * a) * 0.01 * Math.PI + 3.432 * x); } + else {return undefined;}]]> + + + + 15 + 5 + 0, 0.5, 6 + -1.5, 0.1, 1.5 + + + + + + + 0.4 + + +
    + diff --git a/docs/source/gst_example_dynamic_labels.xml b/docs/source/gst_example_dynamic_labels.xml new file mode 100644 index 0000000000..05cbe407fb --- /dev/null +++ b/docs/source/gst_example_dynamic_labels.xml @@ -0,0 +1,40 @@ + + + +

    Graphic slider tool: Dynamic labels.

    +

    There are two kinds of dynamic labels. + 1) Dynamically changing values in graph legends. + 2) Dynamic labels, whose coordinates depend on parameters

    +

    a:

    +
    +

    b:

    +

    + +
    + + + + + + + + a * x + b + + a + + + 030 + 10 + 0, 6, 30 + -9, 1, 9 + + + + + + + + + +
    +
    \ No newline at end of file diff --git a/docs/source/gst_example_dynamic_range.xml b/docs/source/gst_example_dynamic_range.xml new file mode 100644 index 0000000000..0ce4263d62 --- /dev/null +++ b/docs/source/gst_example_dynamic_range.xml @@ -0,0 +1,37 @@ + + + +

    Graphic slider tool: Dynamic range and implicit functions.

    + +

    You can make the x range (not the ticks of the x axis) of functions depend on + a parameter value. This can be useful when the function domain depends + on a parameter.

    +

    Also, implicit functions like a circle can be plotted as 2 separate + functions of the same color.

    +
    + + +
    + +
    + + + + + + Math.sqrt(a * a - x * x) + -Math.sqrt(a * a - x * x) + + + + + -a + a + + 1000 + -30, 6, 30 + -30, 6, 30 + + +
    +
    diff --git a/docs/source/gst_example_html_element_output.xml b/docs/source/gst_example_html_element_output.xml new file mode 100644 index 0000000000..340783871a --- /dev/null +++ b/docs/source/gst_example_html_element_output.xml @@ -0,0 +1,40 @@ + + + +

    Graphic slider tool: Output to DOM element.

    + +

    a + b =

    + +
    +

    a

    + + +
    + +
    +

    b

    + + +
    +


    + +
    + + + + + + + + + function add(a, b, precision) { + var x = Math.pow(10, precision || 2); + return (Math.round(a * x) + Math.round(b * x)) / x; + } + + return add(a, b, 5); + + + +
    +
    diff --git a/docs/source/gst_example_with_documentation.xml b/docs/source/gst_example_with_documentation.xml new file mode 100644 index 0000000000..addada5b10 --- /dev/null +++ b/docs/source/gst_example_with_documentation.xml @@ -0,0 +1,91 @@ + + + + +

    Graphic slider tool: full example.

    +

    + A simple equation + \( + y_1 = 10 \times b \times \frac{sin(a \times x) \times sin(b \times x)}{cos(b \times x) + 10} + \) + can be plotted. +

    + + +
    +

    Currently \(a\) is

    + + +
    + +

    This one + \( + y_2 = sin(a \times x) + \) + will be overlayed on top. +

    +
    +

    Currently \(b\) is

    + +
    +
    +

    To change \(a\) use:

    + +
    +
    +

    To change \(b\) use:

    + +
    + +
    +

    Second input for b:

    + + +
    +
    + + + + + + + + + + + + return 10.0 * b * Math.sin(a * x) * Math.sin(b * x) / (Math.cos(b * x) + 10); + + + + Math.sin(a * x); + + + function helperFunc(c1) { + return c1 * c1 - a; + } + + return helperFunc(x + 10 * a * b) + Math.sin(a - x); + + a + + + + + + return 0; + + 30 + + + 120 + + 0, 3, 30 + -1.5, 1.5, 13.5 + + \(cm\) + \(m\) + + +
    +
    diff --git a/docs/source/index.rst b/docs/source/index.rst index 92c535a624..d2082ff3a0 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -14,7 +14,7 @@ Contents: overview.rst common-lib.rst djangoapps.rst - + xml_formats.rst Indices and tables ================== diff --git a/docs/source/xml_formats.rst b/docs/source/xml_formats.rst new file mode 100644 index 0000000000..b76ee11642 --- /dev/null +++ b/docs/source/xml_formats.rst @@ -0,0 +1,8 @@ +XML formats of Inputtypes and Xmodule +===================================== +Contents: + +.. toctree:: + :maxdepth: 2 + + graphical_slider_tool.rst \ No newline at end of file diff --git a/install-system-req.sh b/install-system-req.sh new file mode 100755 index 0000000000..37bc6d1716 --- /dev/null +++ b/install-system-req.sh @@ -0,0 +1,108 @@ +#!/usr/bin/env bash + +# posix compliant sanity check +if [ -z $BASH ] || [ $BASH = "/bin/sh" ]; then + echo "Please use the bash interpreter to run this script" + exit 1 +fi + +error() { + printf '\E[31m'; echo "$@"; printf '\E[0m' +} +output() { + printf '\E[36m'; echo "$@"; printf '\E[0m' +} + + +### START + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +BREW_FILE=$DIR/"brew-formulas.txt" +APT_REPOS_FILE=$DIR/"apt-repos.txt" +APT_PKGS_FILE=$DIR/"apt-packages.txt" + +case `uname -s` in + [Ll]inux) + command -v lsb_release &>/dev/null || { + error "Please install lsb-release." + exit 1 + } + + distro=`lsb_release -cs` + case $distro in + maya|lisa|natty|oneiric|precise|quantal) + output "Installing Ubuntu requirements" + + # DEBIAN_FRONTEND=noninteractive is required for silent mysql-server installation + export DEBIAN_FRONTEND=noninteractive + + # add repositories + cat $APT_REPOS_FILE | xargs -n 1 sudo add-apt-repository -y + sudo apt-get -y update + + # install packages listed in APT_PKGS_FILE + cat $APT_PKGS_FILE | xargs sudo apt-get -y install + ;; + *) + error "Unsupported distribution - $distro" + exit 1 + ;; + esac + ;; + Darwin) + + if [[ ! 
-w /usr/local ]]; then + cat</dev/null || { + output "Installing $pkg" + brew install $pkg + } + done + + # paths where brew likes to install python scripts + PATH=/usr/local/share/python:/usr/local/bin:$PATH + + command -v pip &>/dev/null || { + output "Installing pip" + easy_install pip + } + + if ! grep -Eq ^1.7 <(virtualenv --version 2>/dev/null); then + output "Installing virtualenv >1.7" + pip install 'virtualenv>1.7' virtualenvwrapper + fi + + command -v coffee &>/dev/null || { + output "Installing coffee script" + curl --insecure https://npmjs.org/install.sh | sh + npm install -g coffee-script + } + ;; + *) + error "Unsupported platform" + exit 1 + ;; +esac diff --git a/jenkins/quality.sh b/jenkins/quality.sh index 4cf26d76bf..56217af874 100755 --- a/jenkins/quality.sh +++ b/jenkins/quality.sh @@ -3,6 +3,8 @@ set -e set -x +git remote prune origin + # Reset the submodule, in case it changed git submodule foreach 'git reset --hard HEAD' diff --git a/jenkins/test.sh b/jenkins/test.sh new file mode 100755 index 0000000000..7d946a24cd --- /dev/null +++ b/jenkins/test.sh @@ -0,0 +1,55 @@ +#! /bin/bash + +set -e +set -x + +function github_status { + gcli status create mitx mitx $GIT_COMMIT \ + --params=$1 \ + target_url:$BUILD_URL \ + description:"Build #$BUILD_NUMBER $2" \ + -f csv +} + +function github_mark_failed_on_exit { + trap '[ $? == "0" ] || github_status state:failure "failed"' EXIT +} + +git remote prune origin + +github_mark_failed_on_exit +github_status state:pending "is running" + +# Reset the submodule, in case it changed +git submodule foreach 'git reset --hard HEAD' + +# Set the IO encoding to UTF-8 so that askbot will start +export PYTHONIOENCODING=UTF-8 + +GIT_BRANCH=${GIT_BRANCH/HEAD/master} + +if [ ! 
-d /mnt/virtualenvs/"$JOB_NAME" ]; then + mkdir -p /mnt/virtualenvs/"$JOB_NAME" + virtualenv /mnt/virtualenvs/"$JOB_NAME" +fi + +source /mnt/virtualenvs/"$JOB_NAME"/bin/activate +pip install -q -r pre-requirements.txt +pip install -q -r test-requirements.txt +yes w | pip install -q -r requirements.txt + +rake clobber +TESTS_FAILED=0 +# Don't run the studio tests until feature/cale/cms-master is merged in +# rake test_cms[false] || TESTS_FAILED=1 +rake test_lms[false] || TESTS_FAILED=1 +rake test_common/lib/capa || TESTS_FAILED=1 +rake test_common/lib/xmodule || TESTS_FAILED=1 +# Don't run the studio tests until feature/cale/cms-master is merged in +# rake phantomjs_jasmine_cms || true +rake coverage:xml coverage:html + +[ $TESTS_FAILED == '0' ] +rake autodeploy_properties + +github_status state:success "passed" diff --git a/jenkins/test_edge.sh b/jenkins/test_edge.sh deleted file mode 100755 index 7b58b481f6..0000000000 --- a/jenkins/test_edge.sh +++ /dev/null @@ -1,29 +0,0 @@ -#! /bin/bash - -set -e -set -x - -# Reset the submodule, in case it changed -git submodule foreach 'git reset --hard HEAD' - -# Set the IO encoding to UTF-8 so that askbot will start -export PYTHONIOENCODING=UTF-8 - -GIT_BRANCH=${GIT_BRANCH/HEAD/master} - -pip install -q -r pre-requirements.txt -yes w | pip install -q -r requirements.txt -[ ! -d askbot ] || pip install -q -r askbot/askbot_requirements.txt - -rake clobber -TESTS_FAILED=0 -rake test_cms[false] || TESTS_FAILED=1 -rake test_lms[false] || TESTS_FAILED=1 -rake test_common/lib/capa || TESTS_FAILED=1 -rake test_common/lib/xmodule || TESTS_FAILED=1 -rake phantomjs_jasmine_lms || true -rake phantomjs_jasmine_cms || true -rake coverage:xml coverage:html - -[ $TESTS_FAILED == '0' ] -rake autodeploy_properties \ No newline at end of file diff --git a/jenkins/test_lms.sh b/jenkins/test_lms.sh deleted file mode 100755 index 98640c2b5b..0000000000 --- a/jenkins/test_lms.sh +++ /dev/null @@ -1,27 +0,0 @@ -#! 
/bin/bash - -set -e -set -x - -# Reset the submodule, in case it changed -git submodule foreach 'git reset --hard HEAD' - -# Set the IO encoding to UTF-8 so that askbot will start -export PYTHONIOENCODING=UTF-8 - -GIT_BRANCH=${GIT_BRANCH/HEAD/master} - -pip install -q -r pre-requirements.txt -yes w | pip install -q -r requirements.txt -[ ! -d askbot ] || pip install -q -r askbot/askbot_requirements.txt - -rake clobber -TESTS_FAILED=0 -rake test_lms[false] || TESTS_FAILED=1 -rake test_common/lib/capa || TESTS_FAILED=1 -rake test_common/lib/xmodule || TESTS_FAILED=1 -rake phantomjs_jasmine_lms || true -rake coverage:xml coverage:html - -[ $TESTS_FAILED == '0' ] -rake autodeploy_properties \ No newline at end of file diff --git a/lms/.coveragerc b/lms/.coveragerc index acac3ed4f2..7e18a37492 100644 --- a/lms/.coveragerc +++ b/lms/.coveragerc @@ -2,11 +2,13 @@ [run] data_file = reports/lms/.coverage source = lms +omit = lms/envs/* [report] ignore_errors = True [html] +title = LMS Python Test Coverage Report directory = reports/lms/cover [xml] diff --git a/lms/djangoapps/courseware/access.py b/lms/djangoapps/courseware/access.py index 00b4c763b3..c7e09526c9 100644 --- a/lms/djangoapps/courseware/access.py +++ b/lms/djangoapps/courseware/access.py @@ -4,15 +4,17 @@ like DISABLE_START_DATES""" import logging import time +from datetime import datetime, timedelta from django.conf import settings from xmodule.course_module import CourseDescriptor from xmodule.error_module import ErrorDescriptor from xmodule.modulestore import Location -from xmodule.timeparse import parse_time from xmodule.x_module import XModule, XModuleDescriptor +from student.models import CourseEnrollmentAllowed + DEBUG_ACCESS = False log = logging.getLogger(__name__) @@ -34,7 +36,8 @@ def has_access(user, obj, action): user: a Django user object. May be anonymous. - obj: The object to check access for. For now, a module or descriptor. + obj: The object to check access for. 
A module, descriptor, location, or + certain special strings (e.g. 'global') action: A string specifying the action that the client is trying to perform. @@ -70,7 +73,7 @@ def has_access(user, obj, action): raise TypeError("Unknown object type in has_access(): '{0}'" .format(type(obj))) -def get_access_group_name(obj,action): +def get_access_group_name(obj, action): ''' Returns group name for user group which has "action" access to the given object. @@ -123,6 +126,11 @@ def _has_access_course_desc(user, course, action): debug("Allow: in enrollment period") return True + # if user is in CourseEnrollmentAllowed with right course_id then can also enroll + if user is not None and CourseEnrollmentAllowed: + if CourseEnrollmentAllowed.objects.filter(email=user.email, course_id=course.id): + return True + # otherwise, need staff access return _has_staff_access_to_descriptor(user, course) @@ -158,13 +166,19 @@ def _has_access_course_desc(user, course, action): return _dispatch(checkers, action, user, course) + def _get_access_group_name_course_desc(course, action): ''' - Return name of group which gives staff access to course. Only understands action = 'staff' + Return name of group which gives staff access to course. 
Only understands action = 'staff' and 'instructor' ''' - if not action=='staff': - return [] - return _course_staff_group_name(course.location) + if action=='staff': + return _course_staff_group_name(course.location) + elif action=='instructor': + return _course_instructor_group_name(course.location) + + return [] + + def _has_access_error_desc(user, descriptor, action): """ @@ -212,9 +226,10 @@ def _has_access_descriptor(user, descriptor, action): # Check start date if descriptor.start is not None: now = time.gmtime() - if now > descriptor.start: + effective_start = _adjust_start_date_for_beta_testers(user, descriptor) + if now > effective_start: # after start date, everyone can see it - debug("Allow: now > start date") + debug("Allow: now > effective start date") return True # otherwise, need staff access return _has_staff_access_to_descriptor(user, descriptor) @@ -314,6 +329,15 @@ def _course_staff_group_name(location): """ return 'staff_%s' % Location(location).course +def course_beta_test_group_name(location): + """ + Get the name of the beta tester group for a location. Right now, that's + beta_testers_COURSE. + + location: something that can passed to Location. + """ + return 'beta_testers_{0}'.format(Location(location).course) + def _course_instructor_group_name(location): """ @@ -334,6 +358,51 @@ def _has_global_staff_access(user): return False +def _adjust_start_date_for_beta_testers(user, descriptor): + """ + If user is in a beta test group, adjust the start date by the appropriate number of + days. + + Arguments: + user: A django user. May be anonymous. + descriptor: the XModuleDescriptor the user is trying to get access to, with a + non-None start date. + + Returns: + A time, in the same format as returned by time.gmtime(). Either the same as + start, or earlier for beta testers. + + NOTE: number of days to adjust should be cached to avoid looking it up thousands of + times per query. 
+ + NOTE: For now, this function assumes that the descriptor's location is in the course + the user is looking at. Once we have proper usages and definitions per the XBlock + design, this should use the course the usage is in. + + NOTE: If testing manually, make sure MITX_FEATURES['DISABLE_START_DATES'] = False + in envs/dev.py! + """ + if descriptor.days_early_for_beta is None: + # bail early if no beta testing is set up + return descriptor.start + + user_groups = [g.name for g in user.groups.all()] + + beta_group = course_beta_test_group_name(descriptor.location) + if beta_group in user_groups: + debug("Adjust start time: user in group %s", beta_group) + # time_structs don't support subtraction, so convert to datetimes, + # subtract, convert back. + # (fun fact: datetime(*a_time_struct[:6]) is the beautiful syntax for + # converting time_structs into datetimes) + start_as_datetime = datetime(*descriptor.start[:6]) + delta = timedelta(descriptor.days_early_for_beta) + effective = start_as_datetime - delta + # ...and back to time_struct + return effective.timetuple() + + return descriptor.start + def _has_instructor_access_to_location(user, location): return _has_access_to_location(user, location, 'instructor') diff --git a/lms/djangoapps/courseware/admin.py b/lms/djangoapps/courseware/admin.py index cda4fbb788..f7e54d1800 100644 --- a/lms/djangoapps/courseware/admin.py +++ b/lms/djangoapps/courseware/admin.py @@ -7,3 +7,8 @@ from django.contrib import admin from django.contrib.auth.models import User admin.site.register(StudentModule) + +admin.site.register(OfflineComputedGrade) + +admin.site.register(OfflineComputedGradeLog) + diff --git a/lms/djangoapps/courseware/courses.py b/lms/djangoapps/courseware/courses.py index 65a1eee25b..7c0d30ebd8 100644 --- a/lms/djangoapps/courseware/courses.py +++ b/lms/djangoapps/courseware/courses.py @@ -217,11 +217,21 @@ def get_courses_by_university(user, domain=None): ''' # TODO: Clean up how 'error' is done. 
# filter out any courses that errored. - visible_courses = branding.get_visible_courses(domain) + visible_courses = get_courses(user, domain) universities = defaultdict(list) for course in visible_courses: - if not has_access(user, course, 'see_exists'): - continue universities[course.org].append(course) + return universities + + +def get_courses(user, domain=None): + ''' + Returns a list of courses available, sorted by course.number + ''' + courses = branding.get_visible_courses(domain) + courses = [c for c in courses if has_access(user, c, 'see_exists')] + + courses = sorted(courses, key=lambda course:course.number) + return courses diff --git a/lms/djangoapps/courseware/features/courses.py b/lms/djangoapps/courseware/features/courses.py new file mode 100644 index 0000000000..aecaa139ff --- /dev/null +++ b/lms/djangoapps/courseware/features/courses.py @@ -0,0 +1,254 @@ +from lettuce import world +from xmodule.course_module import CourseDescriptor +from xmodule.modulestore.django import modulestore +from courseware.courses import get_course_by_id +from xmodule import seq_module, vertical_module + +from logging import getLogger +logger = getLogger(__name__) + +## support functions +def get_courses(): + ''' + Returns dict of lists of courses available, keyed by course.org (ie university). + Courses are sorted by course.number. + ''' + courses = [c for c in modulestore().get_courses() + if isinstance(c, CourseDescriptor)] + courses = sorted(courses, key=lambda course: course.number) + return courses + +# def get_courseware(course_id): +# """ +# Given a course_id (string), return a courseware array of dictionaries for the +# top two levels of navigation. 
Example: + +# [ +# {'chapter_name': 'Overview', +# 'sections': ['Welcome', 'System Usage Sequence', 'Lab0: Using the tools', 'Circuit Sandbox'] +# }, +# {'chapter_name': 'Week 1', +# 'sections': ['Administrivia and Circuit Elements', 'Basic Circuit Analysis', 'Resistor Divider', 'Week 1 Tutorials'] +# }, +# {'chapter_name': 'Midterm Exam', +# 'sections': ['Midterm Exam'] +# } +# ] +# """ + +# course = get_course_by_id(course_id) +# chapters = course.get_children() +# courseware = [ {'chapter_name':c.display_name, 'sections':[s.display_name for s in c.get_children()]} for c in chapters] +# return courseware + +def get_courseware_with_tabs(course_id): + """ + Given a course_id (string), return a courseware array of dictionaries for the + top three levels of navigation. Same as get_courseware() except include + the tabs on the right hand main navigation page. + + This hides the appropriate courseware as defined by the XML flag test: + chapter.metadata.get('hide_from_toc','false').lower() == 'true' + + Example: + + [{ + 'chapter_name': 'Overview', + 'sections': [{ + 'clickable_tab_count': 0, + 'section_name': 'Welcome', + 'tab_classes': [] + }, { + 'clickable_tab_count': 1, + 'section_name': 'System Usage Sequence', + 'tab_classes': ['VerticalDescriptor'] + }, { + 'clickable_tab_count': 0, + 'section_name': 'Lab0: Using the tools', + 'tab_classes': ['HtmlDescriptor', 'HtmlDescriptor', 'CapaDescriptor'] + }, { + 'clickable_tab_count': 0, + 'section_name': 'Circuit Sandbox', + 'tab_classes': [] + }] + }, { + 'chapter_name': 'Week 1', + 'sections': [{ + 'clickable_tab_count': 4, + 'section_name': 'Administrivia and Circuit Elements', + 'tab_classes': ['VerticalDescriptor', 'VerticalDescriptor', 'VerticalDescriptor', 'VerticalDescriptor'] + }, { + 'clickable_tab_count': 0, + 'section_name': 'Basic Circuit Analysis', + 'tab_classes': ['CapaDescriptor', 'CapaDescriptor', 'CapaDescriptor'] + }, { + 'clickable_tab_count': 0, + 'section_name': 'Resistor Divider', + 
'tab_classes': [] + }, { + 'clickable_tab_count': 0, + 'section_name': 'Week 1 Tutorials', + 'tab_classes': [] + }] + }, { + 'chapter_name': 'Midterm Exam', + 'sections': [{ + 'clickable_tab_count': 2, + 'section_name': 'Midterm Exam', + 'tab_classes': ['VerticalDescriptor', 'VerticalDescriptor'] + }] + }] + """ + + course = get_course_by_id(course_id) + chapters = [ chapter for chapter in course.get_children() if chapter.metadata.get('hide_from_toc','false').lower() != 'true' ] + courseware = [{'chapter_name':c.display_name, + 'sections':[{'section_name':s.display_name, + 'clickable_tab_count':len(s.get_children()) if (type(s)==seq_module.SequenceDescriptor) else 0, + 'tabs':[{'children_count':len(t.get_children()) if (type(t)==vertical_module.VerticalDescriptor) else 0, + 'class':t.__class__.__name__ } + for t in s.get_children() ]} + for s in c.get_children() if s.metadata.get('hide_from_toc', 'false').lower() != 'true']} + for c in chapters ] + + return courseware + +def process_section(element, num_tabs=0): + ''' + Process section reads through whatever is in 'course-content' and classifies it according to sequence module type. + + This function is recursive + + There are 6 types, with 6 actions. 
+ + Sequence Module + -contains one child module + + Vertical Module + -contains other modules + -process it and get its children, then process them + + Capa Module + -problem type, contains only one problem + -for this, the most complex type, we created a separate method, process_problem + + Video Module + -video type, contains only one video + -we only check to ensure that a section with class of video exists + + HTML Module + -html text + -we do not check anything about it + + Custom Tag Module + -a custom 'hack' module type + -there is a large variety of content that could go in a custom tag module, so we just pass if it is of this unusual type + + can be used like this: + e = world.browser.find_by_css('section.course-content section') + process_section(e) + + ''' + if element.has_class('xmodule_display xmodule_SequenceModule'): + logger.debug('####### Processing xmodule_SequenceModule') + child_modules = element.find_by_css("div>div>section[class^='xmodule']") + for mod in child_modules: + process_section(mod) + + elif element.has_class('xmodule_display xmodule_VerticalModule'): + logger.debug('####### Processing xmodule_VerticalModule') + vert_list = element.find_by_css("li section[class^='xmodule']") + for item in vert_list: + process_section(item) + + elif element.has_class('xmodule_display xmodule_CapaModule'): + logger.debug('####### Processing xmodule_CapaModule') + assert element.find_by_css("section[id^='problem']"), "No problems found in Capa Module" + p = element.find_by_css("section[id^='problem']").first + p_id = p['id'] + logger.debug('####################') + logger.debug('id is "%s"' % p_id) + logger.debug('####################') + process_problem(p, p_id) + + elif element.has_class('xmodule_display xmodule_VideoModule'): + logger.debug('####### Processing xmodule_VideoModule') + assert element.find_by_css("section[class^='video']"), "No video found in Video Module" + + elif element.has_class('xmodule_display xmodule_HtmlModule'): + 
logger.debug('####### Processing xmodule_HtmlModule') + pass + + elif element.has_class('xmodule_display xmodule_CustomTagModule'): + logger.debug('####### Processing xmodule_CustomTagModule') + pass + + else: + assert False, "Class for element not recognized!!" + + + +def process_problem(element, problem_id): + ''' + Process problem attempts to + 1) scan all the input fields and reset them + 2) click the 'check' button and look for an incorrect response (p.status text should be 'incorrect') + 3) click the 'show answer' button IF it exists and IF the answer is not already displayed + 4) enter the correct answer in each input box + 5) click the 'check' button and verify that answers are correct + + Because of all the ajax calls happening, sometimes the test fails because objects disconnect from the DOM. + The basic functionality does exist, though, and I'm hoping that someone can take it over and make it super effective. + ''' + + prob_xmod = element.find_by_css("section.problem").first + input_fields = prob_xmod.find_by_css("section[id^='input']") + + ## clear out all input to ensure an incorrect result + for field in input_fields: + field.find_by_css("input").first.fill('') + + ## because of cookies or the application, only click the 'check' button if the status is not already 'incorrect' + # This would need to be reworked because multiple choice problems don't have this status + # if prob_xmod.find_by_css("p.status").first.text.strip().lower() != 'incorrect': + prob_xmod.find_by_css("section.action input.check").first.click() + + ## all elements become disconnected after the click + ## grab element and prob_xmod because the dom has changed (some classes/elements became hidden and changed the hierarchy) + # Wait for the ajax reload + assert world.browser.is_element_present_by_css("section[id='%s']" % problem_id, wait_time=5) + element = world.browser.find_by_css("section[id='%s']" % problem_id).first + prob_xmod = element.find_by_css("section.problem").first + 
input_fields = prob_xmod.find_by_css("section[id^='input']") + for field in input_fields: + assert field.find_by_css("div.incorrect"), "The 'check' button did not work for %s" % (problem_id) + + show_button = element.find_by_css("section.action input.show").first + ## this logic is to ensure we do not accidentally hide the answers + if show_button.value.lower() == 'show answer': + show_button.click() + else: + pass + + ## grab element and prob_xmod because the dom has changed (some classes/elements became hidden and changed the hierarchy) + assert world.browser.is_element_present_by_css("section[id='%s']" % problem_id, wait_time=5) + element = world.browser.find_by_css("section[id='%s']" % problem_id).first + prob_xmod = element.find_by_css("section.problem").first + input_fields = prob_xmod.find_by_css("section[id^='input']") + + ## in each field, find the answer, and send it to the field. + ## Note that this does not work if the answer type is a strange format, e.g. "either a or b" + for field in input_fields: + field.find_by_css("input").first.fill(field.find_by_css("p[id^='answer']").first.text) + + prob_xmod.find_by_css("section.action input.check").first.click() + + ## assert that we entered the correct answers + ## grab element and prob_xmod because the dom has changed (some classes/elements became hidden and changed the hierarchy) + assert world.browser.is_element_present_by_css("section[id='%s']" % problem_id, wait_time=5) + element = world.browser.find_by_css("section[id='%s']" % problem_id).first + prob_xmod = element.find_by_css("section.problem").first + input_fields = prob_xmod.find_by_css("section[id^='input']") + for field in input_fields: + ## if you don't use 'starts with ^=' the test will fail because the actual class is 'correct ' (with a space) + assert field.find_by_css("div[class^='correct']"), "The check answer values were not correct for %s" % problem_id diff --git a/lms/djangoapps/courseware/features/courseware.feature 
b/lms/djangoapps/courseware/features/courseware.feature new file mode 100644 index 0000000000..21c7e84541 --- /dev/null +++ b/lms/djangoapps/courseware/features/courseware.feature @@ -0,0 +1,18 @@ +Feature: View the Courseware Tab + As a student in an edX course + In order to work on the course + I want to view the info on the courseware tab + + Scenario: I can get to the courseware tab when logged in + Given I am registered for a course + And I log in + And I click on View Courseware + When I click on the "Courseware" tab + Then the "Courseware" tab is active + + # TODO: fix this one? Not sure whether you should get a 404. + # Scenario: I cannot get to the courseware tab when not logged in + # Given I am not logged in + # And I visit the homepage + # When I visit the courseware URL + # Then the login dialog is visible diff --git a/lms/djangoapps/courseware/features/courseware.py b/lms/djangoapps/courseware/features/courseware.py new file mode 100644 index 0000000000..05ecd63f4b --- /dev/null +++ b/lms/djangoapps/courseware/features/courseware.py @@ -0,0 +1,7 @@ +from lettuce import world, step +from lettuce.django import django_url + +@step('I visit the courseware URL$') +def i_visit_the_course_info_url(step): + url = django_url('/courses/MITx/6.002x/2012_Fall/courseware') + world.browser.visit(url) \ No newline at end of file diff --git a/lms/djangoapps/courseware/features/courseware_common.py b/lms/djangoapps/courseware/features/courseware_common.py new file mode 100644 index 0000000000..8850c88fef --- /dev/null +++ b/lms/djangoapps/courseware/features/courseware_common.py @@ -0,0 +1,37 @@ +from lettuce import world, step +from lettuce.django import django_url + +@step('I click on View Courseware') +def i_click_on_view_courseware(step): + css = 'p.enter-course' + world.browser.find_by_css(css).first.click() + +@step('I click on the "([^"]*)" tab$') +def i_click_on_the_tab(step, tab): + world.browser.find_link_by_text(tab).first.click() + world.save_the_html() + 
+@step('I visit the courseware URL$') +def i_visit_the_course_info_url(step): + url = django_url('/courses/MITx/6.002x/2012_Fall/courseware') + world.browser.visit(url) + +@step(u'I do not see "([^"]*)" anywhere on the page') +def i_do_not_see_text_anywhere_on_the_page(step, text): + assert world.browser.is_text_not_present(text) + +@step(u'I am on the dashboard page$') +def i_am_on_the_dashboard_page(step): + assert world.browser.is_element_present_by_css('section.courses') + assert world.browser.url == django_url('/dashboard') + +@step('the "([^"]*)" tab is active$') +def the_tab_is_active(step, tab): + css = '.course-tabs a.active' + active_tab = world.browser.find_by_css(css) + assert (active_tab.text == tab) + +@step('the login dialog is visible$') +def login_dialog_visible(step): + css = 'form#login_form.login_form' + assert world.browser.find_by_css(css).visible diff --git a/lms/djangoapps/courseware/features/high-level-tabs.feature b/lms/djangoapps/courseware/features/high-level-tabs.feature new file mode 100644 index 0000000000..2e9c4f1886 --- /dev/null +++ b/lms/djangoapps/courseware/features/high-level-tabs.feature @@ -0,0 +1,23 @@ +Feature: All the high level tabs should work + In order to preview the courseware + As a student + I want to navigate through the high level tabs + +# Note this didn't work as a scenario outline because +# before each scenario was not flushing the database +# TODO: break this apart so that if one fails the others +# will still run + Scenario: A student can see all tabs of the course + Given I am registered for a course + And I log in + And I click on View Courseware + When I click on the "Courseware" tab + Then the page title should be "6.002x Courseware" + When I click on the "Course Info" tab + Then the page title should be "6.002x Course Info" + When I click on the "Textbook" tab + Then the page title should be "6.002x Textbook" + When I click on the "Wiki" tab + Then the page title should be "6.002x | edX Wiki" + When I 
click on the "Progress" tab + Then the page title should be "6.002x Progress" diff --git a/lms/djangoapps/courseware/features/openended.feature b/lms/djangoapps/courseware/features/openended.feature new file mode 100644 index 0000000000..3c7043ba54 --- /dev/null +++ b/lms/djangoapps/courseware/features/openended.feature @@ -0,0 +1,33 @@ +Feature: Open ended grading + As a student in an edX course + In order to complete the courseware questions + I want the machine learning grading to be functional + + Scenario: An answer that is too short is rejected + Given I navigate to an openended question + And I enter the answer "z" + When I press the "Check" button + And I wait for "8" seconds + And I see the grader status "Submitted for grading" + And I press the "Recheck for Feedback" button + Then I see the red X + And I see the grader score "0" + + Scenario: An answer with too many spelling errors is rejected + Given I navigate to an openended question + And I enter the answer "az" + When I press the "Check" button + And I wait for "8" seconds + And I see the grader status "Submitted for grading" + And I press the "Recheck for Feedback" button + Then I see the red X + And I see the grader score "0" + When I click the link for full output + Then I see the spelling grading message "More spelling errors than average." + + Scenario: An answer makes its way to the instructor dashboard + Given I navigate to an openended question as staff + When I submit the answer "I love Chemistry." 
+ And I wait for "8" seconds + And I visit the staff grading page + Then my answer is queued for instructor grading \ No newline at end of file diff --git a/lms/djangoapps/courseware/features/openended.py b/lms/djangoapps/courseware/features/openended.py new file mode 100644 index 0000000000..d37f9a0fae --- /dev/null +++ b/lms/djangoapps/courseware/features/openended.py @@ -0,0 +1,89 @@ +from lettuce import world, step +from lettuce.django import django_url +from nose.tools import assert_equals, assert_in +from logging import getLogger +logger = getLogger(__name__) + +@step('I navigate to an openended question$') +def navigate_to_an_openended_question(step): + world.register_by_course_id('MITx/3.091x/2012_Fall') + world.log_in('robot@edx.org','test') + problem = '/courses/MITx/3.091x/2012_Fall/courseware/Week_10/Polymer_Synthesis/' + world.browser.visit(django_url(problem)) + tab_css = 'ol#sequence-list > li > a[data-element="5"]' + world.browser.find_by_css(tab_css).click() + +@step('I navigate to an openended question as staff$') +def navigate_to_an_openended_question_as_staff(step): + world.register_by_course_id('MITx/3.091x/2012_Fall', True) + world.log_in('robot@edx.org','test') + problem = '/courses/MITx/3.091x/2012_Fall/courseware/Week_10/Polymer_Synthesis/' + world.browser.visit(django_url(problem)) + tab_css = 'ol#sequence-list > li > a[data-element="5"]' + world.browser.find_by_css(tab_css).click() + +@step(u'I enter the answer "([^"]*)"$') +def enter_the_answer_text(step, text): + textarea_css = 'textarea' + world.browser.find_by_css(textarea_css).first.fill(text) + +@step(u'I submit the answer "([^"]*)"$') +def i_submit_the_answer_text(step, text): + textarea_css = 'textarea' + world.browser.find_by_css(textarea_css).first.fill(text) + check_css = 'input.check' + world.browser.find_by_css(check_css).click() + +@step('I click the link for full output$') +def click_full_output_link(step): + link_css = 'a.full' + 
world.browser.find_by_css(link_css).first.click() + +@step(u'I visit the staff grading page$') +def i_visit_the_staff_grading_page(step): + # course_u = '/courses/MITx/3.091x/2012_Fall' + # sg_url = '%s/staff_grading' % course_u + world.browser.click_link_by_text('Instructor') + world.browser.click_link_by_text('Staff grading') + # world.browser.visit(django_url(sg_url)) + +@step(u'I see the grader message "([^"]*)"$') +def see_grader_message(step, msg): + message_css = 'div.external-grader-message' + grader_msg = world.browser.find_by_css(message_css).text + assert_in(msg, grader_msg) + +@step(u'I see the grader status "([^"]*)"$') +def see_the_grader_status(step, status): + status_css = 'div.grader-status' + grader_status = world.browser.find_by_css(status_css).text + assert_equals(status, grader_status) + +@step('I see the red X$') +def see_the_red_x(step): + x_css = 'div.grader-status > span.incorrect' + assert world.browser.find_by_css(x_css) + +@step(u'I see the grader score "([^"]*)"$') +def see_the_grader_score(step, score): + score_css = 'div.result-output > p' + score_text = world.browser.find_by_css(score_css).text + assert_equals(score_text, 'Score: %s' % score) + +@step('I see the link for full output$') +def see_full_output_link(step): + link_css = 'a.full' + assert world.browser.find_by_css(link_css) + +@step('I see the spelling grading message "([^"]*)"$') +def see_spelling_msg(step, msg): + spelling_css = 'div.spelling' + spelling_msg = world.browser.find_by_css(spelling_css).text + assert_equals('Spelling: %s' % msg, spelling_msg) + +@step(u'my answer is queued for instructor grading$') +def answer_is_queued_for_instructor_grading(step): + list_css = 'ul.problem-list > li > a' + actual_msg = world.browser.find_by_css(list_css).text + expected_msg = "(0 graded, 1 pending)" + assert_in(expected_msg, actual_msg) diff --git a/lms/djangoapps/courseware/features/smart-accordion.feature b/lms/djangoapps/courseware/features/smart-accordion.feature new 
file mode 100644 index 0000000000..90d097144a --- /dev/null +++ b/lms/djangoapps/courseware/features/smart-accordion.feature @@ -0,0 +1,59 @@ +# Here are all the courses for Fall 2012 +# MITx/3.091x/2012_Fall +# MITx/6.002x/2012_Fall +# MITx/6.00x/2012_Fall +# HarvardX/CS50x/2012 (we will not be testing this, as it is anomolistic) +# HarvardX/PH207x/2012_Fall +# BerkeleyX/CS169.1x/2012_Fall +# BerkeleyX/CS169.2x/2012_Fall +# BerkeleyX/CS184.1x/2012_Fall + +#You can load the courses into your data directory with these cmds: +# git clone https://github.com/MITx/3.091x.git +# git clone https://github.com/MITx/6.00x.git +# git clone https://github.com/MITx/content-mit-6002x.git +# git clone https://github.com/MITx/content-mit-6002x.git +# git clone https://github.com/MITx/content-harvard-id270x.git +# git clone https://github.com/MITx/content-berkeley-cs169x.git +# git clone https://github.com/MITx/content-berkeley-cs169.2x.git +# git clone https://github.com/MITx/content-berkeley-cs184x.git + +Feature: There are courses on the homepage + In order to compared rendered content to the database + As an acceptance test + I want to count all the chapters, sections, and tabs for each course + + Scenario: Navigate through course MITx/3.091x/2012_Fall + Given I am registered for course "MITx/3.091x/2012_Fall" + And I log in + Then I verify all the content of each course + + Scenario: Navigate through course MITx/6.002x/2012_Fall + Given I am registered for course "MITx/6.002x/2012_Fall" + And I log in + Then I verify all the content of each course + + Scenario: Navigate through course MITx/6.00x/2012_Fall + Given I am registered for course "MITx/6.00x/2012_Fall" + And I log in + Then I verify all the content of each course + + Scenario: Navigate through course HarvardX/PH207x/2012_Fall + Given I am registered for course "HarvardX/PH207x/2012_Fall" + And I log in + Then I verify all the content of each course + + Scenario: Navigate through course BerkeleyX/CS169.1x/2012_Fall + 
Given I am registered for course "BerkeleyX/CS169.1x/2012_Fall" + And I log in + Then I verify all the content of each course + + Scenario: Navigate through course BerkeleyX/CS169.2x/2012_Fall + Given I am registered for course "BerkeleyX/CS169.2x/2012_Fall" + And I log in + Then I verify all the content of each course + + Scenario: Navigate through course BerkeleyX/CS184.1x/2012_Fall + Given I am registered for course "BerkeleyX/CS184.1x/2012_Fall" + And I log in + Then I verify all the content of each course \ No newline at end of file diff --git a/lms/djangoapps/courseware/features/smart-accordion.py b/lms/djangoapps/courseware/features/smart-accordion.py new file mode 100644 index 0000000000..95d3396f57 --- /dev/null +++ b/lms/djangoapps/courseware/features/smart-accordion.py @@ -0,0 +1,152 @@ +from lettuce import world, step +from re import sub +from nose.tools import assert_equals +from xmodule.modulestore.django import modulestore +from courses import * + +from logging import getLogger +logger = getLogger(__name__) + +def check_for_errors(): + e = world.browser.find_by_css('.outside-app') + if len(e) > 0: + assert False, 'there was a server error at %s' % (world.browser.url) + else: + assert True + +@step(u'I verify all the content of each course') +def i_verify_all_the_content_of_each_course(step): + all_possible_courses = get_courses() + logger.debug('Courses found:') + for c in all_possible_courses: + logger.debug(c.id) + ids = [c.id for c in all_possible_courses] + + # Get a list of all the registered courses + registered_courses = world.browser.find_by_css('article.my-course') + if len(all_possible_courses) < len(registered_courses): + assert False, "user is registered for more courses than are uniquely posssible" + else: + pass + + for test_course in registered_courses: + test_course.find_by_css('a').click() + check_for_errors() + + # Get the course. E.g. 
'MITx/6.002x/2012_Fall' + current_course = sub('/info','', sub('.*/courses/', '', world.browser.url)) + validate_course(current_course,ids) + + world.browser.find_link_by_text('Courseware').click() + assert world.browser.is_element_present_by_id('accordion',wait_time=2) + check_for_errors() + browse_course(current_course) + + # clicking the user link gets you back to the user's home page + world.browser.find_by_css('.user-link').click() + check_for_errors() + +def browse_course(course_id): + + ## count chapters from xml and page and compare + chapters = get_courseware_with_tabs(course_id) + num_chapters = len(chapters) + + rendered_chapters = world.browser.find_by_css('#accordion > nav > div') + num_rendered_chapters = len(rendered_chapters) + + msg = '%d chapters expected, %d chapters found on page for %s' % (num_chapters, num_rendered_chapters, course_id) + #logger.debug(msg) + assert num_chapters == num_rendered_chapters, msg + + chapter_it = 0 + + ## Iterate the chapters + while chapter_it < num_chapters: + + ## click into a chapter + world.browser.find_by_css('#accordion > nav > div')[chapter_it].find_by_tag('h3').click() + + ## look for the "there was a server error" div + check_for_errors() + + ## count sections from xml and page and compare + sections = chapters[chapter_it]['sections'] + num_sections = len(sections) + + rendered_sections = world.browser.find_by_css('#accordion > nav > div')[chapter_it].find_by_tag('li') + num_rendered_sections = len(rendered_sections) + + msg = ('%d sections expected, %d sections found on page, %s - %d - %s' % + (num_sections, num_rendered_sections, course_id, chapter_it, chapters[chapter_it]['chapter_name'])) + #logger.debug(msg) + assert num_sections == num_rendered_sections, msg + + section_it = 0 + + ## Iterate the sections + while section_it < num_sections: + + ## click on a section + world.browser.find_by_css('#accordion > nav > div')[chapter_it].find_by_tag('li')[section_it].find_by_tag('a').click() + + ## sometimes 
the course-content takes a long time to load + assert world.browser.is_element_present_by_css('.course-content',wait_time=5) + + ## look for server error div + check_for_errors() + + ## count tabs from xml and page and compare + + ## count the number of tabs. If number of tabs is 0, there won't be anything rendered + ## so we explicitly set rendered_tabs because otherwise find_elements returns a None object with no length + num_tabs = sections[section_it]['clickable_tab_count'] + if num_tabs != 0: + rendered_tabs = world.browser.find_by_css('ol#sequence-list > li') + num_rendered_tabs = len(rendered_tabs) + else: + rendered_tabs = 0 + num_rendered_tabs = 0 + + msg = ('%d tabs expected, %d tabs found, %s - %d - %s' % + (num_tabs, num_rendered_tabs, course_id, section_it, sections[section_it]['section_name'])) + #logger.debug(msg) + + # Save the HTML to a file for later comparison + world.save_the_course_content('/tmp/%s' % course_id) + + assert num_tabs == num_rendered_tabs, msg + + tabs = sections[section_it]['tabs'] + tab_it = 0 + + ## Iterate the tabs + while tab_it < num_tabs: + + rendered_tabs[tab_it].find_by_tag('a').click() + + ## do something with the tab sections[section_it] + # e = world.browser.find_by_css('section.course-content section') + # process_section(e) + tab_children = tabs[tab_it]['children_count'] + tab_class = tabs[tab_it]['class'] + if tab_children != 0: + rendered_items = world.browser.find_by_css('div#seq_content > section > ol > li > section') + num_rendered_items = len(rendered_items) + msg = ('%d items expected, %d items found, %s - %d - %s - tab %d' % + (tab_children, num_rendered_items, course_id, section_it, sections[section_it]['section_name'], tab_it)) + #logger.debug(msg) + assert tab_children == num_rendered_items, msg + + tab_it += 1 + + section_it += 1 + + chapter_it += 1 + + +def validate_course(current_course, ids): + try: + ids.index(current_course) + except: + assert False, "invalid course id %s" % current_course diff --git 
a/lms/djangoapps/courseware/migrations/0005_auto__add_offlinecomputedgrade__add_unique_offlinecomputedgrade_user_c.py b/lms/djangoapps/courseware/migrations/0005_auto__add_offlinecomputedgrade__add_unique_offlinecomputedgrade_user_c.py new file mode 100644 index 0000000000..674f97cec8 --- /dev/null +++ b/lms/djangoapps/courseware/migrations/0005_auto__add_offlinecomputedgrade__add_unique_offlinecomputedgrade_user_c.py @@ -0,0 +1,117 @@ +# -*- coding: utf-8 -*- +import datetime +from south.db import db +from south.v2 import SchemaMigration +from django.db import models + + +class Migration(SchemaMigration): + + def forwards(self, orm): + # Adding model 'OfflineComputedGrade' + db.create_table('courseware_offlinecomputedgrade', ( + ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), + ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), + ('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)), + ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, db_index=True, blank=True)), + ('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)), + ('gradeset', self.gf('django.db.models.fields.TextField')(null=True, blank=True)), + )) + db.send_create_signal('courseware', ['OfflineComputedGrade']) + + # Adding unique constraint on 'OfflineComputedGrade', fields ['user', 'course_id'] + db.create_unique('courseware_offlinecomputedgrade', ['user_id', 'course_id']) + + # Adding model 'OfflineComputedGradeLog' + db.create_table('courseware_offlinecomputedgradelog', ( + ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), + ('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)), + ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, db_index=True, blank=True)), + ('seconds', 
self.gf('django.db.models.fields.IntegerField')(default=0)), + ('nstudents', self.gf('django.db.models.fields.IntegerField')(default=0)), + )) + db.send_create_signal('courseware', ['OfflineComputedGradeLog']) + + + def backwards(self, orm): + # Removing unique constraint on 'OfflineComputedGrade', fields ['user', 'course_id'] + db.delete_unique('courseware_offlinecomputedgrade', ['user_id', 'course_id']) + + # Deleting model 'OfflineComputedGrade' + db.delete_table('courseware_offlinecomputedgrade') + + # Deleting model 'OfflineComputedGradeLog' + db.delete_table('courseware_offlinecomputedgradelog') + + + models = { + 'auth.group': { + 'Meta': {'object_name': 'Group'}, + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), + 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) + }, + 'auth.permission': { + 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, + 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) + }, + 'auth.user': { + 'Meta': {'object_name': 'User'}, + 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), + 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), + 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), + 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 
'symmetrical': 'False', 'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), + 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), + 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), + 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), + 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), + 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), + 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), + 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) + }, + 'contenttypes.contenttype': { + 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, + 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) + }, + 'courseware.offlinecomputedgrade': { + 'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'OfflineComputedGrade'}, + 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), + 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), + 'gradeset': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'updated': ('django.db.models.fields.DateTimeField', 
[], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), + 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) + }, + 'courseware.offlinecomputedgradelog': { + 'Meta': {'object_name': 'OfflineComputedGradeLog'}, + 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), + 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'nstudents': ('django.db.models.fields.IntegerField', [], {'default': '0'}), + 'seconds': ('django.db.models.fields.IntegerField', [], {'default': '0'}) + }, + 'courseware.studentmodule': { + 'Meta': {'unique_together': "(('student', 'module_state_key', 'course_id'),)", 'object_name': 'StudentModule'}, + 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), + 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}), + 'done': ('django.db.models.fields.CharField', [], {'default': "'na'", 'max_length': '8', 'db_index': 'True'}), + 'grade': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'max_grade': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), + 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), + 'module_state_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_column': "'module_id'", 'db_index': 'True'}), + 'module_type': ('django.db.models.fields.CharField', [], {'default': "'problem'", 'max_length': '32', 'db_index': 'True'}), + 'state': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), + 'student': 
('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) + } + } + + complete_apps = ['courseware'] \ No newline at end of file diff --git a/lms/djangoapps/courseware/models.py b/lms/djangoapps/courseware/models.py index ffc7c929de..21ef8b3d66 100644 --- a/lms/djangoapps/courseware/models.py +++ b/lms/djangoapps/courseware/models.py @@ -177,3 +177,40 @@ class StudentModuleCache(object): def append(self, student_module): self.cache.append(student_module) + + +class OfflineComputedGrade(models.Model): + """ + Table of grades computed offline for a given user and course. + """ + user = models.ForeignKey(User, db_index=True) + course_id = models.CharField(max_length=255, db_index=True) + + created = models.DateTimeField(auto_now_add=True, null=True, db_index=True) + updated = models.DateTimeField(auto_now=True, db_index=True) + + gradeset = models.TextField(null=True, blank=True) # grades, stored as JSON + + class Meta: + unique_together = (('user', 'course_id'), ) + + def __unicode__(self): + return "[OfflineComputedGrade] %s: %s (%s) = %s" % (self.user, self.course_id, self.created, self.gradeset) + + +class OfflineComputedGradeLog(models.Model): + """ + Log of when offline grades are computed. + Use this to be able to show instructor when the last computed grades were done. 
+ """ + class Meta: + ordering = ["-created"] + get_latest_by = "created" + + course_id = models.CharField(max_length=255, db_index=True) + created = models.DateTimeField(auto_now_add=True, null=True, db_index=True) + seconds = models.IntegerField(default=0) # seconds elapsed for computation + nstudents = models.IntegerField(default=0) + + def __unicode__(self): + return "[OCGLog] %s: %s" % (self.course_id, self.created) diff --git a/lms/djangoapps/courseware/module_render.py b/lms/djangoapps/courseware/module_render.py index 67927c0ee7..bd919eeb15 100644 --- a/lms/djangoapps/courseware/module_render.py +++ b/lms/djangoapps/courseware/module_render.py @@ -1,4 +1,3 @@ -import hashlib import json import logging import pyparsing @@ -20,6 +19,7 @@ from mitxmako.shortcuts import render_to_string from models import StudentModule, StudentModuleCache from psychometrics.psychoanalyze import make_psychometrics_data_update_handler from static_replace import replace_urls +from student.models import unique_id_for_user from xmodule.errortracker import exc_info_to_str from xmodule.exceptions import NotFoundError from xmodule.modulestore import Location @@ -152,12 +152,6 @@ def _get_module(user, request, location, student_module_cache, course_id, positi if not has_access(user, descriptor, 'load'): return None - # Anonymized student identifier - h = hashlib.md5() - h.update(settings.SECRET_KEY) - h.update(str(user.id)) - anonymous_student_id = h.hexdigest() - # Only check the cache if this module can possibly have state instance_module = None shared_module = None @@ -230,7 +224,8 @@ def _get_module(user, request, location, student_module_cache, course_id, positi # by the replace_static_urls code below replace_urls=replace_urls, node_path=settings.NODE_PATH, - anonymous_student_id=anonymous_student_id, + anonymous_student_id=unique_id_for_user(user), + course_id=course_id, ) # pass position specified in URL to module through ModuleSystem system.set('position', position) diff --git 
a/lms/djangoapps/courseware/tabs.py b/lms/djangoapps/courseware/tabs.py index 980fedb947..47730f77cc 100644 --- a/lms/djangoapps/courseware/tabs.py +++ b/lms/djangoapps/courseware/tabs.py @@ -11,6 +11,7 @@ actually generates the CourseTab. from collections import namedtuple import logging +import json from django.conf import settings from django.core.urlresolvers import reverse @@ -20,6 +21,10 @@ from fs.errors import ResourceNotFoundError from courseware.access import has_access from static_replace import replace_urls +from open_ended_grading.peer_grading_service import PeerGradingService +from open_ended_grading.staff_grading_service import StaffGradingService +from student.models import unique_id_for_user + log = logging.getLogger(__name__) class InvalidTabsException(Exception): @@ -28,7 +33,10 @@ class InvalidTabsException(Exception): """ pass -CourseTab = namedtuple('CourseTab', 'name link is_active') +CourseTabBase = namedtuple('CourseTab', 'name link is_active has_img img') + +def CourseTab(name, link, is_active, has_img=False, img=""): + return CourseTabBase(name, link, is_active, has_img, img) # encapsulate implementation for a tab: # - a validation function: takes the config dict and raises @@ -36,7 +44,7 @@ CourseTab = namedtuple('CourseTab', 'name link is_active') # wrong. (e.g. "is there a 'name' field?). Validators can assume # that the type field is valid. # -# - a function that takes a config, a user, and a course, and active_page and +# - a function that takes a config, a user, and a course, an active_page and # return a list of CourseTabs. (e.g. "return a CourseTab with specified # name, link to courseware, and is_active=True/False"). 
The function can # assume that it is only called with configs of the appropriate type that @@ -97,6 +105,53 @@ def _textbooks(tab, user, course, active_page): for index, textbook in enumerate(course.textbooks)] return [] + +def _staff_grading(tab, user, course, active_page): + if has_access(user, course, 'staff'): + link = reverse('staff_grading', args=[course.id]) + staff_gs = StaffGradingService(settings.STAFF_GRADING_INTERFACE) + pending_grading=False + tab_name = "Staff grading" + img_path= "" + try: + notifications = json.loads(staff_gs.get_notifications(course.id)) + if notifications['success']: + if notifications['staff_needs_to_grade']: + pending_grading=True + except: + #Non catastrophic error, so no real action + log.info("Problem with getting notifications from staff grading service.") + + if pending_grading: + img_path = "/static/images/slider-handle.png" + + tab = [CourseTab(tab_name, link, active_page == "staff_grading", pending_grading, img_path)] + return tab + return [] + +def _peer_grading(tab, user, course, active_page): + if user.is_authenticated(): + link = reverse('peer_grading', args=[course.id]) + peer_gs = PeerGradingService(settings.PEER_GRADING_INTERFACE) + pending_grading=False + tab_name = "Peer grading" + img_path= "" + try: + notifications = json.loads(peer_gs.get_notifications(course.id,unique_id_for_user(user))) + if notifications['success']: + if notifications['student_needs_to_peer_grade']: + pending_grading=True + except: + #Non catastrophic error, so no real action + log.info("Problem with getting notifications from peer grading service.") + + if pending_grading: + img_path = "/static/images/slider-handle.png" + + tab = [CourseTab(tab_name, link, active_page == "peer_grading", pending_grading, img_path)] + return tab + return [] + #### Validators @@ -132,6 +187,8 @@ VALID_TAB_TYPES = { 'textbooks': TabImpl(null_validator, _textbooks), 'progress': TabImpl(need_name, _progress), 'static_tab': TabImpl(key_checker(['name', 
'url_slug']), _static_tab), + 'peer_grading': TabImpl(null_validator, _peer_grading), + 'staff_grading': TabImpl(null_validator, _staff_grading), } diff --git a/lms/djangoapps/courseware/tests/tests.py b/lms/djangoapps/courseware/tests/tests.py index 8239eadfd9..eeb304b193 100644 --- a/lms/djangoapps/courseware/tests/tests.py +++ b/lms/djangoapps/courseware/tests/tests.py @@ -1,34 +1,32 @@ -import copy +import logging +log = logging.getLogger("mitx." + __name__) + import json -import os -import sys import time -from nose import SkipTest -from path import path -from pprint import pprint from urlparse import urlsplit, urlunsplit from django.contrib.auth.models import User, Group -from django.core.handlers.wsgi import WSGIRequest from django.test import TestCase -from django.test.client import Client, RequestFactory +from django.test.client import RequestFactory from django.conf import settings from django.core.urlresolvers import reverse -from mock import patch, Mock from override_settings import override_settings import xmodule.modulestore.django # Need access to internal func to put users in the right group from courseware import grades -from courseware.access import _course_staff_group_name +from courseware.access import (has_access, _course_staff_group_name, + course_beta_test_group_name) from courseware.models import StudentModuleCache from student.models import Registration +from xmodule.error_module import ErrorDescriptor from xmodule.modulestore.django import modulestore from xmodule.modulestore import Location from xmodule.modulestore.xml_importer import import_from_xml +from xmodule.modulestore.xml import XMLModuleStore from xmodule.timeparse import stringify_time def parse_json(response): @@ -45,26 +43,6 @@ def registration(email): '''look up registration object by email''' return Registration.objects.get(user__email=email) - -# A bit of a hack--want mongo modulestore for these tests, until -# jump_to works with the xmlmodulestore or we have an even better 
solution -# NOTE: this means this test requires mongo to be running. - -def mongo_store_config(data_dir): - return { - 'default': { - 'ENGINE': 'xmodule.modulestore.mongo.MongoModuleStore', - 'OPTIONS': { - 'default_class': 'xmodule.raw_module.RawDescriptor', - 'host': 'localhost', - 'db': 'xmodule', - 'collection': 'modulestore', - 'fs_root': data_dir, - 'render_template': 'mitxmako.shortcuts.render_to_string', - } - } -} - def xml_store_config(data_dir): return { 'default': { @@ -76,14 +54,9 @@ def xml_store_config(data_dir): } } - TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT -TEST_DATA_MONGO_MODULESTORE = mongo_store_config(TEST_DATA_DIR) TEST_DATA_XML_MODULESTORE = xml_store_config(TEST_DATA_DIR) -REAL_DATA_DIR = settings.GITHUB_REPO_ROOT -REAL_DATA_MODULESTORE = mongo_store_config(REAL_DATA_DIR) - class ActivateLoginTestCase(TestCase): '''Check that we can activate and log in''' @@ -221,22 +194,43 @@ class PageLoader(ActivateLoginTestCase): def check_for_get_code(self, code, url): """ - Check that we got the expected code. Hacks around our broken 404 - handling. + Check that we got the expected code when accessing url via GET. + Returns the response. """ resp = self.client.get(url) self.assertEqual(resp.status_code, code, "got code {0} for url '{1}'. Expected code {2}" .format(resp.status_code, url, code)) + return resp + + + def check_for_post_code(self, code, url, data={}): + """ + Check that we got the expected code when accessing url via POST. + Returns the response. + """ + resp = self.client.post(url, data) + self.assertEqual(resp.status_code, code, + "got code {0} for url '{1}'. 
Expected code {2}" + .format(resp.status_code, url, code)) + return resp + def check_pages_load(self, course_name, data_dir, modstore): """Make all locations in course load""" print "Checking course {0} in {1}".format(course_name, data_dir) - import_from_xml(modstore, data_dir, [course_name]) + default_class='xmodule.hidden_module.HiddenDescriptor' + load_error_modules=True + module_store = XMLModuleStore( + data_dir, + default_class=default_class, + course_dirs=[course_name], + load_error_modules=load_error_modules, + ) - # enroll in the course before trying to access pages - courses = modstore.get_courses() + # enroll in the course before trying to access pages + courses = module_store.get_courses() self.assertEqual(len(courses), 1) course = courses[0] self.enroll(course) @@ -245,35 +239,54 @@ class PageLoader(ActivateLoginTestCase): n = 0 num_bad = 0 all_ok = True - for descriptor in modstore.get_items( - Location(None, None, None, None, None)): + for descriptor in module_store.modules[course_id].itervalues(): n += 1 print "Checking ", descriptor.location.url() #print descriptor.__class__, descriptor.location resp = self.client.get(reverse('jump_to', kwargs={'course_id': course_id, - 'location': descriptor.location.url()})) + 'location': descriptor.location.url()}), follow=True) + # check status codes first msg = str(resp.status_code) + if resp.status_code != 200: + msg = "ERROR " + msg + ": " + descriptor.location.url() + all_ok = False + num_bad += 1 + elif resp.redirect_chain[0][1] != 302: + msg = "ERROR on redirect from " + descriptor.location.url() + all_ok = False + num_bad += 1 - if resp.status_code != 302: - msg = "ERROR " + msg + # check content to make sure there were no rendering failures + content = resp.content + if content.find("this module is temporarily unavailable")>=0: + msg = "ERROR unavailable module " + all_ok = False + num_bad += 1 + elif isinstance(descriptor, ErrorDescriptor): + msg = "ERROR error descriptor loaded: " + msg = msg + 
descriptor.definition['data']['error_msg'] all_ok = False num_bad += 1 print msg self.assertTrue(all_ok) # fail fast print "{0}/{1} good".format(n - num_bad, n) + log.info( "{0}/{1} good".format(n - num_bad, n)) self.assertTrue(all_ok) -@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE) + +@override_settings(MODULESTORE=TEST_DATA_XML_MODULESTORE) class TestCoursesLoadTestCase(PageLoader): '''Check that all pages in test courses load properly''' def setUp(self): ActivateLoginTestCase.setUp(self) xmodule.modulestore.django._MODULESTORES = {} - xmodule.modulestore.django.modulestore().collection.drop() +# xmodule.modulestore.django.modulestore().collection.drop() +# store = xmodule.modulestore.django.modulestore() + # is there a way to empty the store? def test_toy_course_loads(self): self.check_pages_load('toy', TEST_DATA_DIR, modulestore()) @@ -288,14 +301,10 @@ class TestNavigation(PageLoader): def setUp(self): xmodule.modulestore.django._MODULESTORES = {} - courses = modulestore().get_courses() - def find_course(course_id): - """Assumes the course is present""" - return [c for c in courses if c.id==course_id][0] - - self.full = find_course("edX/full/6.002_Spring_2012") - self.toy = find_course("edX/toy/2012_Fall") + # Assume courses are there + self.full = modulestore().get_course("edX/full/6.002_Spring_2012") + self.toy = modulestore().get_course("edX/toy/2012_Fall") # Create two accounts self.student = 'view@test.com' @@ -346,14 +355,9 @@ class TestViewAuth(PageLoader): def setUp(self): xmodule.modulestore.django._MODULESTORES = {} - courses = modulestore().get_courses() - def find_course(course_id): - """Assumes the course is present""" - return [c for c in courses if c.id==course_id][0] - - self.full = find_course("edX/full/6.002_Spring_2012") - self.toy = find_course("edX/toy/2012_Fall") + self.full = modulestore().get_course("edX/full/6.002_Spring_2012") + self.toy = modulestore().get_course("edX/toy/2012_Fall") # Create two accounts self.student = 
'view@test.com' @@ -450,6 +454,9 @@ class TestViewAuth(PageLoader): """Check that enrollment periods work""" self.run_wrapped(self._do_test_enrollment_period) + def test_beta_period(self): + """Check that beta-test access works""" + self.run_wrapped(self._do_test_beta_period) def _do_test_dark_launch(self): """Actually do the test, relying on settings to be right.""" @@ -615,34 +622,37 @@ class TestViewAuth(PageLoader): self.unenroll(self.toy) self.assertTrue(self.try_enroll(self.toy)) + def _do_test_beta_period(self): + """Actually test beta periods, relying on settings to be right.""" -@override_settings(MODULESTORE=REAL_DATA_MODULESTORE) -class RealCoursesLoadTestCase(PageLoader): - '''Check that all pages in real courses load properly''' + # trust, but verify :) + self.assertFalse(settings.MITX_FEATURES['DISABLE_START_DATES']) - def setUp(self): - ActivateLoginTestCase.setUp(self) - xmodule.modulestore.django._MODULESTORES = {} - xmodule.modulestore.django.modulestore().collection.drop() + # Make courses start in the future + tomorrow = time.time() + 24 * 3600 + nextday = tomorrow + 24 * 3600 + yesterday = time.time() - 24 * 3600 - def test_real_courses_loads(self): - '''See if any real courses are available at the REAL_DATA_DIR. - If they are, check them.''' + # toy course's hasn't started + self.toy.metadata['start'] = stringify_time(time.gmtime(tomorrow)) + self.assertFalse(self.toy.has_started()) - # TODO: Disabled test for now.. Fix once things are cleaned up. - raise SkipTest - # TODO: adjust staticfiles_dirs - if not os.path.isdir(REAL_DATA_DIR): - # No data present. Just pass. 
- return + # but should be accessible for beta testers + self.toy.metadata['days_early_for_beta'] = '2' - courses = [course_dir for course_dir in os.listdir(REAL_DATA_DIR) - if os.path.isdir(REAL_DATA_DIR / course_dir)] - for course in courses: - self.check_pages_load(course, REAL_DATA_DIR, modulestore()) + # student user shouldn't see it + student_user = user(self.student) + self.assertFalse(has_access(student_user, self.toy, 'load')) + + # now add the student to the beta test group + group_name = course_beta_test_group_name(self.toy.location) + g = Group.objects.create(name=group_name) + g.user_set.add(student_user) + + # now the student should see it + self.assertTrue(has_access(student_user, self.toy, 'load')) - # ========= TODO: check ajax interaction here too? @override_settings(MODULESTORE=TEST_DATA_XML_MODULESTORE) class TestCourseGrader(PageLoader): @@ -660,46 +670,46 @@ class TestCourseGrader(PageLoader): return [c for c in courses if c.id==course_id][0] self.graded_course = find_course("edX/graded/2012_Fall") - + # create a test student self.student = 'view@test.com' self.password = 'foo' self.create_account('u1', self.student, self.password) self.activate_user(self.student) self.enroll(self.graded_course) - + self.student_user = user(self.student) - + self.factory = RequestFactory() - + def get_grade_summary(self): student_module_cache = StudentModuleCache.cache_for_descriptor_descendents( self.graded_course.id, self.student_user, self.graded_course) - - fake_request = self.factory.get(reverse('progress', - kwargs={'course_id': self.graded_course.id})) - - return grades.grade(self.student_user, fake_request, - self.graded_course, student_module_cache) - - def get_homework_scores(self): - return self.get_grade_summary()['totaled_scores']['Homework'] - - def get_progress_summary(self): - student_module_cache = StudentModuleCache.cache_for_descriptor_descendents( - self.graded_course.id, self.student_user, self.graded_course) - + fake_request = 
self.factory.get(reverse('progress', kwargs={'course_id': self.graded_course.id})) - progress_summary = grades.progress_summary(self.student_user, fake_request, + return grades.grade(self.student_user, fake_request, + self.graded_course, student_module_cache) + + def get_homework_scores(self): + return self.get_grade_summary()['totaled_scores']['Homework'] + + def get_progress_summary(self): + student_module_cache = StudentModuleCache.cache_for_descriptor_descendents( + self.graded_course.id, self.student_user, self.graded_course) + + fake_request = self.factory.get(reverse('progress', + kwargs={'course_id': self.graded_course.id})) + + progress_summary = grades.progress_summary(self.student_user, fake_request, self.graded_course, student_module_cache) return progress_summary - + def check_grade_percent(self, percent): grade_summary = self.get_grade_summary() - self.assertEqual(percent, grade_summary['percent']) - + self.assertEqual(grade_summary['percent'], percent) + def submit_question_answer(self, problem_url_name, responses): """ The field names of a problem are hard to determine. 
This method only works @@ -709,96 +719,96 @@ class TestCourseGrader(PageLoader): input_i4x-edX-graded-problem-H1P3_2_2 """ problem_location = "i4x://edX/graded/problem/{0}".format(problem_url_name) - - modx_url = reverse('modx_dispatch', + + modx_url = reverse('modx_dispatch', kwargs={ 'course_id' : self.graded_course.id, 'location' : problem_location, 'dispatch' : 'problem_check', } ) - + resp = self.client.post(modx_url, { 'input_i4x-edX-graded-problem-{0}_2_1'.format(problem_url_name): responses[0], 'input_i4x-edX-graded-problem-{0}_2_2'.format(problem_url_name): responses[1], }) print "modx_url" , modx_url, "responses" , responses print "resp" , resp - + return resp - + def problem_location(self, problem_url_name): return "i4x://edX/graded/problem/{0}".format(problem_url_name) - + def reset_question_answer(self, problem_url_name): problem_location = self.problem_location(problem_url_name) - - modx_url = reverse('modx_dispatch', + + modx_url = reverse('modx_dispatch', kwargs={ 'course_id' : self.graded_course.id, 'location' : problem_location, 'dispatch' : 'problem_reset', } ) - + resp = self.client.post(modx_url) - return resp - + return resp + def test_get_graded(self): #### Check that the grader shows we have 0% in the course self.check_grade_percent(0) - + #### Submit the answers to a few problems as ajax calls def earned_hw_scores(): """Global scores, each Score is a Problem Set""" return [s.earned for s in self.get_homework_scores()] - + def score_for_hw(hw_url_name): hw_section = [section for section in self.get_progress_summary()[0]['sections'] if section.get('url_name') == hw_url_name][0] return [s.earned for s in hw_section['scores']] - + # Only get half of the first problem correct self.submit_question_answer('H1P1', ['Correct', 'Incorrect']) self.check_grade_percent(0.06) self.assertEqual(earned_hw_scores(), [1.0, 0, 0]) # Order matters self.assertEqual(score_for_hw('Homework1'), [1.0, 0.0]) - + # Get both parts of the first problem correct 
self.reset_question_answer('H1P1') self.submit_question_answer('H1P1', ['Correct', 'Correct']) self.check_grade_percent(0.13) self.assertEqual(earned_hw_scores(), [2.0, 0, 0]) self.assertEqual(score_for_hw('Homework1'), [2.0, 0.0]) - + # This problem is shown in an ABTest self.submit_question_answer('H1P2', ['Correct', 'Correct']) self.check_grade_percent(0.25) self.assertEqual(earned_hw_scores(), [4.0, 0.0, 0]) - self.assertEqual(score_for_hw('Homework1'), [2.0, 2.0]) - + self.assertEqual(score_for_hw('Homework1'), [2.0, 2.0]) + # This problem is hidden in an ABTest. Getting it correct doesn't change total grade self.submit_question_answer('H1P3', ['Correct', 'Correct']) self.check_grade_percent(0.25) self.assertEqual(score_for_hw('Homework1'), [2.0, 2.0]) - + # On the second homework, we only answer half of the questions. # Then it will be dropped when homework three becomes the higher percent # This problem is also weighted to be 4 points (instead of default of 2) - # If the problem was unweighted the percent would have been 0.38 so we + # If the problem was unweighted the percent would have been 0.38 so we # know it works. self.submit_question_answer('H2P1', ['Correct', 'Correct']) self.check_grade_percent(0.42) - self.assertEqual(earned_hw_scores(), [4.0, 4.0, 0]) - + self.assertEqual(earned_hw_scores(), [4.0, 4.0, 0]) + # Third homework self.submit_question_answer('H3P1', ['Correct', 'Correct']) self.check_grade_percent(0.42) # Score didn't change - self.assertEqual(earned_hw_scores(), [4.0, 4.0, 2.0]) - + self.assertEqual(earned_hw_scores(), [4.0, 4.0, 2.0]) + self.submit_question_answer('H3P2', ['Correct', 'Correct']) self.check_grade_percent(0.5) # Now homework2 dropped. 
Score changes - self.assertEqual(earned_hw_scores(), [4.0, 4.0, 4.0]) - + self.assertEqual(earned_hw_scores(), [4.0, 4.0, 4.0]) + # Now we answer the final question (worth half of the grade) self.submit_question_answer('FinalQuestion', ['Correct', 'Correct']) self.check_grade_percent(1.0) # Hooray! We got 100% diff --git a/lms/djangoapps/courseware/views.py b/lms/djangoapps/courseware/views.py index 73d40b05c5..9e52e2b281 100644 --- a/lms/djangoapps/courseware/views.py +++ b/lms/djangoapps/courseware/views.py @@ -17,7 +17,7 @@ from django.views.decorators.cache import cache_control from courseware import grades from courseware.access import has_access -from courseware.courses import (get_course_with_access, get_courses_by_university) +from courseware.courses import (get_courses, get_course_with_access, get_courses_by_university) import courseware.tabs as tabs from courseware.models import StudentModuleCache from module_render import toc_for_course, get_module, get_instance_module @@ -61,16 +61,19 @@ def user_groups(user): return group_names - @ensure_csrf_cookie @cache_if_anonymous def courses(request): ''' Render "find courses" page. The course selection work is done in courseware.courses. 
''' - universities = get_courses_by_university(request.user, - domain=request.META.get('HTTP_HOST')) - return render_to_response("courseware/courses.html", {'universities': universities}) + courses = get_courses(request.user, domain=request.META.get('HTTP_HOST')) + + # Sort courses by how far are they from they start day + key = lambda course: course.days_until_start + courses = sorted(courses, key=key, reverse=True) + + return render_to_response("courseware/courses.html", {'courses': courses}) def render_accordion(request, course, chapter, section): @@ -293,7 +296,6 @@ def index(request, course_id, chapter=None, section=None, return result - @ensure_csrf_cookie def jump_to(request, course_id, location): ''' @@ -318,12 +320,18 @@ def jump_to(request, course_id, location): except NoPathToItem: raise Http404("This location is not in any class: {0}".format(location)) + # choose the appropriate view (and provide the necessary args) based on the + # args provided by the redirect. # Rely on index to do all error handling and access control. - return redirect('courseware_position', - course_id=course_id, - chapter=chapter, - section=section, - position=position) + if chapter is None: + return redirect('courseware', course_id=course_id) + elif section is None: + return redirect('courseware_chapter', course_id=course_id, chapter=chapter) + elif position is None: + return redirect('courseware_section', course_id=course_id, chapter=chapter, section=section) + else: + return redirect('courseware_position', course_id=course_id, chapter=chapter, section=section, position=position) + @ensure_csrf_cookie def course_info(request, course_id): """ @@ -430,6 +438,11 @@ def university_profile(request, org_id): # Only grab courses for this org... 
courses = get_courses_by_university(request.user, domain=request.META.get('HTTP_HOST'))[org_id] + + # Sort courses by how far are they from they start day + key = lambda course: course.days_until_start + courses = sorted(courses, key=key, reverse=True) + context = dict(courses=courses, org_id=org_id) template_file = "university_profile/{0}.html".format(org_id).lower() diff --git a/lms/djangoapps/django_comment_client/models.py b/lms/djangoapps/django_comment_client/models.py index 628ac21a4a..a6a2c23603 100644 --- a/lms/djangoapps/django_comment_client/models.py +++ b/lms/djangoapps/django_comment_client/models.py @@ -2,6 +2,10 @@ import logging from django.db import models from django.contrib.auth.models import User +from django.dispatch import receiver +from django.db.models.signals import post_save + +from student.models import CourseEnrollment from courseware.courses import get_course_by_id @@ -45,3 +49,14 @@ class Permission(models.Model): def __unicode__(self): return self.name + + +@receiver(post_save, sender=CourseEnrollment) +def assign_default_role(sender, instance, **kwargs): + if instance.user.is_staff: + role = Role.objects.get_or_create(course_id=instance.course_id, name="Moderator")[0] + else: + role = Role.objects.get_or_create(course_id=instance.course_id, name="Student")[0] + + logging.info("assign_default_role: adding %s as %s" % (instance.user, role)) + instance.user.roles.add(role) diff --git a/lms/djangoapps/django_comment_client/utils.py b/lms/djangoapps/django_comment_client/utils.py index fbb87a1584..71fc38c0e1 100644 --- a/lms/djangoapps/django_comment_client/utils.py +++ b/lms/djangoapps/django_comment_client/utils.py @@ -17,6 +17,7 @@ from xmodule.modulestore import Location from xmodule.modulestore.django import modulestore from xmodule.modulestore.search import path_to_location +log = logging.getLogger(__name__) # TODO these should be cached via django's caching rather than in-memory globals _FULLMODULES = None @@ -141,6 +142,15 @@ def 
initialize_discussion_info(course): for location, module in all_modules.items(): if location.category == 'discussion': + skip_module = False + for key in ('id', 'discussion_category', 'for'): + if key not in module.metadata: + log.warning("Required key '%s' not in discussion %s, leaving out of category map" % (key, module.location)) + skip_module = True + + if skip_module: + continue + id = module.metadata['id'] category = module.metadata['discussion_category'] title = module.metadata['for'] @@ -245,7 +255,7 @@ class QueryCountDebugMiddleware(object): query_time = query.get('duration', 0) / 1000 total_time += float(query_time) - logging.info('%s queries run, total %s seconds' % (len(connection.queries), total_time)) + log.info('%s queries run, total %s seconds' % (len(connection.queries), total_time)) return response def get_ability(course_id, content, user): @@ -317,7 +327,7 @@ def extend_content(content): user = User.objects.get(pk=content['user_id']) roles = dict(('name', role.name.lower()) for role in user.roles.filter(course_id=content['course_id'])) except user.DoesNotExist: - logging.error('User ID {0} in comment content {1} but not in our DB.'.format(content.get('user_id'), content.get('id'))) + log.error('User ID {0} in comment content {1} but not in our DB.'.format(content.get('user_id'), content.get('id'))) content_info = { 'displayed_title': content.get('highlighted_title') or content.get('title', ''), diff --git a/lms/djangoapps/instructor/management/commands/compute_grades.py b/lms/djangoapps/instructor/management/commands/compute_grades.py new file mode 100644 index 0000000000..462833ba3c --- /dev/null +++ b/lms/djangoapps/instructor/management/commands/compute_grades.py @@ -0,0 +1,52 @@ +#!/usr/bin/python +# +# django management command: dump grades to csv files +# for use by batch processes + +import os, sys, string +import datetime +import json + +#import student.models +from instructor.offline_gradecalc import * +from courseware.courses import 
get_course_by_id +from xmodule.modulestore.django import modulestore + +from django.conf import settings +from django.core.management.base import BaseCommand + +class Command(BaseCommand): + help = "Compute grades for all students in a course, and store result in DB.\n" + help += "Usage: compute_grades course_id_or_dir \n" + help += " course_id_or_dir: either course_id or course_dir\n" + help += 'Example course_id: MITx/8.01rq_MW/Classical_Mechanics_Reading_Questions_Fall_2012_MW_Section' + + def handle(self, *args, **options): + + print "args = ", args + + if len(args)>0: + course_id = args[0] + else: + print self.help + return + + try: + course = get_course_by_id(course_id) + except Exception as err: + if course_id in modulestore().courses: + course = modulestore().courses[course_id] + else: + print "-----------------------------------------------------------------------------" + print "Sorry, cannot find course %s" % course_id + print "Please provide a course ID or course data directory name, eg content-mit-801rq" + return + + print "-----------------------------------------------------------------------------" + print "Computing grades for %s" % (course.id) + + offline_grade_calculation(course.id) + + + + diff --git a/lms/djangoapps/instructor/offline_gradecalc.py b/lms/djangoapps/instructor/offline_gradecalc.py new file mode 100644 index 0000000000..7c102805b4 --- /dev/null +++ b/lms/djangoapps/instructor/offline_gradecalc.py @@ -0,0 +1,103 @@ +# ======== Offline calculation of grades ============================================================================= +# +# Computing grades of a large number of students can take a long time. These routines allow grades to +# be computed offline, by a batch process (eg cronjob). +# +# The grades are stored in the OfflineComputedGrade table of the courseware model. 
+ +import json +import logging +import time + +import courseware.models + +from collections import namedtuple +from json import JSONEncoder +from courseware import grades, models +from courseware.courses import get_course_by_id +from django.contrib.auth.models import User, Group + + +class MyEncoder(JSONEncoder): + + def _iterencode(self, obj, markers=None): + if isinstance(obj, tuple) and hasattr(obj, '_asdict'): + gen = self._iterencode_dict(obj._asdict(), markers) + else: + gen = JSONEncoder._iterencode(self, obj, markers) + for chunk in gen: + yield chunk + + +def offline_grade_calculation(course_id): + ''' + Compute grades for all students for a specified course, and save results to the DB. + ''' + + tstart = time.time() + enrolled_students = User.objects.filter(courseenrollment__course_id=course_id).prefetch_related("groups").order_by('username') + + enc = MyEncoder() + + class DummyRequest(object): + META = {} + def __init__(self): + return + def get_host(self): + return 'edx.mit.edu' + def is_secure(self): + return False + + request = DummyRequest() + + print "%d enrolled students" % len(enrolled_students) + course = get_course_by_id(course_id) + + for student in enrolled_students: + gradeset = grades.grade(student, request, course, keep_raw_scores=True) + gs = enc.encode(gradeset) + ocg, created = models.OfflineComputedGrade.objects.get_or_create(user=student, course_id=course_id) + ocg.gradeset = gs + ocg.save() + print "%s done" % student # print statement used because this is run by a management command + + tend = time.time() + dt = tend - tstart + + ocgl = models.OfflineComputedGradeLog(course_id=course_id, seconds=dt, nstudents=len(enrolled_students)) + ocgl.save() + print ocgl + print "All Done!" + + +def offline_grades_available(course_id): + ''' + Returns False if no offline grades available for specified course. + Otherwise returns latest log field entry about the available pre-computed grades. 
+ ''' + ocgl = models.OfflineComputedGradeLog.objects.filter(course_id=course_id) + if not ocgl: + return False + return ocgl.latest('created') + + +def student_grades(student, request, course, keep_raw_scores=False, use_offline=False): + ''' + This is the main interface to get grades. It has the same parameters as grades.grade, as well + as use_offline. If use_offline is True then this will look for an offline computed gradeset in the DB. + ''' + + if not use_offline: + return grades.grade(student, request, course, keep_raw_scores=keep_raw_scores) + + try: + ocg = models.OfflineComputedGrade.objects.get(user=student, course_id=course.id) + except models.OfflineComputedGrade.DoesNotExist: + return dict(raw_scores=[], section_breakdown=[], + msg='Error: no offline gradeset available for %s, %s' % (student, course.id)) + + return json.loads(ocg.gradeset) + + + + diff --git a/lms/djangoapps/instructor/tests.py b/lms/djangoapps/instructor/tests.py index 532c0c3f68..57c0436921 100644 --- a/lms/djangoapps/instructor/tests.py +++ b/lms/djangoapps/instructor/tests.py @@ -8,10 +8,18 @@ Notes for running by hand: django-admin.py test --settings=lms.envs.test --pythonpath=. 
lms/djangoapps/instructor """ +import courseware.tests.tests as ct + +import json + +from nose import SkipTest +from mock import patch, Mock + from override_settings import override_settings -from django.contrib.auth.models import \ - Group # Need access to internal func to put users in the right group +# Need access to internal func to put users in the right group +from django.contrib.auth.models import Group + from django.core.urlresolvers import reverse from django_comment_client.models import Role, FORUM_ROLE_ADMINISTRATOR, \ FORUM_ROLE_MODERATOR, FORUM_ROLE_COMMUNITY_TA, FORUM_ROLE_STUDENT @@ -31,14 +39,9 @@ class TestInstructorDashboardGradeDownloadCSV(ct.PageLoader): def setUp(self): xmodule.modulestore.django._MODULESTORES = {} - courses = modulestore().get_courses() - def find_course(name): - """Assumes the course is present""" - return [c for c in courses if c.location.course==name][0] - - self.full = find_course("full") - self.toy = find_course("toy") + self.full = modulestore().get_course("edX/full/6.002_Spring_2012") + self.toy = modulestore().get_course("edX/toy/2012_Fall") # Create two accounts self.student = 'view@test.com' @@ -49,9 +52,12 @@ class TestInstructorDashboardGradeDownloadCSV(ct.PageLoader): self.activate_user(self.student) self.activate_user(self.instructor) - group_name = _course_staff_group_name(self.toy.location) - g = Group.objects.create(name=group_name) - g.user_set.add(ct.user(self.instructor)) + def make_instructor(course): + group_name = _course_staff_group_name(course.location) + g = Group.objects.create(name=group_name) + g.user_set.add(ct.user(self.instructor)) + + make_instructor(self.toy) self.logout() self.login(self.instructor, self.password) @@ -67,18 +73,21 @@ class TestInstructorDashboardGradeDownloadCSV(ct.PageLoader): self.assertEqual(response['Content-Type'],'text/csv',msg) - cdisp = response['Content-Disposition'].replace('TT_2012','2012') # jenkins course_id is TT_2012_Fall instead of 2012_Fall? 
- msg += "cdisp = '{0}'\n".format(cdisp) - self.assertEqual(cdisp,'attachment; filename=grades_edX/toy/2012_Fall.csv',msg) + cdisp = response['Content-Disposition'] + msg += "Content-Disposition = '%s'\n" % cdisp + self.assertEqual(cdisp, 'attachment; filename=grades_{0}.csv'.format(course.id), msg) body = response.content.replace('\r','') msg += "body = '{0}'\n".format(body) + # All the not-actually-in-the-course hw and labs come from the + # default grading policy string in graders.py expected_body = '''"ID","Username","Full Name","edX email","External email","HW 01","HW 02","HW 03","HW 04","HW 05","HW 06","HW 07","HW 08","HW 09","HW 10","HW 11","HW 12","HW Avg","Lab 01","Lab 02","Lab 03","Lab 04","Lab 05","Lab 06","Lab 07","Lab 08","Lab 09","Lab 10","Lab 11","Lab 12","Lab Avg","Midterm","Final" "2","u2","Fred Weasley","view2@test.com","","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0.0","0.0" ''' self.assertEqual(body, expected_body, msg) - + + FORUM_ROLES = [ FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_MODERATOR, FORUM_ROLE_COMMUNITY_TA ] FORUM_ADMIN_ACTION_SUFFIX = { FORUM_ROLE_ADMINISTRATOR : 'admin', FORUM_ROLE_MODERATOR : 'moderator', FORUM_ROLE_COMMUNITY_TA : 'community TA'} FORUM_ADMIN_USER = { FORUM_ROLE_ADMINISTRATOR : 'forumadmin', FORUM_ROLE_MODERATOR : 'forummoderator', FORUM_ROLE_COMMUNITY_TA : 'forummoderator'} @@ -89,22 +98,21 @@ def action_name(operation, rolename): else: return '{0} forum {1}'.format(operation, FORUM_ADMIN_ACTION_SUFFIX[rolename]) + + @override_settings(MODULESTORE=ct.TEST_DATA_XML_MODULESTORE) class TestInstructorDashboardForumAdmin(ct.PageLoader): ''' Check for change in forum admin role memberships ''' - + def setUp(self): xmodule.modulestore.django._MODULESTORES = {} courses = modulestore().get_courses() - def find_course(name): - """Assumes the course is present""" - return [c for c in courses if c.location.course==name][0] - self.full = find_course("full") - self.toy = 
find_course("toy") + self.course_id = "edX/toy/2012_Fall" + self.toy = modulestore().get_course(self.course_id) # Create two accounts self.student = 'view@test.com' @@ -123,6 +131,8 @@ class TestInstructorDashboardForumAdmin(ct.PageLoader): self.login(self.instructor, self.password) self.enroll(self.toy) + + def initialize_roles(self, course_id): self.admin_role = Role.objects.get_or_create(name=FORUM_ROLE_ADMINISTRATOR, course_id=course_id)[0] self.moderator_role = Role.objects.get_or_create(name=FORUM_ROLE_MODERATOR, course_id=course_id)[0] @@ -169,7 +179,7 @@ class TestInstructorDashboardForumAdmin(ct.PageLoader): self.assertTrue(response.content.find('Removed "{0}" from "{1}" forum role = "{2}"'.format(username, course.id, rolename))>=0) self.assertFalse(has_forum_access(username, course.id, rolename)) - def test_add_and_readd_forum_admin_users(self): + def test_add_and_read_forum_admin_users(self): course = self.toy self.initialize_roles(course.id) url = reverse('instructor_dashboard', kwargs={'course_id': course.id}) @@ -209,3 +219,5 @@ class TestInstructorDashboardForumAdmin(ct.PageLoader): added_roles.sort() roles = ', '.join(added_roles) self.assertTrue(response.content.find('{0}'.format(roles))>=0, 'not finding roles "{0}"'.format(roles)) + + diff --git a/lms/djangoapps/instructor/views.py b/lms/djangoapps/instructor/views.py index 6c59200786..299bc8b05a 100644 --- a/lms/djangoapps/instructor/views.py +++ b/lms/djangoapps/instructor/views.py @@ -2,13 +2,18 @@ from collections import defaultdict import csv +import itertools +import json import logging import os +import requests import urllib import datetime from datetime import datetime, timedelta -import json from collections import OrderedDict +import json + +from StringIO import StringIO from django.conf import settings from django.contrib.auth.models import User, Group @@ -17,14 +22,20 @@ from django_future.csrf import ensure_csrf_cookie from django.views.decorators.cache import cache_control from 
mitxmako.shortcuts import render_to_response import requests +from django.core.urlresolvers import reverse from courseware import grades -from courseware.access import has_access, get_access_group_name -from courseware.courses import get_course_with_access -from django_comment_client.models import Role, FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_MODERATOR, FORUM_ROLE_COMMUNITY_TA +from courseware.access import (has_access, get_access_group_name, + course_beta_test_group_name) +from courseware.courses import get_course_with_access +from django_comment_client.models import (Role, + FORUM_ROLE_ADMINISTRATOR, + FORUM_ROLE_MODERATOR, + FORUM_ROLE_COMMUNITY_TA) from django_comment_client.utils import has_forum_access from psychometrics import psychoanalyze -from student.models import CourseEnrollment +from student.models import CourseEnrollment, CourseEnrollmentAllowed +from courseware.models import StudentModule from xmodule.course_module import CourseDescriptor from xmodule.modulestore import Location from xmodule.modulestore.django import modulestore @@ -32,7 +43,9 @@ from xmodule.modulestore.exceptions import InvalidLocationError, ItemNotFoundErr from xmodule.modulestore.search import path_to_location import track.views -log = logging.getLogger("mitx.courseware") +from .offline_gradecalc import student_grades, offline_grades_available + +log = logging.getLogger(__name__) template_imports = {'urllib': urllib} @@ -42,13 +55,12 @@ FORUM_ROLE_REMOVE = 'remove' @ensure_csrf_cookie @cache_control(no_cache=True, no_store=True, must_revalidate=True) - def instructor_dashboard(request, course_id): """Display the instructor dashboard for a course.""" course = get_course_with_access(request.user, course_id, 'staff') instructor_access = has_access(request.user, course, 'instructor') # an instructor can manage staff lists - + forum_admin_access = has_forum_access(request.user, course_id, FORUM_ROLE_ADMINISTRATOR) msg = '' @@ -77,9 +89,12 @@ def instructor_dashboard(request, course_id): 
data.append(['metadata', escape(str(course.metadata))]) datatable['data'] = data - def return_csv(fn, datatable): - response = HttpResponse(mimetype='text/csv') - response['Content-Disposition'] = 'attachment; filename={0}'.format(fn) + def return_csv(fn, datatable, fp=None): + if fp is None: + response = HttpResponse(mimetype='text/csv') + response['Content-Disposition'] = 'attachment; filename={0}'.format(fn) + else: + response = fp writer = csv.writer(response, dialect='excel', quotechar='"', quoting=csv.QUOTE_ALL) writer.writerow(datatable['header']) for datarow in datatable['data']: @@ -88,16 +103,33 @@ def instructor_dashboard(request, course_id): return response def get_staff_group(course): - staffgrp = get_access_group_name(course, 'staff') + return get_group(course, 'staff') + + def get_instructor_group(course): + return get_group(course, 'instructor') + + def get_group(course, groupname): + grpname = get_access_group_name(course, groupname) try: - group = Group.objects.get(name=staffgrp) + group = Group.objects.get(name=grpname) except Group.DoesNotExist: - group = Group(name=staffgrp) # create the group + group = Group(name=grpname) # create the group group.save() + + def get_beta_group(course): + """ + Get the group for beta testers of course. + """ + # Not using get_group because there is no access control action called + # 'beta', so adding it to get_access_group_name doesn't really make + # sense. + name = course_beta_test_group_name(course.location) + (group, created) = Group.objects.get_or_create(name=name) return group # process actions from form POST action = request.POST.get('action', '') + use_offline = request.POST.get('use_offline_grades',False) if settings.MITX_FEATURES['ENABLE_MANUAL_GIT_RELOAD']: if 'GIT pull' in action: @@ -127,100 +159,273 @@ def instructor_dashboard(request, course_id): except Exception as err: msg += '

    Error: {0}

    '.format(escape(err)) - if action == 'Dump list of enrolled students': + if action == 'Dump list of enrolled students' or action=='List enrolled students': log.debug(action) - datatable = get_student_grade_summary_data(request, course, course_id, get_grades=False) + datatable = get_student_grade_summary_data(request, course, course_id, get_grades=False, use_offline=use_offline) datatable['title'] = 'List of students enrolled in {0}'.format(course_id) track.views.server_track(request, 'list-students', {}, page='idashboard') elif 'Dump Grades' in action: log.debug(action) - datatable = get_student_grade_summary_data(request, course, course_id, get_grades=True) + datatable = get_student_grade_summary_data(request, course, course_id, get_grades=True, use_offline=use_offline) datatable['title'] = 'Summary Grades of students enrolled in {0}'.format(course_id) track.views.server_track(request, 'dump-grades', {}, page='idashboard') elif 'Dump all RAW grades' in action: log.debug(action) datatable = get_student_grade_summary_data(request, course, course_id, get_grades=True, - get_raw_scores=True) + get_raw_scores=True, use_offline=use_offline) datatable['title'] = 'Raw Grades of students enrolled in {0}'.format(course_id) track.views.server_track(request, 'dump-grades-raw', {}, page='idashboard') elif 'Download CSV of all student grades' in action: track.views.server_track(request, 'dump-grades-csv', {}, page='idashboard') return return_csv('grades_{0}.csv'.format(course_id), - get_student_grade_summary_data(request, course, course_id)) + get_student_grade_summary_data(request, course, course_id, use_offline=use_offline)) elif 'Download CSV of all RAW grades' in action: track.views.server_track(request, 'dump-grades-csv-raw', {}, page='idashboard') return return_csv('grades_{0}_raw.csv'.format(course_id), - get_student_grade_summary_data(request, course, course_id, get_raw_scores=True)) + get_student_grade_summary_data(request, course, course_id, get_raw_scores=True, 
use_offline=use_offline)) elif 'Download CSV of answer distributions' in action: track.views.server_track(request, 'dump-answer-dist-csv', {}, page='idashboard') return return_csv('answer_dist_{0}.csv'.format(course_id), get_answers_distribution(request, course_id)) + elif "Reset student's attempts" in action: + # get the form data + unique_student_identifier=request.POST.get('unique_student_identifier','') + problem_to_reset=request.POST.get('problem_to_reset','') + + if problem_to_reset[-4:]==".xml": + problem_to_reset=problem_to_reset[:-4] + + # try to uniquely id student by email address or username + try: + if "@" in unique_student_identifier: + student_to_reset=User.objects.get(email=unique_student_identifier) + else: + student_to_reset=User.objects.get(username=unique_student_identifier) + msg+="Found a single student to reset. " + except: + student_to_reset=None + msg+="Couldn't find student with that email or username. " + + if student_to_reset is not None: + # find the module in question + try: + (org, course_name, run)=course_id.split("/") + module_state_key="i4x://"+org+"/"+course_name+"/problem/"+problem_to_reset + module_to_reset=StudentModule.objects.get(student_id=student_to_reset.id, + course_id=course_id, + module_state_key=module_state_key) + msg+="Found module to reset. " + except Exception as e: + msg+="Couldn't find module with that urlname. 
" + + # modify the problem's state + try: + # load the state json + problem_state=json.loads(module_to_reset.state) + old_number_of_attempts=problem_state["attempts"] + problem_state["attempts"]=0 + + # save + module_to_reset.state=json.dumps(problem_state) + module_to_reset.save() + track.views.server_track(request, + '{instructor} reset attempts from {old_attempts} to 0 for {student} on problem {problem} in {course}'.format( + old_attempts=old_number_of_attempts, + student=student_to_reset, + problem=module_to_reset.module_state_key, + instructor=request.user, + course=course_id), + {}, + page='idashboard') + msg+="Module state successfully reset!" + except: + msg+="Couldn't reset module state. " + + + elif "Get link to student's progress page" in action: + unique_student_identifier=request.POST.get('unique_student_identifier','') + try: + if "@" in unique_student_identifier: + student_to_reset=User.objects.get(email=unique_student_identifier) + else: + student_to_reset=User.objects.get(username=unique_student_identifier) + progress_url=reverse('student_progress',kwargs={'course_id':course_id,'student_id': student_to_reset.id}) + track.views.server_track(request, + '{instructor} requested progress page for {student} in {course}'.format( + student=student_to_reset, + instructor=request.user, + course=course_id), + {}, + page='idashboard') + msg+=" Progress page for username: {1} with email address: {2}.".format(progress_url,student_to_reset.username,student_to_reset.email) + except: + msg+="Couldn't find student with that username. 
" + + #---------------------------------------- + # export grades to remote gradebook + + elif action=='List assignments available in remote gradebook': + msg2, datatable = _do_remote_gradebook(request.user, course, 'get-assignments') + msg += msg2 + + elif action=='List assignments available for this course': + log.debug(action) + allgrades = get_student_grade_summary_data(request, course, course_id, get_grades=True, use_offline=use_offline) + + assignments = [[x] for x in allgrades['assignments']] + datatable = {'header': ['Assignment Name']} + datatable['data'] = assignments + datatable['title'] = action + + msg += 'assignments=
    %s
    ' % assignments + + elif action=='List enrolled students matching remote gradebook': + stud_data = get_student_grade_summary_data(request, course, course_id, get_grades=False, use_offline=use_offline) + msg2, rg_stud_data = _do_remote_gradebook(request.user, course, 'get-membership') + datatable = {'header': ['Student email', 'Match?']} + rg_students = [ x['email'] for x in rg_stud_data['retdata'] ] + def domatch(x): + return 'yes' if x.email in rg_students else 'No' + datatable['data'] = [[x.email, domatch(x)] for x in stud_data['students']] + datatable['title'] = action + + elif action in ['Display grades for assignment', 'Export grades for assignment to remote gradebook', + 'Export CSV file of grades for assignment']: + + log.debug(action) + datatable = {} + aname = request.POST.get('assignment_name','') + if not aname: + msg += "Please enter an assignment name" + else: + allgrades = get_student_grade_summary_data(request, course, course_id, get_grades=True, use_offline=use_offline) + if aname not in allgrades['assignments']: + msg += "Invalid assignment name '%s'" % aname + else: + aidx = allgrades['assignments'].index(aname) + datatable = {'header': ['External email', aname]} + datatable['data'] = [[x.email, x.grades[aidx]] for x in allgrades['students']] + datatable['title'] = 'Grades for assignment "%s"' % aname + + if 'Export CSV' in action: + # generate and return CSV file + return return_csv('grades %s.csv' % aname, datatable) + + elif 'remote gradebook' in action: + fp = StringIO() + return_csv('', datatable, fp=fp) + fp.seek(0) + files = {'datafile': fp} + msg2, dataset = _do_remote_gradebook(request.user, course, 'post-grades', files=files) + msg += msg2 + + #---------------------------------------- # Admin elif 'List course staff' in action: group = get_staff_group(course) msg += 'Staff group = {0}'.format(group.name) - log.debug('staffgrp={0}'.format(group.name)) + datatable = _group_members_table(group, "List of Staff", course_id) + 
track.views.server_track(request, 'list-staff', {}, page='idashboard') + + elif 'List course instructors' in action and request.user.is_staff: + group = get_instructor_group(course) + msg += 'Instructor group = {0}'.format(group.name) + log.debug('instructor grp={0}'.format(group.name)) uset = group.user_set.all() datatable = {'header': ['Username', 'Full name']} datatable['data'] = [[x.username, x.profile.name] for x in uset] - datatable['title'] = 'List of Staff in course {0}'.format(course_id) - track.views.server_track(request, 'list-staff', {}, page='idashboard') + datatable['title'] = 'List of Instructors in course {0}'.format(course_id) + track.views.server_track(request, 'list-instructors', {}, page='idashboard') elif action == 'Add course staff': uname = request.POST['staffuser'] + group = get_staff_group(course) + msg += add_user_to_group(request, uname, group, 'staff', 'staff') + + elif action == 'Add instructor' and request.user.is_staff: + uname = request.POST['instructor'] try: user = User.objects.get(username=uname) except User.DoesNotExist: msg += 'Error: unknown username "{0}"'.format(uname) user = None if user is not None: - group = get_staff_group(course) - msg += 'Added {0} to staff group = {1}'.format(user, group.name) + group = get_instructor_group(course) + msg += 'Added {0} to instructor group = {1}'.format(user, group.name) log.debug('staffgrp={0}'.format(group.name)) user.groups.add(group) - track.views.server_track(request, 'add-staff {0}'.format(user), {}, page='idashboard') + track.views.server_track(request, 'add-instructor {0}'.format(user), {}, page='idashboard') elif action == 'Remove course staff': uname = request.POST['staffuser'] + group = get_staff_group(course) + msg += remove_user_from_group(request, uname, group, 'staff', 'staff') + + elif action == 'Remove instructor' and request.user.is_staff: + uname = request.POST['instructor'] try: user = User.objects.get(username=uname) except User.DoesNotExist: msg += 'Error: unknown 
username "{0}"'.format(uname) user = None if user is not None: - group = get_staff_group(course) - msg += 'Removed {0} from staff group = {1}'.format(user, group.name) - log.debug('staffgrp={0}'.format(group.name)) + group = get_instructor_group(course) + msg += 'Removed {0} from instructor group = {1}'.format(user, group.name) + log.debug('instructorgrp={0}'.format(group.name)) user.groups.remove(group) - track.views.server_track(request, 'remove-staff {0}'.format(user), {}, page='idashboard') + track.views.server_track(request, 'remove-instructor {0}'.format(user), {}, page='idashboard') + + #---------------------------------------- + # Group management + + elif 'List beta testers' in action: + group = get_beta_group(course) + msg += 'Beta test group = {0}'.format(group.name) + datatable = _group_members_table(group, "List of beta_testers", course_id) + track.views.server_track(request, 'list-beta-testers', {}, page='idashboard') + + elif action == 'Add beta testers': + users = request.POST['betausers'] + log.debug("users: {0!r}".format(users)) + group = get_beta_group(course) + for username_or_email in _split_by_comma_and_whitespace(users): + msg += "

    {0}

    ".format( + add_user_to_group(request, username_or_email, group, 'beta testers', 'beta-tester')) + + elif action == 'Remove beta testers': + users = request.POST['betausers'] + group = get_beta_group(course) + for username_or_email in _split_by_comma_and_whitespace(users): + msg += "

    {0}

    ".format( + remove_user_from_group(request, username_or_email, group, 'beta testers', 'beta-tester')) #---------------------------------------- # forum administration - + elif action == 'List course forum admins': rolename = FORUM_ROLE_ADMINISTRATOR datatable = {} msg += _list_course_forum_members(course_id, rolename, datatable) track.views.server_track(request, 'list-{0}'.format(rolename), {}, page='idashboard') - - + + elif action == 'Remove forum admin': uname = request.POST['forumadmin'] msg += _update_forum_role_membership(uname, course, FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_REMOVE) - track.views.server_track(request, '{0} {1} as {2} for {3}'.format(FORUM_ROLE_REMOVE, uname, FORUM_ROLE_ADMINISTRATOR, course_id), + track.views.server_track(request, '{0} {1} as {2} for {3}'.format(FORUM_ROLE_REMOVE, uname, FORUM_ROLE_ADMINISTRATOR, course_id), {}, page='idashboard') elif action == 'Add forum admin': uname = request.POST['forumadmin'] msg += _update_forum_role_membership(uname, course, FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_ADD) - track.views.server_track(request, '{0} {1} as {2} for {3}'.format(FORUM_ROLE_ADD, uname, FORUM_ROLE_ADMINISTRATOR, course_id), + track.views.server_track(request, '{0} {1} as {2} for {3}'.format(FORUM_ROLE_ADD, uname, FORUM_ROLE_ADMINISTRATOR, course_id), {}, page='idashboard') elif action == 'List course forum moderators': @@ -228,37 +433,102 @@ def instructor_dashboard(request, course_id): datatable = {} msg += _list_course_forum_members(course_id, rolename, datatable) track.views.server_track(request, 'list-{0}'.format(rolename), {}, page='idashboard') - + elif action == 'Remove forum moderator': uname = request.POST['forummoderator'] msg += _update_forum_role_membership(uname, course, FORUM_ROLE_MODERATOR, FORUM_ROLE_REMOVE) - track.views.server_track(request, '{0} {1} as {2} for {3}'.format(FORUM_ROLE_REMOVE, uname, FORUM_ROLE_MODERATOR, course_id), + track.views.server_track(request, '{0} {1} as {2} for 
{3}'.format(FORUM_ROLE_REMOVE, uname, FORUM_ROLE_MODERATOR, course_id), {}, page='idashboard') - + elif action == 'Add forum moderator': uname = request.POST['forummoderator'] msg += _update_forum_role_membership(uname, course, FORUM_ROLE_MODERATOR, FORUM_ROLE_ADD) - track.views.server_track(request, '{0} {1} as {2} for {3}'.format(FORUM_ROLE_ADD, uname, FORUM_ROLE_MODERATOR, course_id), + track.views.server_track(request, '{0} {1} as {2} for {3}'.format(FORUM_ROLE_ADD, uname, FORUM_ROLE_MODERATOR, course_id), {}, page='idashboard') - + elif action == 'List course forum community TAs': rolename = FORUM_ROLE_COMMUNITY_TA datatable = {} msg += _list_course_forum_members(course_id, rolename, datatable) track.views.server_track(request, 'list-{0}'.format(rolename), {}, page='idashboard') - + elif action == 'Remove forum community TA': uname = request.POST['forummoderator'] msg += _update_forum_role_membership(uname, course, FORUM_ROLE_COMMUNITY_TA, FORUM_ROLE_REMOVE) - track.views.server_track(request, '{0} {1} as {2} for {3}'.format(FORUM_ROLE_REMOVE, uname, FORUM_ROLE_COMMUNITY_TA, course_id), + track.views.server_track(request, '{0} {1} as {2} for {3}'.format(FORUM_ROLE_REMOVE, uname, FORUM_ROLE_COMMUNITY_TA, course_id), {}, page='idashboard') - + elif action == 'Add forum community TA': uname = request.POST['forummoderator'] msg += _update_forum_role_membership(uname, course, FORUM_ROLE_COMMUNITY_TA, FORUM_ROLE_ADD) - track.views.server_track(request, '{0} {1} as {2} for {3}'.format(FORUM_ROLE_ADD, uname, FORUM_ROLE_COMMUNITY_TA, course_id), + track.views.server_track(request, '{0} {1} as {2} for {3}'.format(FORUM_ROLE_ADD, uname, FORUM_ROLE_COMMUNITY_TA, course_id), {}, page='idashboard') + #---------------------------------------- + # enrollment + + elif action == 'List students who may enroll but may not have yet signed up': + ceaset = CourseEnrollmentAllowed.objects.filter(course_id=course_id) + datatable = {'header': ['StudentEmail']} + datatable['data'] = 
[[x.email] for x in ceaset] + datatable['title'] = action + + elif action == 'Enroll student': + + student = request.POST.get('enstudent','') + ret = _do_enroll_students(course, course_id, student) + datatable = ret['datatable'] + + elif action == 'Un-enroll student': + + student = request.POST.get('enstudent','') + datatable = {} + isok = False + cea = CourseEnrollmentAllowed.objects.filter(course_id=course_id, email=student) + if cea: + cea.delete() + msg += "Un-enrolled student with email '%s'" % student + isok = True + try: + nce = CourseEnrollment.objects.get(user=User.objects.get(email=student), course_id=course_id) + nce.delete() + msg += "Un-enrolled student with email '%s'" % student + except Exception as err: + if not isok: + msg += "Error! Failed to un-enroll student with email '%s'\n" % student + msg += str(err) + '\n' + + elif action == 'Un-enroll ALL students': + + ret = _do_enroll_students(course, course_id, '', overload=True) + datatable = ret['datatable'] + + elif action == 'Enroll multiple students': + + students = request.POST.get('enroll_multiple','') + ret = _do_enroll_students(course, course_id, students) + datatable = ret['datatable'] + + elif action == 'List sections available in remote gradebook': + + msg2, datatable = _do_remote_gradebook(request.user, course, 'get-sections') + msg += msg2 + + elif action in ['List students in section in remote gradebook', + 'Overload enrollment list using remote gradebook', + 'Merge enrollment list with remote gradebook']: + + section = request.POST.get('gradebook_section','') + msg2, datatable = _do_remote_gradebook(request.user, course, 'get-membership', dict(section=section) ) + msg += msg2 + + if not 'List' in action: + students = ','.join([x['email'] for x in datatable['retdata']]) + overload = 'Overload' in action + ret = _do_enroll_students(course, course_id, students, overload=overload) + datatable = ret['datatable'] + + #---------------------------------------- # psychometrics @@ -334,8 +604,15 
@@ def instructor_dashboard(request, course_id): req = requests.get(settings.ANALYTICS_SERVER_URL + "get_analytics?aname=DailyActivityAnalyzer&course_id=%s&from=%s&to=%s" % (course_id,from_day, to_day)) daily_activity_json = req.json + #---------------------------------------- + # offline grades? + + if use_offline: + msg += "
    Grades from %s" % offline_grades_available(course_id) + #---------------------------------------- # context for rendering + context = {'course': course, 'staff_access': True, 'admin_access': request.user.is_staff, @@ -355,20 +632,70 @@ def instructor_dashboard(request, course_id): 'overall_grade_distribution' : overall_grade_distribution, 'dropoff_per_day' : dropoff_per_day, 'attempted_problems' : attempted_problems, + 'mitx_version' : getattr(settings,'MITX_VERSION_STRING',''), + 'offline_grade_log' : offline_grades_available(course_id), } return render_to_response('courseware/instructor_dashboard.html', context) + +def _do_remote_gradebook(user, course, action, args=None, files=None): + ''' + Perform remote gradebook action. Returns msg, datatable. + ''' + rg = course.metadata.get('remote_gradebook','') + if not rg: + msg = "No remote gradebook defined in course metadata" + return msg, {} + + rgurl = settings.MITX_FEATURES.get('REMOTE_GRADEBOOK_URL','') + if not rgurl: + msg = "No remote gradebook url defined in settings.MITX_FEATURES" + return msg, {} + + rgname = rg.get('name','') + if not rgname: + msg = "No gradebook name defined in course remote_gradebook metadata" + return msg, {} + + if args is None: + args = {} + data = dict(submit=action, gradebook=rgname, user=user.email) + data.update(args) + + try: + resp = requests.post(rgurl, data=data, verify=False, files=files) + retdict = json.loads(resp.content) + except Exception as err: + msg = "Failed to communicate with gradebook server at %s
    " % rgurl + msg += "Error: %s" % err + msg += "
    resp=%s" % resp.content + msg += "
    data=%s" % data + return msg, {} + + msg = '
    %s
    ' % retdict['msg'].replace('\n','
    ') + retdata = retdict['data'] # a list of dicts + + if retdata: + datatable = {'header': retdata[0].keys()} + datatable['data'] = [x.values() for x in retdata] + datatable['title'] = 'Remote gradebook response for %s' % action + datatable['retdata'] = retdata + else: + datatable = {} + + return msg, datatable + def _list_course_forum_members(course_id, rolename, datatable): - ''' + """ Fills in datatable with forum membership information, for a given role, so that it will be displayed on instructor dashboard. - - course_ID = course's ID string + + course_ID = the ID string for a course rolename = one of "Administrator", "Moderator", "Community TA" - + Returns message status string to append to displayed message, if role is unknown. - ''' + """ # make sure datatable is set up properly for display first, before checking for errors datatable['header'] = ['Username', 'Full name', 'Roles'] datatable['title'] = 'List of Forum {0}s in course {1}'.format(rolename, course_id) @@ -387,13 +714,13 @@ def _list_course_forum_members(course_id, rolename, datatable): def _update_forum_role_membership(uname, course, rolename, add_or_remove): ''' Supports adding a user to a course's forum role - + uname = username string for user - course = course object + course = course object rolename = one of "Administrator", "Moderator", "Community TA" add_or_remove = one of "add" or "remove" - - Returns message status string to append to displayed message, Status is returned if user + + Returns message status string to append to displayed message, Status is returned if user or role is unknown, or if entry already exists when adding, or if entry doesn't exist when removing. 
''' # check that username and rolename are valid: @@ -413,23 +740,107 @@ def _update_forum_role_membership(uname, course, rolename, add_or_remove): if add_or_remove == FORUM_ROLE_REMOVE: if not alreadyexists: msg ='Error: user "{0}" does not have rolename "{1}", cannot remove'.format(uname, rolename) - else: + else: user.roles.remove(role) msg = 'Removed "{0}" from "{1}" forum role = "{2}"'.format(user, course.id, rolename) else: if alreadyexists: msg = 'Error: user "{0}" already has rolename "{1}", cannot add'.format(uname, rolename) - else: - if (rolename == FORUM_ROLE_ADMINISTRATOR and not has_access(user, course, 'staff')): + else: + if (rolename == FORUM_ROLE_ADMINISTRATOR and not has_access(user, course, 'staff')): msg = 'Error: user "{0}" should first be added as staff before adding as a forum administrator, cannot add'.format(uname) else: user.roles.add(role) msg = 'Added "{0}" to "{1}" forum role = "{2}"'.format(user, course.id, rolename) return msg - -def get_student_grade_summary_data(request, course, course_id, get_grades=True, get_raw_scores=False): +def _group_members_table(group, title, course_id): + """ + Return a data table of usernames and names of users in group_name. + + Arguments: + group -- a django group. + title -- a descriptive title to show the user + + Returns: + a dictionary with keys + 'header': ['Username', 'Full name'], + 'data': [[username, name] for all users] + 'title': "{title} in course {course}" + """ + uset = group.user_set.all() + datatable = {'header': ['Username', 'Full name']} + datatable['data'] = [[x.username, x.profile.name] for x in uset] + datatable['title'] = '{0} in course {1}'.format(title, course_id) + return datatable + + +def _add_or_remove_user_group(request, username_or_email, group, group_title, event_name, do_add): + """ + Implementation for both add and remove functions, to get rid of shared code. do_add is bool that determines which + to do. 
+ """ + user = None + try: + if '@' in username_or_email: + user = User.objects.get(email=username_or_email) + else: + user = User.objects.get(username=username_or_email) + except User.DoesNotExist: + msg = 'Error: unknown username or email "{0}"'.format(username_or_email) + user = None + + if user is not None: + action = "Added" if do_add else "Removed" + prep = "to" if do_add else "from" + msg = '{action} {0} {prep} {1} group = {2}'.format(user, group_title, group.name, + action=action, prep=prep) + if do_add: + user.groups.add(group) + else: + user.groups.remove(group) + event = "add" if do_add else "remove" + track.views.server_track(request, '{event}-{0} {1}'.format(event_name, user, event=event), + {}, page='idashboard') + + return msg + + +def add_user_to_group(request, username_or_email, group, group_title, event_name): + """ + Look up the given user by username (if no '@') or email (otherwise), and add them to group. + + Arguments: + request: django request--used for tracking log + username_or_email: who to add. Decide if it's an email by presense of an '@' + group: django group object + group_title: what to call this group in messages to user--e.g. "beta-testers". + event_name: what to call this event when logging to tracking logs. + + Returns: + html to insert in the message field + """ + return _add_or_remove_user_group(request, username_or_email, group, group_title, event_name, True) + +def remove_user_from_group(request, username_or_email, group, group_title, event_name): + """ + Look up the given user by username (if no '@') or email (otherwise), and remove them from group. + + Arguments: + request: django request--used for tracking log + username_or_email: who to remove. Decide if it's an email by presense of an '@' + group: django group object + group_title: what to call this group in messages to user--e.g. "beta-testers". + event_name: what to call this event when logging to tracking logs. 
+ + Returns: + html to insert in the message field + """ + return _add_or_remove_user_group(request, username_or_email, group, group_title, event_name, False) + + +def get_student_grade_summary_data(request, course, course_id, get_grades=True, get_raw_scores=False, use_offline=False): ''' Return data arrays with student identity and grades for specified course. @@ -450,16 +861,18 @@ def get_student_grade_summary_data(request, course, course_id, get_grades=True, enrolled_students = User.objects.filter(courseenrollment__course_id=course_id).prefetch_related("groups").order_by('username') header = ['ID', 'Username', 'Full Name', 'edX email', 'External email'] - if get_grades: + assignments = [] + if get_grades and enrolled_students.count() > 0: # just to construct the header - gradeset = grades.grade(enrolled_students[0], request, course, keep_raw_scores=get_raw_scores) + gradeset = student_grades(enrolled_students[0], request, course, keep_raw_scores=get_raw_scores, use_offline=use_offline) # log.debug('student {0} gradeset {1}'.format(enrolled_students[0], gradeset)) if get_raw_scores: - header += [score.section for score in gradeset['raw_scores']] + assignments += [score.section for score in gradeset['raw_scores']] else: - header += [x['label'] for x in gradeset['section_breakdown']] + assignments += [x['label'] for x in gradeset['section_breakdown']] + header += assignments - datatable = {'header': header} + datatable = {'header': header, 'assignments': assignments, 'students': enrolled_students} data = [] for student in enrolled_students: @@ -470,17 +883,21 @@ def get_student_grade_summary_data(request, course, course_id, get_grades=True, datarow.append('') if get_grades: - gradeset = grades.grade(student, request, course, keep_raw_scores=get_raw_scores) - # log.debug('student={0}, gradeset={1}'.format(student,gradeset)) + gradeset = student_grades(student, request, course, keep_raw_scores=get_raw_scores, use_offline=use_offline) + log.debug('student={0}, 
gradeset={1}'.format(student,gradeset)) if get_raw_scores: - datarow += [score.earned for score in gradeset['raw_scores']] + # TODO (ichuang) encode Score as dict instead of as list, so score[0] -> score['earned'] + sgrades = [(getattr(score,'earned','') or score[0]) for score in gradeset['raw_scores']] else: - datarow += [x['percent'] for x in gradeset['section_breakdown']] + sgrades = [x['percent'] for x in gradeset['section_breakdown']] + datarow += sgrades + student.grades = sgrades # store in student object data.append(datarow) datatable['data'] = data return datatable +#----------------------------------------------------------------------------- @cache_control(no_cache=True, no_store=True, must_revalidate=True) def gradebook(request, course_id): @@ -499,7 +916,7 @@ def gradebook(request, course_id): student_info = [{'username': student.username, 'id': student.id, 'email': student.email, - 'grade_summary': grades.grade(student, request, course), + 'grade_summary': student_grades(student, request, course), 'realname': student.profile.name, } for student in enrolled_students] @@ -522,6 +939,80 @@ def grade_summary(request, course_id): return render_to_response('courseware/grade_summary.html', context) +#----------------------------------------------------------------------------- +# enrollment + + +def _split_by_comma_and_whitespace(s): + """ + Split a string both by on commas and whitespice. 
+ """ + # Note: split() with no args removes empty strings from output + lists = [x.split() for x in s.split(',')] + # return all of them + return itertools.chain(*lists) + +def _do_enroll_students(course, course_id, students, overload=False): + """Do the actual work of enrolling multiple students, presented as a string + of emails separated by commas or returns""" + + new_students = _split_by_comma_and_whitespace(students) + new_students = [str(s.strip()) for s in new_students] + new_students_lc = [x.lower() for x in new_students] + + if '' in new_students: + new_students.remove('') + + status = dict([x,'unprocessed'] for x in new_students) + + if overload: # delete all but staff + todelete = CourseEnrollment.objects.filter(course_id=course_id) + for ce in todelete: + if not has_access(ce.user, course, 'staff') and ce.user.email.lower() not in new_students_lc: + status[ce.user.email] = 'deleted' + ce.delete() + else: + status[ce.user.email] = 'is staff' + ceaset = CourseEnrollmentAllowed.objects.filter(course_id=course_id) + for cea in ceaset: + status[cea.email] = 'removed from pending enrollment list' + ceaset.delete() + + for student in new_students: + try: + user=User.objects.get(email=student) + except User.DoesNotExist: + # user not signed up yet, put in pending enrollment allowed table + if CourseEnrollmentAllowed.objects.filter(email=student, course_id=course_id): + status[student] = 'user does not exist, enrollment already allowed, pending' + continue + cea = CourseEnrollmentAllowed(email=student, course_id=course_id) + cea.save() + status[student] = 'user does not exist, enrollment allowed, pending' + continue + + if CourseEnrollment.objects.filter(user=user, course_id=course_id): + status[student] = 'already enrolled' + continue + try: + nce = CourseEnrollment(user=user, course_id=course_id) + nce.save() + status[student] = 'added' + except: + status[student] = 'rejected' + + datatable = {'header': ['StudentEmail', 'action']} + datatable['data'] = [[x, 
status[x]] for x in status] + datatable['title'] = 'Enrollment of students' + + def sf(stat): return [x for x in status if status[x]==stat] + + data = dict(added=sf('added'), rejected=sf('rejected')+sf('exists'), + deleted=sf('deleted'), datatable=datatable) + + return data + + @ensure_csrf_cookie @cache_control(no_cache=True, no_store=True, must_revalidate=True) def enroll_students(request, course_id): @@ -540,22 +1031,10 @@ def enroll_students(request, course_id): course = get_course_with_access(request.user, course_id, 'staff') existing_students = [ce.user.email for ce in CourseEnrollment.objects.filter(course_id=course_id)] - if 'new_students' in request.POST: - new_students = request.POST['new_students'].split('\n') - else: - new_students = [] - new_students = [s.strip() for s in new_students] - - added_students = [] - rejected_students = [] - - for student in new_students: - try: - nce = CourseEnrollment(user=User.objects.get(email=student), course_id=course_id) - nce.save() - added_students.append(student) - except: - rejected_students.append(student) + new_students = request.POST.get('new_students') + ret = _do_enroll_students(course, course_id, new_students) + added_students = ret['added'] + rejected_students = ret['rejected'] return render_to_response("enroll_students.html", {'course': course_id, 'existing_students': existing_students, @@ -564,6 +1043,9 @@ def enroll_students(request, course_id): 'debug': new_students}) +#----------------------------------------------------------------------------- +# answer distribution + def get_answers_distribution(request, course_id): """ Get the distribution of answers for all graded problems in the course. 
diff --git a/lms/djangoapps/open_ended_grading/__init__.py b/lms/djangoapps/open_ended_grading/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/lms/djangoapps/open_ended_grading/grading_service.py b/lms/djangoapps/open_ended_grading/grading_service.py new file mode 100644 index 0000000000..7362411daa --- /dev/null +++ b/lms/djangoapps/open_ended_grading/grading_service.py @@ -0,0 +1,100 @@ +# This class gives a common interface for logging into the grading controller +import json +import logging +import requests +from requests.exceptions import RequestException, ConnectionError, HTTPError +import sys + +from django.conf import settings +from django.http import HttpResponse, Http404 + +from courseware.access import has_access +from util.json_request import expect_json +from xmodule.course_module import CourseDescriptor + +log = logging.getLogger(__name__) + +class GradingServiceError(Exception): + pass + +class GradingService(object): + """ + Interface to staff grading backend. + """ + def __init__(self, config): + self.username = config['username'] + self.password = config['password'] + self.url = config['url'] + self.login_url = self.url + '/login/' + self.session = requests.session() + + def _login(self): + """ + Log into the staff grading service. + + Raises requests.exceptions.HTTPError if something goes wrong. + + Returns the decoded json dict of the response. + """ + response = self.session.post(self.login_url, + {'username': self.username, + 'password': self.password,}) + + response.raise_for_status() + + return response.json + + def post(self, url, data, allow_redirects=False): + """ + Make a post request to the grading controller + """ + try: + op = lambda: self.session.post(url, data=data, + allow_redirects=allow_redirects) + r = self._try_with_login(op) + except (RequestException, ConnectionError, HTTPError) as err: + # reraise as promised GradingServiceError, but preserve stacktrace. 
+ raise GradingServiceError, str(err), sys.exc_info()[2] + + return r.text + + def get(self, url, params, allow_redirects=False): + """ + Make a get request to the grading controller + """ + log.debug(params) + op = lambda: self.session.get(url, + allow_redirects=allow_redirects, + params=params) + try: + r = self._try_with_login(op) + except (RequestException, ConnectionError, HTTPError) as err: + # reraise as promised GradingServiceError, but preserve stacktrace. + raise GradingServiceError, str(err), sys.exc_info()[2] + + return r.text + + + def _try_with_login(self, operation): + """ + Call operation(), which should return a requests response object. If + the request fails with a 'login_required' error, call _login() and try + the operation again. + + Returns the result of operation(). Does not catch exceptions. + """ + response = operation() + if (response.json + and response.json.get('success') == False + and response.json.get('error') == 'login_required'): + # apparrently we aren't logged in. Try to fix that. + r = self._login() + if r and not r.get('success'): + log.warning("Couldn't log into staff_grading backend. 
Response: %s", + r) + # try again + response = operation() + response.raise_for_status() + + return response + diff --git a/lms/djangoapps/open_ended_grading/peer_grading_service.py b/lms/djangoapps/open_ended_grading/peer_grading_service.py new file mode 100644 index 0000000000..8e0f8cbbaa --- /dev/null +++ b/lms/djangoapps/open_ended_grading/peer_grading_service.py @@ -0,0 +1,361 @@ +""" +This module provides an interface on the grading-service backend +for peer grading + +Use peer_grading_service() to get the version specified +in settings.PEER_GRADING_INTERFACE + +""" +import json +import logging +import requests +from requests.exceptions import RequestException, ConnectionError, HTTPError +import sys + +from django.conf import settings +from django.http import HttpResponse, Http404 +from grading_service import GradingService +from grading_service import GradingServiceError + +from courseware.access import has_access +from util.json_request import expect_json +from xmodule.course_module import CourseDescriptor +from student.models import unique_id_for_user + +log = logging.getLogger(__name__) + +""" +This is a mock peer grading service that can be used for unit tests +without making actual service calls to the grading controller +""" +class MockPeerGradingService(object): + def get_next_submission(self, problem_location, grader_id): + return json.dumps({'success': True, + 'submission_id':1, + 'submission_key': "", + 'student_response': 'fake student response', + 'prompt': 'fake submission prompt', + 'rubric': 'fake rubric', + 'max_score': 4}) + + def save_grade(self, location, grader_id, submission_id, + score, feedback, submission_key): + return json.dumps({'success': True}) + + def is_student_calibrated(self, problem_location, grader_id): + return json.dumps({'success': True, 'calibrated': True}) + + def show_calibration_essay(self, problem_location, grader_id): + return json.dumps({'success': True, + 'submission_id':1, + 'submission_key': '', + 
'student_response': 'fake student response', + 'prompt': 'fake submission prompt', + 'rubric': 'fake rubric', + 'max_score': 4}) + + def save_calibration_essay(self, problem_location, grader_id, + calibration_essay_id, submission_key, score, feedback): + return {'success': True, 'actual_score': 2} + + def get_problem_list(self, course_id, grader_id): + return json.dumps({'success': True, + 'problem_list': [ + json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1', + 'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5}), + json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2', + 'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5}) + ]}) + +class PeerGradingService(GradingService): + """ + Interface with the grading controller for peer grading + """ + def __init__(self, config): + super(PeerGradingService, self).__init__(config) + self.get_next_submission_url = self.url + '/get_next_submission/' + self.save_grade_url = self.url + '/save_grade/' + self.is_student_calibrated_url = self.url + '/is_student_calibrated/' + self.show_calibration_essay_url = self.url + '/show_calibration_essay/' + self.save_calibration_essay_url = self.url + '/save_calibration_essay/' + self.get_problem_list_url = self.url + '/get_problem_list/' + self.get_notifications_url = self.url + '/get_notifications/' + + def get_next_submission(self, problem_location, grader_id): + response = self.get(self.get_next_submission_url, + {'location': problem_location, 'grader_id': grader_id}) + return response + + def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key): + data = {'grader_id' : grader_id, + 'submission_id' : submission_id, + 'score' : score, + 'feedback' : feedback, + 'submission_key': submission_key, + 'location': location} + return self.post(self.save_grade_url, data) + + def is_student_calibrated(self, problem_location, grader_id): + params = {'problem_id' : problem_location, 'student_id': grader_id} + 
return self.get(self.is_student_calibrated_url, params) + + def show_calibration_essay(self, problem_location, grader_id): + params = {'problem_id' : problem_location, 'student_id': grader_id} + return self.get(self.show_calibration_essay_url, params) + + def save_calibration_essay(self, problem_location, grader_id, calibration_essay_id, submission_key, score, feedback): + data = {'location': problem_location, + 'student_id': grader_id, + 'calibration_essay_id': calibration_essay_id, + 'submission_key': submission_key, + 'score': score, + 'feedback': feedback} + return self.post(self.save_calibration_essay_url, data) + + def get_problem_list(self, course_id, grader_id): + params = {'course_id': course_id, 'student_id': grader_id} + response = self.get(self.get_problem_list_url, params) + return response + + def get_notifications(self, course_id, grader_id): + params = {'course_id': course_id, 'student_id': grader_id} + response = self.get(self.get_notifications_url, params) + return response + + +_service = None +def peer_grading_service(): + """ + Return a peer grading service instance--if settings.MOCK_PEER_GRADING is True, + returns a mock one, otherwise a real one. + + Caches the result, so changing the setting after the first call to this + function will have no effect. + """ + global _service + if _service is not None: + return _service + + if settings.MOCK_PEER_GRADING: + _service = MockPeerGradingService() + else: + _service = PeerGradingService(settings.PEER_GRADING_INTERFACE) + + return _service + +def _err_response(msg): + """ + Return a HttpResponse with a json dump with success=False, and the given error message. 
+ """ + return HttpResponse(json.dumps({'success': False, 'error': msg}), + mimetype="application/json") + +def _check_required(request, required): + actual = set(request.POST.keys()) + missing = required - actual + if len(missing) > 0: + return False, "Missing required keys: {0}".format(', '.join(missing)) + else: + return True, "" + +def _check_post(request): + if request.method != 'POST': + raise Http404 + + +def get_next_submission(request, course_id): + """ + Makes a call to the grading controller for the next essay that should be graded + Returns a json dict with the following keys: + + 'success': bool + + 'submission_id': a unique identifier for the submission, to be passed back + with the grade. + + 'submission': the submission, rendered as read-only html for grading + + 'rubric': the rubric, also rendered as html. + + 'submission_key': a key associated with the submission for validation reasons + + 'error': if success is False, will have an error message with more info. + """ + _check_post(request) + required = set(['location']) + success, message = _check_required(request, required) + if not success: + return _err_response(message) + grader_id = unique_id_for_user(request.user) + p = request.POST + location = p['location'] + + try: + response = peer_grading_service().get_next_submission(location, grader_id) + return HttpResponse(response, + mimetype="application/json") + except GradingServiceError: + log.exception("Error getting next submission. server url: {0} location: {1}, grader_id: {2}" + .format(staff_grading_service().url, location, grader_id)) + return json.dumps({'success': False, + 'error': 'Could not connect to grading service'}) + +def save_grade(request, course_id): + """ + Saves the grade of a given submission. 
+ Input: + The request should have the following keys: + location - problem location + submission_id - id associated with this submission + submission_key - submission key given for validation purposes + score - the grade that was given to the submission + feedback - the feedback from the student + Returns + A json object with the following keys: + success: bool indicating whether the save was a success + error: if there was an error in the submission, this is the error message + """ + _check_post(request) + required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback']) + success, message = _check_required(request, required) + if not success: + return _err_response(message) + grader_id = unique_id_for_user(request.user) + p = request.POST + location = p['location'] + submission_id = p['submission_id'] + score = p['score'] + feedback = p['feedback'] + submission_key = p['submission_key'] + try: + response = peer_grading_service().save_grade(location, grader_id, submission_id, + score, feedback, submission_key) + return HttpResponse(response, mimetype="application/json") + except GradingServiceError: + log.exception("""Error saving grade. 
server url: {0}, location: {1}, submission_id:{2}, + submission_key: {3}, score: {4}""" + .format(staff_grading_service().url, + location, submission_id, submission_key, score) + ) + return json.dumps({'success': False, + 'error': 'Could not connect to grading service'}) + + + +def is_student_calibrated(request, course_id): + """ + Calls the grading controller to see if the given student is calibrated + on the given problem + + Input: + In the request, we need the following arguments: + location - problem location + + Returns: + Json object with the following keys + success - bool indicating whether or not the call was successful + calibrated - true if the grader has fully calibrated and can now move on to grading + - false if the grader is still working on calibration problems + total_calibrated_on_so_far - the number of calibration essays for this problem + that this grader has graded + """ + _check_post(request) + required = set(['location']) + success, message = _check_required(request, required) + if not success: + return _err_response(message) + grader_id = unique_id_for_user(request.user) + p = request.POST + location = p['location'] + + try: + response = peer_grading_service().is_student_calibrated(location, grader_id) + return HttpResponse(response, mimetype="application/json") + except GradingServiceError: + log.exception("Error from grading service. server url: {0}, grader_id: {0}, location: {1}" + .format(staff_grading_service().url, grader_id, location)) + return json.dumps({'success': False, + 'error': 'Could not connect to grading service'}) + + + +def show_calibration_essay(request, course_id): + """ + Fetch the next calibration essay from the grading controller and return it + Inputs: + In the request + location - problem location + + Returns: + A json dict with the following keys + 'success': bool + + 'submission_id': a unique identifier for the submission, to be passed back + with the grade. 
+ + 'submission': the submission, rendered as read-only html for grading + + 'rubric': the rubric, also rendered as html. + + 'submission_key': a key associated with the submission for validation reasons + + 'error': if success is False, will have an error message with more info. + + """ + _check_post(request) + + required = set(['location']) + success, message = _check_required(request, required) + if not success: + return _err_response(message) + + grader_id = unique_id_for_user(request.user) + p = request.POST + location = p['location'] + try: + response = peer_grading_service().show_calibration_essay(location, grader_id) + return HttpResponse(response, mimetype="application/json") + except GradingServiceError: + log.exception("Error from grading service. server url: {0}, location: {0}" + .format(staff_grading_service().url, location)) + return json.dumps({'success': False, + 'error': 'Could not connect to grading service'}) + + +def save_calibration_essay(request, course_id): + """ + Saves the grader's grade of a given calibration. 
+ Input: + The request should have the following keys: + location - problem location + submission_id - id associated with this submission + submission_key - submission key given for validation purposes + score - the grade that was given to the submission + feedback - the feedback from the student + Returns + A json object with the following keys: + success: bool indicating whether the save was a success + error: if there was an error in the submission, this is the error message + actual_score: the score that the instructor gave to this calibration essay + + """ + _check_post(request) + + required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback']) + success, message = _check_required(request, required) + if not success: + return _err_response(message) + grader_id = unique_id_for_user(request.user) + p = request.POST + location = p['location'] + calibration_essay_id = p['submission_id'] + submission_key = p['submission_key'] + score = p['score'] + feedback = p['feedback'] + + try: + response = peer_grading_service().save_calibration_essay(location, grader_id, calibration_essay_id, submission_key, score, feedback) + return HttpResponse(response, mimetype="application/json") + except GradingServiceError: + log.exception("Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format(location, submission_id, submission_key, grader_id)) + return _err_response('Could not connect to grading service') diff --git a/lms/djangoapps/open_ended_grading/staff_grading.py b/lms/djangoapps/open_ended_grading/staff_grading.py new file mode 100644 index 0000000000..7a48b25a49 --- /dev/null +++ b/lms/djangoapps/open_ended_grading/staff_grading.py @@ -0,0 +1,25 @@ +""" +LMS part of instructor grading: + +- views + ajax handling +- calls the instructor grading service +""" + +import json +import logging + +log = logging.getLogger(__name__) + + +class StaffGrading(object): + """ + Wrap up functionality for staff 
grading of submissions--interface exposes get_html, ajax views. + """ + def __init__(self, course): + self.course = course + + def get_html(self): + return "Instructor grading!" + # context = {} + # return render_to_string('courseware/instructor_grading_view.html', context) + diff --git a/lms/djangoapps/open_ended_grading/staff_grading_service.py b/lms/djangoapps/open_ended_grading/staff_grading_service.py new file mode 100644 index 0000000000..0db17e2ef4 --- /dev/null +++ b/lms/djangoapps/open_ended_grading/staff_grading_service.py @@ -0,0 +1,326 @@ +""" +This module provides views that proxy to the staff grading backend service. +""" + +import json +import logging +import requests +from requests.exceptions import RequestException, ConnectionError, HTTPError +import sys +from grading_service import GradingService +from grading_service import GradingServiceError + +from django.conf import settings +from django.http import HttpResponse, Http404 + +from courseware.access import has_access +from util.json_request import expect_json +from xmodule.course_module import CourseDescriptor +from student.models import unique_id_for_user + +log = logging.getLogger(__name__) + + + +class MockStaffGradingService(object): + """ + A simple mockup of a staff grading service, testing. 
+ """ + def __init__(self): + self.cnt = 0 + + def get_next(self,course_id, location, grader_id): + self.cnt += 1 + return json.dumps({'success': True, + 'submission_id': self.cnt, + 'submission': 'Test submission {cnt}'.format(cnt=self.cnt), + 'num_graded': 3, + 'min_for_ml': 5, + 'num_pending': 4, + 'prompt': 'This is a fake prompt', + 'ml_error_info': 'ML info', + 'max_score': 2 + self.cnt % 3, + 'rubric': 'A rubric'}) + + def get_problem_list(self, course_id, grader_id): + self.cnt += 1 + return json.dumps({'success': True, + 'problem_list': [ + json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo1', \ + 'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5, 'min_for_ml': 10}), + json.dumps({'location': 'i4x://MITx/3.091x/problem/open_ended_demo2', \ + 'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5, 'min_for_ml': 10}) + ]}) + + + def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped): + return self.get_next(course_id, 'fake location', grader_id) + + +class StaffGradingService(GradingService): + """ + Interface to staff grading backend. + """ + def __init__(self, config): + super(StaffGradingService, self).__init__(config) + self.get_next_url = self.url + '/get_next_submission/' + self.save_grade_url = self.url + '/save_grade/' + self.get_problem_list_url = self.url + '/get_problem_list/' + self.get_notifications_url = self.url + "/get_notifications/" + + + def get_problem_list(self, course_id, grader_id): + """ + Get the list of problems for a given course. + + Args: + course_id: course id that we want the problems of + grader_id: who is grading this? The anonymous user_id of the grader. + + Returns: + json string with the response from the service. (Deliberately not + writing out the fields here--see the docs on the staff_grading view + in the grading_controller repo) + + Raises: + GradingServiceError: something went wrong with the connection. 
+ """ + params = {'course_id': course_id,'grader_id': grader_id} + return self.get(self.get_problem_list_url, params) + + + def get_next(self, course_id, location, grader_id): + """ + Get the next thing to grade. + + Args: + course_id: the course that this problem belongs to + location: location of the problem that we are grading and would like the + next submission for + grader_id: who is grading this? The anonymous user_id of the grader. + + Returns: + json string with the response from the service. (Deliberately not + writing out the fields here--see the docs on the staff_grading view + in the grading_controller repo) + + Raises: + GradingServiceError: something went wrong with the connection. + """ + return self.get(self.get_next_url, + params={'location': location, + 'grader_id': grader_id}) + + + def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped): + """ + Save a score and feedback for a submission. + + Returns: + json dict with keys + 'success': bool + 'error': error msg, if something went wrong. + + Raises: + GradingServiceError if there's a problem connecting. + """ + data = {'course_id': course_id, + 'submission_id': submission_id, + 'score': score, + 'feedback': feedback, + 'grader_id': grader_id, + 'skipped': skipped} + + return self.post(self.save_grade_url, data=data) + + def get_notifications(self, course_id): + params = {'course_id': course_id} + response = self.get(self.get_notifications_url, params) + return response + + +# don't initialize until staff_grading_service() is called--means that just +# importing this file doesn't create objects that may not have the right config +_service = None + +def staff_grading_service(): + """ + Return a staff grading service instance--if settings.MOCK_STAFF_GRADING is True, + returns a mock one, otherwise a real one. + + Caches the result, so changing the setting after the first call to this + function will have no effect. 
+ """ + global _service + if _service is not None: + return _service + + if settings.MOCK_STAFF_GRADING: + _service = MockStaffGradingService() + else: + _service = StaffGradingService(settings.STAFF_GRADING_INTERFACE) + + return _service + +def _err_response(msg): + """ + Return a HttpResponse with a json dump with success=False, and the given error message. + """ + return HttpResponse(json.dumps({'success': False, 'error': msg}), + mimetype="application/json") + + +def _check_access(user, course_id): + """ + Raise 404 if user doesn't have staff access to course_id + """ + course_location = CourseDescriptor.id_to_location(course_id) + if not has_access(user, course_location, 'staff'): + raise Http404 + + return + + +def get_next(request, course_id): + """ + Get the next thing to grade for course_id and with the location specified + in the request. + + Returns a json dict with the following keys: + + 'success': bool + + 'submission_id': a unique identifier for the submission, to be passed back + with the grade. + + 'submission': the submission, rendered as read-only html for grading + + 'rubric': the rubric, also rendered as html. + + 'message': if there was no submission available, but nothing went wrong, + there will be a message field. + + 'error': if success is False, will have an error message with more info. 
+ """ + _check_access(request.user, course_id) + + required = set(['location']) + if request.method != 'POST': + raise Http404 + actual = set(request.POST.keys()) + missing = required - actual + if len(missing) > 0: + return _err_response('Missing required keys {0}'.format( + ', '.join(missing))) + grader_id = unique_id_for_user(request.user) + p = request.POST + location = p['location'] + + return HttpResponse(_get_next(course_id, grader_id, location), + mimetype="application/json") + + +def get_problem_list(request, course_id): + """ + Get all the problems for the given course id + + Returns a json dict with the following keys: + success: bool + + problem_list: a list containing json dicts with the following keys: + each dict represents a different problem in the course + + location: the location of the problem + + problem_name: the name of the problem + + num_graded: the number of responses that have been graded + + num_pending: the number of responses that are sitting in the queue + + min_for_ml: the number of responses that need to be graded before + the ml can be run + + """ + _check_access(request.user, course_id) + try: + response = staff_grading_service().get_problem_list(course_id, unique_id_for_user(request.user)) + return HttpResponse(response, + mimetype="application/json") + except GradingServiceError: + log.exception("Error from grading service. server url: {0}" + .format(staff_grading_service().url)) + return HttpResponse(json.dumps({'success': False, + 'error': 'Could not connect to grading service'})) + + +def _get_next(course_id, grader_id, location): + """ + Implementation of get_next (also called from save_grade) -- returns a json string + """ + try: + return staff_grading_service().get_next(course_id, location, grader_id) + except GradingServiceError: + log.exception("Error from grading service. 
server url: {0}" + .format(staff_grading_service().url)) + return json.dumps({'success': False, + 'error': 'Could not connect to grading service'}) + + +@expect_json +def save_grade(request, course_id): + """ + Save the grade and feedback for a submission, and, if all goes well, return + the next thing to grade. + + Expects the following POST parameters: + 'score': int + 'feedback': string + 'submission_id': int + + Returns the same thing as get_next, except that additional error messages + are possible if something goes wrong with saving the grade. + """ + _check_access(request.user, course_id) + + if request.method != 'POST': + raise Http404 + + required = set(['score', 'feedback', 'submission_id', 'location']) + actual = set(request.POST.keys()) + missing = required - actual + if len(missing) > 0: + return _err_response('Missing required keys {0}'.format( + ', '.join(missing))) + + grader_id = unique_id_for_user(request.user) + p = request.POST + + + location = p['location'] + skipped = 'skipped' in p + try: + result_json = staff_grading_service().save_grade(course_id, + grader_id, + p['submission_id'], + p['score'], + p['feedback'], + skipped) + except GradingServiceError: + log.exception("Error saving grade") + return _err_response('Could not connect to grading service') + + try: + result = json.loads(result_json) + except ValueError: + log.exception("save_grade returned broken json: %s", result_json) + return _err_response('Grading service returned mal-formatted data.') + + if not result.get('success', False): + log.warning('Got success=False from grading service. Response: %s', result_json) + return _err_response('Grading service failed') + + # Ok, save_grade seemed to work. Get the next submission to grade. 
+ return HttpResponse(_get_next(course_id, grader_id, location), + mimetype="application/json") + diff --git a/lms/djangoapps/open_ended_grading/tests.py b/lms/djangoapps/open_ended_grading/tests.py new file mode 100644 index 0000000000..0c4376a44b --- /dev/null +++ b/lms/djangoapps/open_ended_grading/tests.py @@ -0,0 +1,112 @@ +""" +Tests for open ended grading interfaces + +django-admin.py test --settings=lms.envs.test --pythonpath=. lms/djangoapps/open_ended_grading +""" + +from django.test import TestCase +from open_ended_grading import staff_grading_service +from django.core.urlresolvers import reverse +from django.contrib.auth.models import Group + +from courseware.access import _course_staff_group_name +import courseware.tests.tests as ct +from xmodule.modulestore.django import modulestore +import xmodule.modulestore.django +from nose import SkipTest +from mock import patch, Mock +import json + +from override_settings import override_settings + +_mock_service = staff_grading_service.MockStaffGradingService() + +@override_settings(MODULESTORE=ct.TEST_DATA_XML_MODULESTORE) +class TestStaffGradingService(ct.PageLoader): + ''' + Check that staff grading service proxy works. Basically just checking the + access control and error handling logic -- all the actual work is on the + backend. 
+ ''' + def setUp(self): + xmodule.modulestore.django._MODULESTORES = {} + + self.student = 'view@test.com' + self.instructor = 'view2@test.com' + self.password = 'foo' + self.location = 'TestLocation' + self.create_account('u1', self.student, self.password) + self.create_account('u2', self.instructor, self.password) + self.activate_user(self.student) + self.activate_user(self.instructor) + + self.course_id = "edX/toy/2012_Fall" + self.toy = modulestore().get_course(self.course_id) + def make_instructor(course): + group_name = _course_staff_group_name(course.location) + g = Group.objects.create(name=group_name) + g.user_set.add(ct.user(self.instructor)) + + make_instructor(self.toy) + + self.mock_service = staff_grading_service.staff_grading_service() + + self.logout() + + def test_access(self): + """ + Make sure only staff have access. + """ + self.login(self.student, self.password) + + # both get and post should return 404 + for view_name in ('staff_grading_get_next', 'staff_grading_save_grade'): + url = reverse(view_name, kwargs={'course_id': self.course_id}) + self.check_for_get_code(404, url) + self.check_for_post_code(404, url) + + + def test_get_next(self): + self.login(self.instructor, self.password) + + url = reverse('staff_grading_get_next', kwargs={'course_id': self.course_id}) + data = {'location': self.location} + + r = self.check_for_post_code(200, url, data) + d = json.loads(r.content) + self.assertTrue(d['success']) + self.assertEquals(d['submission_id'], self.mock_service.cnt) + self.assertIsNotNone(d['submission']) + self.assertIsNotNone(d['num_graded']) + self.assertIsNotNone(d['min_for_ml']) + self.assertIsNotNone(d['num_pending']) + self.assertIsNotNone(d['prompt']) + self.assertIsNotNone(d['ml_error_info']) + self.assertIsNotNone(d['max_score']) + self.assertIsNotNone(d['rubric']) + + + def test_save_grade(self): + self.login(self.instructor, self.password) + + url = reverse('staff_grading_save_grade', kwargs={'course_id': self.course_id}) + + 
data = {'score': '12', + 'feedback': 'great!', + 'submission_id': '123', + 'location': self.location} + r = self.check_for_post_code(200, url, data) + d = json.loads(r.content) + self.assertTrue(d['success'], str(d)) + self.assertEquals(d['submission_id'], self.mock_service.cnt) + + def test_get_problem_list(self): + self.login(self.instructor, self.password) + + url = reverse('staff_grading_get_problem_list', kwargs={'course_id': self.course_id}) + data = {} + + r = self.check_for_post_code(200, url, data) + d = json.loads(r.content) + self.assertTrue(d['success'], str(d)) + self.assertIsNotNone(d['problem_list']) diff --git a/lms/djangoapps/open_ended_grading/views.py b/lms/djangoapps/open_ended_grading/views.py new file mode 100644 index 0000000000..858c9a4fd5 --- /dev/null +++ b/lms/djangoapps/open_ended_grading/views.py @@ -0,0 +1,118 @@ +# Grading Views + +import logging +import urllib + +from django.conf import settings +from django.views.decorators.cache import cache_control +from mitxmako.shortcuts import render_to_response +from django.core.urlresolvers import reverse + +from student.models import unique_id_for_user +from courseware.courses import get_course_with_access + +from peer_grading_service import PeerGradingService +from peer_grading_service import MockPeerGradingService +from grading_service import GradingServiceError +import json +from .staff_grading import StaffGrading + + +log = logging.getLogger(__name__) + +template_imports = {'urllib': urllib} +if settings.MOCK_PEER_GRADING: + peer_gs = MockPeerGradingService() +else: + peer_gs = PeerGradingService(settings.PEER_GRADING_INTERFACE) + +""" +Reverses the URL from the name and the course id, and then adds a trailing slash if +it does not exist yet + +""" +def _reverse_with_slash(url_name, course_id): + ajax_url = reverse(url_name, kwargs={'course_id': course_id}) + if not ajax_url.endswith('/'): + ajax_url += '/' + return ajax_url + + +@cache_control(no_cache=True, no_store=True, 
must_revalidate=True) +def staff_grading(request, course_id): + """ + Show the instructor grading interface. + """ + course = get_course_with_access(request.user, course_id, 'staff') + + ajax_url = _reverse_with_slash('staff_grading', course_id) + + return render_to_response('instructor/staff_grading.html', { + 'course': course, + 'course_id': course_id, + 'ajax_url': ajax_url, + # Checked above + 'staff_access': True, }) + + +@cache_control(no_cache=True, no_store=True, must_revalidate=True) +def peer_grading(request, course_id): + ''' + Show a peer grading interface + ''' + course = get_course_with_access(request.user, course_id, 'load') + + # call problem list service + success = False + error_text = "" + problem_list = [] + try: + problem_list_json = peer_gs.get_problem_list(course_id, unique_id_for_user(request.user)) + problem_list_dict = json.loads(problem_list_json) + success = problem_list_dict['success'] + if 'error' in problem_list_dict: + error_text = problem_list_dict['error'] + + problem_list = problem_list_dict['problem_list'] + + except GradingServiceError: + error_text = "Error occured while contacting the grading service" + success = False + # catch error if if the json loads fails + except ValueError: + error_text = "Could not get problem list" + success = False + + ajax_url = _reverse_with_slash('peer_grading', course_id) + + return render_to_response('peer_grading/peer_grading.html', { + 'course': course, + 'course_id': course_id, + 'ajax_url': ajax_url, + 'success': success, + 'problem_list': problem_list, + 'error_text': error_text, + # Checked above + 'staff_access': False, }) + + +@cache_control(no_cache=True, no_store=True, must_revalidate=True) +def peer_grading_problem(request, course_id): + ''' + Show individual problem interface + ''' + course = get_course_with_access(request.user, course_id, 'load') + problem_location = request.GET.get("location") + + ajax_url = _reverse_with_slash('peer_grading', course_id) + + return 
render_to_response('peer_grading/peer_grading_problem.html', { + 'view_html': '', + 'course': course, + 'problem_location': problem_location, + 'course_id': course_id, + 'ajax_url': ajax_url, + # Checked above + 'staff_access': False, }) + + diff --git a/lms/djangoapps/portal/README.md b/lms/djangoapps/portal/README.md new file mode 100644 index 0000000000..09930ec8fb --- /dev/null +++ b/lms/djangoapps/portal/README.md @@ -0,0 +1,15 @@ +## acceptance_testing + +This fake django app is here to support acceptance testing using lettuce + +splinter (which wraps selenium). + +First you need to make sure that you've installed the requirements. +This includes lettuce, selenium, splinter, etc. +Do this with: +```pip install -r test-requirements.txt``` + +The settings.py environment file used is named acceptance.py. +It uses a test SQLite database defined as ../db/test-mitx.db. +You need to first start up the server separately, then run the lettuce scenarios. + +Full documentation can be found on the wiki at this link. 
diff --git a/lms/djangoapps/portal/__init__.py b/lms/djangoapps/portal/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/lms/djangoapps/portal/features/common.py b/lms/djangoapps/portal/features/common.py new file mode 100644 index 0000000000..20c2ab56b8 --- /dev/null +++ b/lms/djangoapps/portal/features/common.py @@ -0,0 +1,84 @@ +from lettuce import world, step#, before, after +from factories import * +from django.core.management import call_command +from nose.tools import assert_equals, assert_in +from lettuce.django import django_url +from django.conf import settings +from django.contrib.auth.models import User +from student.models import CourseEnrollment +import time + +from logging import getLogger +logger = getLogger(__name__) + +@step(u'I wait (?:for )?"(\d+)" seconds?$') +def wait(step, seconds): + time.sleep(float(seconds)) + +@step('I (?:visit|access|open) the homepage$') +def i_visit_the_homepage(step): + world.browser.visit(django_url('/')) + assert world.browser.is_element_present_by_css('header.global', 10) + +@step(u'I (?:visit|access|open) the dashboard$') +def i_visit_the_dashboard(step): + world.browser.visit(django_url('/dashboard')) + assert world.browser.is_element_present_by_css('section.container.dashboard', 5) + +@step(r'click (?:the|a) link (?:called|with the text) "([^"]*)"$') +def click_the_link_called(step, text): + world.browser.find_link_by_text(text).click() + +@step('I should be on the dashboard page$') +def i_should_be_on_the_dashboard(step): + assert world.browser.is_element_present_by_css('section.container.dashboard', 5) + assert world.browser.title == 'Dashboard' + +@step(u'I (?:visit|access|open) the courses page$') +def i_am_on_the_courses_page(step): + world.browser.visit(django_url('/courses')) + assert world.browser.is_element_present_by_css('section.courses') + +@step('I should see that the path is "([^"]*)"$') +def i_should_see_that_the_path_is(step, path): + assert world.browser.url == 
django_url(path) + +@step(u'the page title should be "([^"]*)"$') +def the_page_title_should_be(step, title): + assert world.browser.title == title + +@step(r'should see that the url is "([^"]*)"$') +def should_have_the_url(step, url): + assert_equals(world.browser.url, url) + +@step(r'should see (?:the|a) link (?:called|with the text) "([^"]*)"$') +def should_see_a_link_called(step, text): + assert len(world.browser.find_link_by_text(text)) > 0 + +@step(r'should see "(.*)" (?:somewhere|anywhere) in (?:the|this) page') +def should_see_in_the_page(step, text): + assert_in(text, world.browser.html) + +@step('I am logged in$') +def i_am_logged_in(step): + world.create_user('robot') + world.log_in('robot@edx.org', 'test') + +@step('I am not logged in$') +def i_am_not_logged_in(step): + world.browser.cookies.delete() + +@step(u'I am registered for a course$') +def i_am_registered_for_a_course(step): + world.create_user('robot') + u = User.objects.get(username='robot') + CourseEnrollment.objects.create(user=u, course_id='MITx/6.002x/2012_Fall') + world.log_in('robot@edx.org', 'test') + +@step(u'I am an edX user$') +def i_am_an_edx_user(step): + world.create_user('robot') + +@step(u'User "([^"]*)" is an edX user$') +def registered_edx_user(step, uname): + world.create_user(uname) diff --git a/lms/djangoapps/portal/features/factories.py b/lms/djangoapps/portal/features/factories.py new file mode 100644 index 0000000000..07b615f468 --- /dev/null +++ b/lms/djangoapps/portal/features/factories.py @@ -0,0 +1,34 @@ +import factory +from student.models import User, UserProfile, Registration +from datetime import datetime +import uuid + +class UserProfileFactory(factory.Factory): + FACTORY_FOR = UserProfile + + user = None + name = 'Jack Foo' + level_of_education = None + gender = 'm' + mailing_address = None + goals = 'World domination' + +class RegistrationFactory(factory.Factory): + FACTORY_FOR = Registration + + user = None + activation_key = uuid.uuid4().hex + +class 
UserFactory(factory.Factory): + FACTORY_FOR = User + + username = 'robot' + email = 'robot+test@edx.org' + password = 'test' + first_name = 'Robot' + last_name = 'Test' + is_staff = False + is_active = True + is_superuser = False + last_login = datetime(2012, 1, 1) + date_joined = datetime(2011, 1, 1) diff --git a/lms/djangoapps/portal/features/homepage.feature b/lms/djangoapps/portal/features/homepage.feature new file mode 100644 index 0000000000..06a45c4bfa --- /dev/null +++ b/lms/djangoapps/portal/features/homepage.feature @@ -0,0 +1,47 @@ +Feature: Homepage for web users + In order to get an idea what edX is about + As a an anonymous web user + I want to check the information on the home page + + Scenario: User can see the "Login" button + Given I visit the homepage + Then I should see a link called "Log In" + + Scenario: User can see the "Sign up" button + Given I visit the homepage + Then I should see a link called "Sign Up" + + Scenario Outline: User can see main parts of the page + Given I visit the homepage + Then I should see a link called "" + When I click the link with the text "" + Then I should see that the path is "" + + Examples: + | Link | Path | + | Find Courses | /courses | + | About | /about | + | Jobs | /jobs | + | Contact | /contact | + + Scenario: User can visit the blog + Given I visit the homepage + When I click the link with the text "Blog" + Then I should see that the url is "http://blog.edx.org/" + + # TODO: test according to domain or policy + Scenario: User can see the partner institutions + Given I visit the homepage + Then I should see "" in the Partners section + + Examples: + | Partner | + | MITx | + | HarvardX | + | BerkeleyX | + | UTx | + | WellesleyX | + | GeorgetownX | + + # # TODO: Add scenario that tests the courses available + # # using a policy or a configuration file diff --git a/lms/djangoapps/portal/features/homepage.py b/lms/djangoapps/portal/features/homepage.py new file mode 100644 index 0000000000..638d65077c --- 
/dev/null +++ b/lms/djangoapps/portal/features/homepage.py @@ -0,0 +1,8 @@ +from lettuce import world, step +from nose.tools import assert_in + +@step('I should see "([^"]*)" in the Partners section$') +def i_should_see_partner(step, partner): + partners = world.browser.find_by_css(".partner .name span") + names = set(span.text for span in partners) + assert_in(partner, names) diff --git a/lms/djangoapps/portal/features/login.feature b/lms/djangoapps/portal/features/login.feature new file mode 100644 index 0000000000..23317b4876 --- /dev/null +++ b/lms/djangoapps/portal/features/login.feature @@ -0,0 +1,27 @@ +Feature: Login in as a registered user + As a registered user + In order to access my content + I want to be able to login in to edX + + Scenario: Login to an unactivated account + Given I am an edX user + And I am an unactivated user + And I visit the homepage + When I click the link with the text "Log In" + And I submit my credentials on the login form + Then I should see the login error message "This account has not been activated" + + Scenario: Login to an activated account + Given I am an edX user + And I am an activated user + And I visit the homepage + When I click the link with the text "Log In" + And I submit my credentials on the login form + Then I should be on the dashboard page + + Scenario: Logout of a signed in account + Given I am logged in + When I click the dropdown arrow + And I click the link with the text "Log Out" + Then I should see a link with the text "Log In" + And I should see that the path is "/" diff --git a/lms/djangoapps/portal/features/login.py b/lms/djangoapps/portal/features/login.py new file mode 100644 index 0000000000..5f200eb259 --- /dev/null +++ b/lms/djangoapps/portal/features/login.py @@ -0,0 +1,45 @@ +from lettuce import step, world +from django.contrib.auth.models import User + +@step('I am an unactivated user$') +def i_am_an_unactivated_user(step): + user_is_an_unactivated_user('robot') + +@step('I am an activated 
user$') +def i_am_an_activated_user(step): + user_is_an_activated_user('robot') + +@step('I submit my credentials on the login form') +def i_submit_my_credentials_on_the_login_form(step): + fill_in_the_login_form('email', 'robot@edx.org') + fill_in_the_login_form('password', 'test') + login_form = world.browser.find_by_css('form#login_form') + login_form.find_by_value('Access My Courses').click() + +@step(u'I should see the login error message "([^"]*)"$') +def i_should_see_the_login_error_message(step, msg): + login_error_div = world.browser.find_by_css('form#login_form #login_error') + assert (msg in login_error_div.text) + +@step(u'click the dropdown arrow$') +def click_the_dropdown(step): + css = ".dropdown" + e = world.browser.find_by_css(css) + e.click() + +#### helper functions + +def user_is_an_unactivated_user(uname): + u = User.objects.get(username=uname) + u.is_active = False + u.save() + +def user_is_an_activated_user(uname): + u = User.objects.get(username=uname) + u.is_active = True + u.save() + +def fill_in_the_login_form(field, value): + login_form = world.browser.find_by_css('form#login_form') + form_field = login_form.find_by_name(field) + form_field.fill(value) diff --git a/lms/djangoapps/portal/features/registration.feature b/lms/djangoapps/portal/features/registration.feature new file mode 100644 index 0000000000..d8a6796ee3 --- /dev/null +++ b/lms/djangoapps/portal/features/registration.feature @@ -0,0 +1,17 @@ +Feature: Register for a course + As a registered user + In order to access my class content + I want to register for a class on the edX website + + Scenario: I can register for a course + Given I am logged in + And I visit the courses page + When I register for the course numbered "6.002x" + Then I should see the course numbered "6.002x" in my dashboard + + Scenario: I can unregister for a course + Given I am registered for a course + And I visit the dashboard + When I click the link with the text "Unregister" + And I press the 
"Unregister" button in the Unenroll dialog + Then I should see "Looks like you haven't registered for any courses yet." somewhere in the page \ No newline at end of file diff --git a/lms/djangoapps/portal/features/registration.py b/lms/djangoapps/portal/features/registration.py new file mode 100644 index 0000000000..124bed4923 --- /dev/null +++ b/lms/djangoapps/portal/features/registration.py @@ -0,0 +1,24 @@ +from lettuce import world, step + +@step('I register for the course numbered "([^"]*)"$') +def i_register_for_the_course(step, course): + courses_section = world.browser.find_by_css('section.courses') + course_link_css = 'article[id*="%s"] a' % course + course_link = courses_section.find_by_css(course_link_css).first + course_link.click() + + intro_section = world.browser.find_by_css('section.intro') + register_link = intro_section.find_by_css('a.register') + register_link.click() + + assert world.browser.is_element_present_by_css('section.container.dashboard') + +@step(u'I should see the course numbered "([^"]*)" in my dashboard$') +def i_should_see_that_course_in_my_dashboard(step, course): + course_link_css = 'section.my-courses a[href*="%s"]' % course + assert world.browser.is_element_present_by_css(course_link_css) + +@step(u'I press the "([^"]*)" button in the Unenroll dialog') +def i_press_the_button_in_the_unenroll_dialog(step, value): + button_css = 'section#unenroll-modal input[value="%s"]' % value + world.browser.find_by_css(button_css).click() diff --git a/lms/djangoapps/portal/features/signup.feature b/lms/djangoapps/portal/features/signup.feature new file mode 100644 index 0000000000..b28a6819a1 --- /dev/null +++ b/lms/djangoapps/portal/features/signup.feature @@ -0,0 +1,16 @@ +Feature: Sign in + In order to use the edX content + As a new user + I want to signup for a student account + + Scenario: Sign up from the homepage + Given I visit the homepage + When I click the link with the text "Sign Up" + And I fill in "email" on the registration 
form with "robot2@edx.org" + And I fill in "password" on the registration form with "test" + And I fill in "username" on the registration form with "robot2" + And I fill in "name" on the registration form with "Robot Two" + And I check the checkbox named "terms_of_service" + And I check the checkbox named "honor_code" + And I press the "Create My Account" button on the registration form + Then I should see "THANKS FOR REGISTERING!" in the dashboard banner diff --git a/lms/djangoapps/portal/features/signup.py b/lms/djangoapps/portal/features/signup.py new file mode 100644 index 0000000000..afde72b589 --- /dev/null +++ b/lms/djangoapps/portal/features/signup.py @@ -0,0 +1,22 @@ +from lettuce import world, step + +@step('I fill in "([^"]*)" on the registration form with "([^"]*)"$') +def when_i_fill_in_field_on_the_registration_form_with_value(step, field, value): + register_form = world.browser.find_by_css('form#register_form') + form_field = register_form.find_by_name(field) + form_field.fill(value) + +@step('I press the "([^"]*)" button on the registration form$') +def i_press_the_button_on_the_registration_form(step, button): + register_form = world.browser.find_by_css('form#register_form') + register_form.find_by_value(button).click() + +@step('I check the checkbox named "([^"]*)"$') +def i_check_checkbox(step, checkbox): + world.browser.find_by_name(checkbox).check() + +@step('I should see "([^"]*)" in the dashboard banner$') +def i_should_see_text_in_the_dashboard_banner_section(step, text): + css_selector = "section.dashboard-banner h2" + assert (text in world.browser.find_by_css(css_selector).text) + \ No newline at end of file diff --git a/lms/djangoapps/terrain/__init__.py b/lms/djangoapps/terrain/__init__.py new file mode 100644 index 0000000000..dd6869e7fd --- /dev/null +++ b/lms/djangoapps/terrain/__init__.py @@ -0,0 +1,6 @@ +# Use this as your lettuce terrain file so that the common steps +# across all lms apps can be put in terrain/common +# See 
https://groups.google.com/forum/?fromgroups=#!msg/lettuce-users/5VyU9B4HcX8/USgbGIJdS5QJ +from terrain.browser import * +from terrain.steps import * +from terrain.factories import * \ No newline at end of file diff --git a/lms/djangoapps/terrain/browser.py b/lms/djangoapps/terrain/browser.py new file mode 100644 index 0000000000..7fe684e153 --- /dev/null +++ b/lms/djangoapps/terrain/browser.py @@ -0,0 +1,26 @@ +from lettuce import before, after, world +from splinter.browser import Browser +from logging import getLogger +import time + +logger = getLogger(__name__) +logger.info("Loading the lettuce acceptance testing terrain file...") + +from django.core.management import call_command + +@before.harvest +def initial_setup(server): + # Launch firefox + world.browser = Browser('firefox') + +@before.each_scenario +def reset_data(scenario): + # Clean out the django test database defined in the + # envs/acceptance.py file: mitx_all/db/test_mitx.db + logger.debug("Flushing the test database...") + call_command('flush', interactive=False) + +@after.all +def teardown_browser(total): + # Quit firefox + world.browser.quit() diff --git a/lms/djangoapps/terrain/factories.py b/lms/djangoapps/terrain/factories.py new file mode 100644 index 0000000000..ddab9e2b06 --- /dev/null +++ b/lms/djangoapps/terrain/factories.py @@ -0,0 +1,34 @@ +import factory +from student.models import User, UserProfile, Registration +from datetime import datetime +import uuid + +class UserProfileFactory(factory.Factory): + FACTORY_FOR = UserProfile + + user = None + name = 'Robot Test' + level_of_education = None + gender = 'm' + mailing_address = None + goals = 'World domination' + +class RegistrationFactory(factory.Factory): + FACTORY_FOR = Registration + + user = None + activation_key = uuid.uuid4().hex + +class UserFactory(factory.Factory): + FACTORY_FOR = User + + username = 'robot' + email = 'robot+test@edx.org' + password = 'test' + first_name = 'Robot' + last_name = 'Test' + is_staff = False + 
is_active = True + is_superuser = False + last_login = datetime(2012, 1, 1) + date_joined = datetime(2011, 1, 1) diff --git a/lms/djangoapps/terrain/steps.py b/lms/djangoapps/terrain/steps.py new file mode 100644 index 0000000000..ce82a0a044 --- /dev/null +++ b/lms/djangoapps/terrain/steps.py @@ -0,0 +1,171 @@ +from lettuce import world, step +from factories import * +from django.core.management import call_command +from lettuce.django import django_url +from django.conf import settings +from django.contrib.auth.models import User +from student.models import CourseEnrollment +from urllib import quote_plus +from nose.tools import assert_equals +from bs4 import BeautifulSoup +import time +import re +import os.path + +from logging import getLogger +logger = getLogger(__name__) + +@step(u'I wait (?:for )?"(\d+)" seconds?$') +def wait(step, seconds): + time.sleep(float(seconds)) + +@step('I (?:visit|access|open) the homepage$') +def i_visit_the_homepage(step): + world.browser.visit(django_url('/')) + assert world.browser.is_element_present_by_css('header.global', 10) + +@step(u'I (?:visit|access|open) the dashboard$') +def i_visit_the_dashboard(step): + world.browser.visit(django_url('/dashboard')) + assert world.browser.is_element_present_by_css('section.container.dashboard', 5) + +@step('I should be on the dashboard page$') +def i_should_be_on_the_dashboard(step): + assert world.browser.is_element_present_by_css('section.container.dashboard', 5) + assert world.browser.title == 'Dashboard' + +@step(u'I (?:visit|access|open) the courses page$') +def i_am_on_the_courses_page(step): + world.browser.visit(django_url('/courses')) + assert world.browser.is_element_present_by_css('section.courses') + +@step(u'I press the "([^"]*)" button$') +def and_i_press_the_button(step, value): + button_css = 'input[value="%s"]' % value + world.browser.find_by_css(button_css).first.click() + +@step('I should see that the path is "([^"]*)"$') +def i_should_see_that_the_path_is(step, path): 
+ assert world.browser.url == django_url(path) + +@step(u'the page title should be "([^"]*)"$') +def the_page_title_should_be(step, title): + assert_equals(world.browser.title, title) + +@step('I am a logged in user$') +def i_am_logged_in_user(step): + create_user('robot') + log_in('robot@edx.org','test') + +@step('I am not logged in$') +def i_am_not_logged_in(step): + world.browser.cookies.delete() + +@step('I am registered for a course$') +def i_am_registered_for_a_course(step): + create_user('robot') + u = User.objects.get(username='robot') + CourseEnrollment.objects.get_or_create(user=u, course_id='MITx/6.002x/2012_Fall') + +@step('I am registered for course "([^"]*)"$') +def i_am_registered_for_course_by_id(step, course_id): + register_by_course_id(course_id) + +@step('I am staff for course "([^"]*)"$') +def i_am_staff_for_course_by_id(step, course_id): + register_by_course_id(course_id, True) + +@step('I log in$') +def i_log_in(step): + log_in('robot@edx.org','test') + +@step(u'I am an edX user$') +def i_am_an_edx_user(step): + create_user('robot') + +#### helper functions +@world.absorb +def create_user(uname): + portal_user = UserFactory.build(username=uname, email=uname + '@edx.org') + portal_user.set_password('test') + portal_user.save() + + registration = RegistrationFactory(user=portal_user) + registration.register(portal_user) + registration.activate() + + user_profile = UserProfileFactory(user=portal_user) + +@world.absorb +def log_in(email, password): + world.browser.cookies.delete() + world.browser.visit(django_url('/')) + world.browser.is_element_present_by_css('header.global', 10) + world.browser.click_link_by_href('#login-modal') + login_form = world.browser.find_by_css('form#login_form') + login_form.find_by_name('email').fill(email) + login_form.find_by_name('password').fill(password) + login_form.find_by_name('submit').click() + + # wait for the page to redraw + assert world.browser.is_element_present_by_css('.content-wrapper', 10) + 
+@world.absorb +def register_by_course_id(course_id, is_staff=False): + create_user('robot') + u = User.objects.get(username='robot') + if is_staff: + u.is_staff=True + u.save() + CourseEnrollment.objects.get_or_create(user=u, course_id=course_id) + +@world.absorb +def save_the_html(path='/tmp'): + u = world.browser.url + html = world.browser.html.encode('ascii', 'ignore') + filename = '%s.html' % quote_plus(u) + f = open('%s/%s' % (path, filename), 'w') + f.write(html) + f.close + +@world.absorb +def save_the_course_content(path='/tmp'): + html = world.browser.html.encode('ascii', 'ignore') + soup = BeautifulSoup(html) + + # get rid of the header, we only want to compare the body + soup.head.decompose() + + # for now, remove the data-id attributes, because they are + # causing mismatches between cms-master and master + for item in soup.find_all(attrs={'data-id': re.compile('.*')}): + del item['data-id'] + + # we also need to remove them from unrendered problems, + # where they are contained in the text of divs instead of + # in attributes of tags + # Be careful of whether or not it was the last attribute + # and needs a trailing space + for item in soup.find_all(text=re.compile(' data-id=".*?" ')): + s = unicode(item.string) + item.string.replace_with(re.sub(' data-id=".*?" 
', ' ', s)) + + for item in soup.find_all(text=re.compile(' data-id=".*?"')): + s = unicode(item.string) + item.string.replace_with(re.sub(' data-id=".*?"', ' ', s)) + + # prettify the html so it will compare better, with + # each HTML tag on its own line + output = soup.prettify() + + # use string slicing to grab everything after 'courseware/' in the URL + u = world.browser.url + section_url = u[u.find('courseware/')+11:] + + if not os.path.exists(path): + os.makedirs(path) + + filename = '%s.html' % (quote_plus(section_url)) + f = open('%s/%s' % (path, filename), 'w') + f.write(output) + f.close diff --git a/lms/envs/acceptance.py b/lms/envs/acceptance.py new file mode 100644 index 0000000000..e0857a4392 --- /dev/null +++ b/lms/envs/acceptance.py @@ -0,0 +1,41 @@ +""" +This config file extends the test environment configuration +so that we can run the lettuce acceptance tests. +""" +from .test import * + +# You need to start the server in debug mode, +# otherwise the browser will not render the pages correctly +DEBUG = True + +# Show the courses that are in the data directory +COURSES_ROOT = ENV_ROOT / "data" +DATA_DIR = COURSES_ROOT +MODULESTORE = { + 'default': { + 'ENGINE': 'xmodule.modulestore.xml.XMLModuleStore', + 'OPTIONS': { + 'data_dir': DATA_DIR, + 'default_class': 'xmodule.hidden_module.HiddenDescriptor', + } + } +} + +# Set this up so that rake lms[acceptance] and running the +# harvest command both use the same (test) database +# which they can flush without messing up your dev db +DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.sqlite3', + 'NAME': ENV_ROOT / "db" / "test_mitx.db", + 'TEST_NAME': ENV_ROOT / "db" / "test_mitx.db", + } +} + +# Do not display the YouTube videos in the browser while running the +# acceptance tests. 
This makes them faster and more reliable +MITX_FEATURES['STUB_VIDEO_FOR_TESTING'] = True + +# Include the lettuce app for acceptance testing, including the 'harvest' django-admin command +INSTALLED_APPS += ('lettuce.django',) +LETTUCE_APPS = ('portal',) # dummy app covers the home page, login, registration, and course enrollment diff --git a/lms/envs/aws.py b/lms/envs/aws.py index b58bc5602b..7b8c48f4af 100644 --- a/lms/envs/aws.py +++ b/lms/envs/aws.py @@ -76,5 +76,8 @@ DATABASES = AUTH_TOKENS['DATABASES'] XQUEUE_INTERFACE = AUTH_TOKENS['XQUEUE_INTERFACE'] +STAFF_GRADING_INTERFACE = AUTH_TOKENS.get('STAFF_GRADING_INTERFACE', STAFF_GRADING_INTERFACE) +PEER_GRADING_INTERFACE = AUTH_TOKENS.get('PEER_GRADING_INTERFACE', PEER_GRADING_INTERFACE) + PEARSON_TEST_USER = "pearsontest" PEARSON_TEST_PASSWORD = AUTH_TOKENS.get("PEARSON_TEST_PASSWORD") diff --git a/lms/envs/cms/acceptance.py b/lms/envs/cms/acceptance.py new file mode 100644 index 0000000000..e5ee2937f4 --- /dev/null +++ b/lms/envs/cms/acceptance.py @@ -0,0 +1,23 @@ +""" +This config file is a copy of dev environment without the Debug +Toolbar. I it suitable to run against acceptance tests. 
+ +""" +from .dev import * + +# REMOVE DEBUG TOOLBAR + +INSTALLED_APPS = tuple(e for e in INSTALLED_APPS if e != 'debug_toolbar') +INSTALLED_APPS = tuple(e for e in INSTALLED_APPS if e != 'debug_toolbar_mongo') + +MIDDLEWARE_CLASSES = tuple(e for e in MIDDLEWARE_CLASSES \ + if e != 'debug_toolbar.middleware.DebugToolbarMiddleware') + + +########################### LETTUCE TESTING ########################## +MITX_FEATURES['DISPLAY_TOY_COURSES'] = True + +INSTALLED_APPS += ('lettuce.django',) +# INSTALLED_APPS += ('portal',) + +LETTUCE_APPS = ('portal',) # dummy app covers the home page, login, registration, and course enrollment diff --git a/lms/envs/common.py b/lms/envs/common.py index 5e22e70307..4c9b3799c7 100644 --- a/lms/envs/common.py +++ b/lms/envs/common.py @@ -76,6 +76,8 @@ MITX_FEATURES = { 'DISABLE_LOGIN_BUTTON': False, # used in systems where login is automatic, eg MIT SSL + 'STUB_VIDEO_FOR_TESTING': False, # do not display video when running automated acceptance tests + # extrernal access methods 'ACCESS_REQUIRE_STAFF_FOR_COURSE': False, 'AUTH_USE_OPENID': False, @@ -190,6 +192,9 @@ DEBUG_TRACK_LOG = False MITX_ROOT_URL = '' +LOGIN_REDIRECT_URL = MITX_ROOT_URL + '/accounts/login' +LOGIN_URL = MITX_ROOT_URL + '/accounts/login' + COURSE_NAME = "6.002_Spring_2012" COURSE_NUMBER = "6.002x" COURSE_TITLE = "Circuits and Electronics" @@ -325,6 +330,37 @@ WIKI_USE_BOOTSTRAP_SELECT_WIDGET = False WIKI_LINK_LIVE_LOOKUPS = False WIKI_LINK_DEFAULT_LEVEL = 2 +################################# Staff grading config ##################### + +#By setting up the default settings with an incorrect user name and password, +# will get an error when attempting to connect +STAFF_GRADING_INTERFACE = { + 'url': 'http://sandbox-grader-001.m.edx.org/staff_grading', + 'username': 'incorrect_user', + 'password': 'incorrect_pass', + } + +# Used for testing, debugging +MOCK_STAFF_GRADING = False + +################################# Pearson TestCenter config ################ + 
+PEARSONVUE_SIGNINPAGE_URL = "https://www1.pearsonvue.com/testtaker/signin/SignInPage/EDX" +# TESTCENTER_ACCOMMODATION_REQUEST_EMAIL = "exam-help@edx.org" + +################################# Peer grading config ##################### + +#By setting up the default settings with an incorrect user name and password, +# will get an error when attempting to connect +PEER_GRADING_INTERFACE = { + 'url': 'http://sandbox-grader-001.m.edx.org/peer_grading', + 'username': 'incorrect_user', + 'password': 'incorrect_pass', + } + +# Used for testing, debugging +MOCK_PEER_GRADING = False + ################################# Jasmine ################################### JASMINE_TEST_DIRECTORY = PROJECT_ROOT + '/static/coffee' @@ -400,6 +436,9 @@ courseware_only_js += [ ] main_vendor_js = [ + 'js/vendor/RequireJS.js', + 'js/vendor/json2.js', + 'js/vendor/RequireJS.js', 'js/vendor/jquery.min.js', 'js/vendor/jquery-ui.min.js', 'js/vendor/jquery.cookie.js', @@ -409,6 +448,10 @@ main_vendor_js = [ discussion_js = sorted(glob2.glob(PROJECT_ROOT / 'static/coffee/src/discussion/**/*.coffee')) +staff_grading_js = sorted(glob2.glob(PROJECT_ROOT / 'static/coffee/src/staff_grading/**/*.coffee')) +peer_grading_js = sorted(glob2.glob(PROJECT_ROOT / 'static/coffee/src/peer_grading/**/*.coffee')) + + # Load javascript from all of the available xmodules, and # prep it for use in pipeline js from xmodule.x_module import XModuleDescriptor @@ -471,7 +514,8 @@ with open(module_styles_path, 'w') as module_styles: PIPELINE_JS = { 'application': { - # Application will contain all paths not in courseware_only_js + # Application will contain all paths not in courseware_only_js or + # discussion_js or staff_grading_js 'source_filenames': [ pth.replace(COMMON_ROOT / 'static/', '') for pth @@ -479,7 +523,10 @@ PIPELINE_JS = { ] + [ pth.replace(PROJECT_ROOT / 'static/', '') for pth in sorted(glob2.glob(PROJECT_ROOT / 'static/coffee/src/**/*.coffee'))\ - if pth not in courseware_only_js and pth not in 
discussion_js + if (pth not in courseware_only_js and + pth not in discussion_js and + pth not in peer_grading_js and + pth not in staff_grading_js) ] + [ 'js/form.ext.js', 'js/my_courses_dropdown.js', @@ -508,6 +555,14 @@ PIPELINE_JS = { 'discussion' : { 'source_filenames': [pth.replace(PROJECT_ROOT / 'static/', '') for pth in discussion_js], 'output_filename': 'js/discussion.js' + }, + 'staff_grading' : { + 'source_filenames': [pth.replace(PROJECT_ROOT / 'static/', '') for pth in staff_grading_js], + 'output_filename': 'js/staff_grading.js' + }, + 'peer_grading' : { + 'source_filenames': [pth.replace(PROJECT_ROOT / 'static/', '') for pth in peer_grading_js], + 'output_filename': 'js/peer_grading.js' } } @@ -581,6 +636,7 @@ INSTALLED_APPS = ( 'util', 'certificates', 'instructor', + 'open_ended_grading', 'psychometrics', 'licenses', diff --git a/lms/envs/dev.py b/lms/envs/dev.py index 0db028866a..50f1712752 100644 --- a/lms/envs/dev.py +++ b/lms/envs/dev.py @@ -41,7 +41,7 @@ DATABASES = { } CACHES = { - # This is the cache used for most things. + # This is the cache used for most things. # In staging/prod envs, the sessions also live here. 
'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', @@ -104,8 +104,25 @@ SUBDOMAIN_BRANDING = { COMMENTS_SERVICE_KEY = "PUT_YOUR_API_KEY_HERE" +################################# mitx revision string ##################### +MITX_VERSION_STRING = os.popen('cd %s; git describe' % REPO_ROOT).read().strip() +################################# Staff grading config ##################### + +STAFF_GRADING_INTERFACE = { + 'url': 'http://127.0.0.1:3033/staff_grading', + 'username': 'lms', + 'password': 'abcd', + } + +################################# Peer grading config ##################### + +PEER_GRADING_INTERFACE = { + 'url': 'http://127.0.0.1:3033/peer_grading', + 'username': 'lms', + 'password': 'abcd', + } ################################ LMS Migration ################################# MITX_FEATURES['ENABLE_LMS_MIGRATION'] = True MITX_FEATURES['ACCESS_REQUIRE_STAFF_FOR_COURSE'] = False # require that user be in the staff_* group to be able to enroll diff --git a/lms/envs/logsettings.py b/lms/envs/logsettings.py index 0c0e0a577e..03d4cc67e6 100644 --- a/lms/envs/logsettings.py +++ b/lms/envs/logsettings.py @@ -40,7 +40,7 @@ def get_logger_config(log_dir, logging_env=logging_env, hostname=hostname) handlers = ['console', 'local'] if debug else ['console', - 'syslogger-remote', 'local', 'newrelic'] + 'syslogger-remote', 'local'] logger_config = { 'version': 1, diff --git a/lms/envs/test.py b/lms/envs/test.py index e815efdf4e..e9e4a43c6f 100644 --- a/lms/envs/test.py +++ b/lms/envs/test.py @@ -44,12 +44,6 @@ STATUS_MESSAGE_PATH = TEST_ROOT / "status_message.json" COURSES_ROOT = TEST_ROOT / "data" DATA_DIR = COURSES_ROOT -LOGGING = get_logger_config(TEST_ROOT / "log", - logging_env="dev", - tracking_filename="tracking.log", - dev_env=True, - debug=True) - COMMON_TEST_DATA_ROOT = COMMON_ROOT / "test" / "data" # Where the content data is checked out. This may not exist on jenkins. 
GITHUB_REPO_ROOT = ENV_ROOT / "data" @@ -65,6 +59,11 @@ XQUEUE_INTERFACE = { } XQUEUE_WAITTIME_BETWEEN_REQUESTS = 5 # seconds + +# Don't rely on a real staff grading backend +MOCK_STAFF_GRADING = True +MOCK_PEER_GRADING = True + # TODO (cpennington): We need to figure out how envs/test.py can inject things # into common.py so that we don't have to repeat this sort of thing STATICFILES_DIRS = [ @@ -99,7 +98,7 @@ DATABASES = { } CACHES = { - # This is the cache used for most things. + # This is the cache used for most things. # In staging/prod envs, the sessions also live here. 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', diff --git a/lms/lib/symmath/formula.py b/lms/lib/symmath/formula.py index 1698b004d9..bab0ab3691 100644 --- a/lms/lib/symmath/formula.py +++ b/lms/lib/symmath/formula.py @@ -422,7 +422,8 @@ class formula(object): def GetContentMathML(self, asciimath, mathml): # URL = 'http://192.168.1.2:8080/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo' - URL = 'http://127.0.0.1:8080/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo' + # URL = 'http://127.0.0.1:8080/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo' + URL = 'https://math-xserver.mitx.mit.edu/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo' if 1: payload = {'asciiMathInput': asciimath, @@ -430,7 +431,7 @@ class formula(object): #'asciiMathML':unicode(mathml).encode('utf-8'), } headers = {'User-Agent': "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.13) Gecko/20080311 Firefox/2.0.0.13"} - r = requests.post(URL, data=payload, headers=headers) + r = requests.post(URL, data=payload, headers=headers, verify=False) r.encoding = 'utf-8' ret = r.text #print "encoding: ",r.encoding diff --git a/lms/static/admin/css/ie.css b/lms/static/admin/css/ie.css deleted file mode 100644 index fd00f7f204..0000000000 --- a/lms/static/admin/css/ie.css +++ /dev/null @@ -1,63 +0,0 @@ -/* IE 6 & 7 */ - -/* Proper fixed width for dashboard in IE6 */ - -.dashboard #content { 
- *width: 768px; -} - -.dashboard #content-main { - *width: 535px; -} - -/* IE 6 ONLY */ - -/* Keep header from flowing off the page */ - -#container { - _position: static; -} - -/* Put the right sidebars back on the page */ - -.colMS #content-related { - _margin-right: 0; - _margin-left: 10px; - _position: static; -} - -/* Put the left sidebars back on the page */ - -.colSM #content-related { - _margin-right: 10px; - _margin-left: -115px; - _position: static; -} - -.form-row { - _height: 1%; -} - -/* Fix right margin for changelist filters in IE6 */ - -#changelist-filter ul { - _margin-right: -10px; -} - -/* IE ignores min-height, but treats height as if it were min-height */ - -.change-list .filtered { - _height: 400px; -} - -/* IE doesn't know alpha transparency in PNGs */ - -.inline-deletelink { - background: transparent url(../img/inline-delete-8bit.png) no-repeat; -} - -/* IE7 doesn't support inline-block */ -.change-list ul.toplinks li { - zoom: 1; - *display: inline; -} \ No newline at end of file diff --git a/lms/static/coffee/files.json b/lms/static/coffee/files.json index 4721ef58bb..5dc03613b9 100644 --- a/lms/static/coffee/files.json +++ b/lms/static/coffee/files.json @@ -1,5 +1,6 @@ { "js_files": [ + "/static/js/vendor/RequireJS.js", "/static/js/vendor/jquery.min.js", "/static/js/vendor/jquery-ui.min.js", "/static/js/vendor/jquery.leanModal.min.js", diff --git a/lms/static/coffee/spec/requirejs_spec.coffee b/lms/static/coffee/spec/requirejs_spec.coffee new file mode 100644 index 0000000000..10d34a2f75 --- /dev/null +++ b/lms/static/coffee/spec/requirejs_spec.coffee @@ -0,0 +1,89 @@ +describe "RequireJS namespacing", -> + beforeEach -> + + # Jasmine does not provide a way to use the typeof operator. We need + # to create our own custom matchers so that a TypeError is not thrown. 
+ @addMatchers + requirejsTobeUndefined: -> + typeof requirejs is "undefined" + + requireTobeUndefined: -> + typeof require is "undefined" + + defineTobeUndefined: -> + typeof define is "undefined" + + + it "check that the RequireJS object is present in the global namespace", -> + expect(RequireJS).toEqual jasmine.any(Object) + expect(window.RequireJS).toEqual jasmine.any(Object) + + it "check that requirejs(), require(), and define() are not in the global namespace", -> + + # The custom matchers that we defined in the beforeEach() function do + # not operate on an object. We pass a dummy empty object {} not to + # confuse Jasmine. + expect({}).requirejsTobeUndefined() + expect({}).requireTobeUndefined() + expect({}).defineTobeUndefined() + expect(window.requirejs).not.toBeDefined() + expect(window.require).not.toBeDefined() + expect(window.define).not.toBeDefined() + + +describe "RequireJS module creation", -> + inDefineCallback = undefined + inRequireCallback = undefined + it "check that we can use RequireJS to define() and require() a module", -> + + # Because Require JS works asynchronously when defining and requiring + # modules, we need to use the special Jasmine functions runs(), and + # waitsFor() to set up this test. + runs -> + + # Initialize the variable that we will test for. They will be set + # to true in the appropriate callback functions called by Require + # JS. If their values do not change, this will mean that something + # is not working as is intended. + inDefineCallback = false + inRequireCallback = false + + # Define our test module. + RequireJS.define "test_module", [], -> + inDefineCallback = true + + # This module returns an object. It can be accessed via the + # Require JS require() function. + module_status: "OK" + + + # Require our defined test module. 
+ RequireJS.require ["test_module"], (test_module) -> + inRequireCallback = true + + # If our test module was defined properly, then we should + # be able to get the object it returned, and query some + # property. + expect(test_module.module_status).toBe "OK" + + + + # We will wait for a specified amount of time (1 second), before + # checking if our module was defined and that we were able to + # require() the module. + waitsFor (-> + + # If at least one of the callback functions was not reached, we + # fail this test. + return false if (inDefineCallback isnt true) or (inRequireCallback isnt true) + + # Both of the callbacks were reached. + true + ), "We should eventually end up in the defined callback", 1000 + + # The final test behavior, after waitsFor() finishes waiting. + runs -> + expect(inDefineCallback).toBeTruthy() + expect(inRequireCallback).toBeTruthy() + + diff --git a/lms/static/coffee/src/courseware.coffee b/lms/static/coffee/src/courseware.coffee index 096094ead9..0992043e79 100644 --- a/lms/static/coffee/src/courseware.coffee +++ b/lms/static/coffee/src/courseware.coffee @@ -18,5 +18,6 @@ class @Courseware histg = new Histogram id, $(this).data('histogram') catch error histg = error - console.log(error) + if console? 
+ console.log(error) return histg diff --git a/lms/static/coffee/src/discussion/utils.coffee b/lms/static/coffee/src/discussion/utils.coffee index a032c0248f..6b2714dc54 100644 --- a/lms/static/coffee/src/discussion/utils.coffee +++ b/lms/static/coffee/src/discussion/utils.coffee @@ -249,7 +249,10 @@ class @DiscussionUtil $3 else if RE_DISPLAYMATH.test(text) text = text.replace RE_DISPLAYMATH, ($0, $1, $2, $3) -> - processedText += $1 + processor("$$" + $2 + "$$", 'display') + #processedText += $1 + processor("$$" + $2 + "$$", 'display') + #bug fix, ordering is off + processedText = processor("$$" + $2 + "$$", 'display') + processedText + processedText = $1 + processedText $3 else processedText += text diff --git a/lms/static/coffee/src/main.coffee b/lms/static/coffee/src/main.coffee index ec5cbdec5b..df4c8861f6 100644 --- a/lms/static/coffee/src/main.coffee +++ b/lms/static/coffee/src/main.coffee @@ -32,8 +32,18 @@ $ -> $('#login').click -> $('#login_form input[name="email"]').focus() + _gaq.push(['_trackPageview', '/login']) false $('#signup').click -> $('#signup-modal input[name="email"]').focus() + _gaq.push(['_trackPageview', '/signup']) false + + # fix for ie + if !Array::indexOf + Array::indexOf = (obj, start = 0) -> + for ele, i in this[start..] 
+ if ele is obj + return i + start + return -1 diff --git a/lms/static/coffee/src/peer_grading/peer_grading.coffee b/lms/static/coffee/src/peer_grading/peer_grading.coffee new file mode 100644 index 0000000000..0736057df8 --- /dev/null +++ b/lms/static/coffee/src/peer_grading/peer_grading.coffee @@ -0,0 +1,13 @@ +# This is a simple class that just hides the error container +# and message container when they are empty +# Can (and should be) expanded upon when our problem list +# becomes more sophisticated +class PeerGrading + constructor: () -> + @error_container = $('.error-container') + @error_container.toggle(not @error_container.is(':empty')) + + @message_container = $('.message-container') + @message_container.toggle(not @message_container.is(':empty')) + +$(document).ready(() -> new PeerGrading()) diff --git a/lms/static/coffee/src/peer_grading/peer_grading_problem.coffee b/lms/static/coffee/src/peer_grading/peer_grading_problem.coffee new file mode 100644 index 0000000000..e294c50f7c --- /dev/null +++ b/lms/static/coffee/src/peer_grading/peer_grading_problem.coffee @@ -0,0 +1,390 @@ +################################## +# +# This is the JS that renders the peer grading problem page. 
+# Fetches the correct problem and/or calibration essay +# and sends back the grades +# +# Should not be run when we don't have a location to send back +# to the server +# +# PeerGradingProblemBackend - +# makes all the ajax requests and provides a mock interface +# for testing purposes +# +# PeerGradingProblem - +# handles the rendering and user interactions with the interface +# +################################## +class PeerGradingProblemBackend + constructor: (ajax_url, mock_backend) -> + @mock_backend = mock_backend + @ajax_url = ajax_url + @mock_cnt = 0 + + post: (cmd, data, callback) -> + if @mock_backend + callback(@mock(cmd, data)) + else + # if this post request fails, the error callback will catch it + $.post(@ajax_url + cmd, data, callback) + .error => callback({success: false, error: "Error occured while performing this operation"}) + + mock: (cmd, data) -> + if cmd == 'is_student_calibrated' + # change to test each version + response = + success: true + calibrated: @mock_cnt >= 2 + else if cmd == 'show_calibration_essay' + #response = + # success: false + # error: "There was an error" + @mock_cnt++ + response = + success: true + submission_id: 1 + submission_key: 'abcd' + student_response: ''' + Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum et Malorum" (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum, "Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32. 
+ +The standard chunk of Lorem Ipsum used since the 1500s is reproduced below for those interested. Sections 1.10.32 and 1.10.33 from "de Finibus Bonorum et Malorum" by Cicero are also reproduced in their exact original form, accompanied by English versions from the 1914 translation by H. Rackham. + ''' + prompt: ''' +

    S11E3: Metal Bands

    +

    Shown below are schematic band diagrams for two different metals. Both diagrams appear different, yet both of the elements are undisputably metallic in nature.

    +

    * Why is it that both sodium and magnesium behave as metals, even though the s-band of magnesium is filled?

    +

    This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.

    + ''' + rubric: ''' +
      +
    • Metals tend to be good electronic conductors, meaning that they have a large number of electrons which are able to access empty (mobile) energy states within the material.
    • +
    • Sodium has a half-filled s-band, so there are a number of empty states immediately above the highest occupied energy levels within the band.
    • +
    • Magnesium has a full s-band, but the the s-band and p-band overlap in magnesium. Thus are still a large number of available energy states immediately above the s-band highest occupied energy level.
    • +
    + +

    Please score your response according to how many of the above components you identified:

    + ''' + max_score: 4 + else if cmd == 'get_next_submission' + response = + success: true + submission_id: 1 + submission_key: 'abcd' + student_response: '''Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed nec tristique ante. Proin at mauris sapien, quis varius leo. Morbi laoreet leo nisi. Morbi aliquam lacus ante. Cras iaculis velit sed diam mattis a fermentum urna luctus. Duis consectetur nunc vitae felis facilisis eget vulputate risus viverra. Cras consectetur ullamcorper lobortis. Nam eu gravida lorem. Nulla facilisi. Nullam quis felis enim. Mauris orci lectus, dictum id cursus in, vulputate in massa. + +Phasellus non varius sem. Nullam commodo lacinia odio sit amet egestas. Donec ullamcorper sapien sagittis arcu volutpat placerat. Phasellus ut pretium ante. Nam dictum pulvinar nibh dapibus tristique. Sed at tellus mi, fringilla convallis justo. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus tristique rutrum nulla sed eleifend. Praesent at nunc arcu. Mauris condimentum faucibus nibh, eget commodo quam viverra sed. Morbi in tincidunt dolor. Morbi sed augue et augue interdum fermentum. + +Curabitur tristique purus ac arcu consequat cursus. Cras diam felis, dignissim quis placerat at, aliquet ac metus. Mauris vulputate est eu nibh imperdiet varius. Cras aliquet rhoncus elit a laoreet. Mauris consectetur erat et erat scelerisque eu faucibus dolor consequat. Nam adipiscing sagittis nisl, eu mollis massa tempor ac. Nulla scelerisque tempus blandit. Phasellus ac ipsum eros, id posuere arcu. Nullam non sapien arcu. Vivamus sit amet lorem justo, ac tempus turpis. Suspendisse pharetra gravida imperdiet. Pellentesque lacinia mi eu elit luctus pellentesque. Sed accumsan libero a magna elementum varius. Nunc eget pellentesque metus. ''' + prompt: ''' +

    S11E3: Metal Bands

    +

    Shown below are schematic band diagrams for two different metals. Both diagrams appear different, yet both of the elements are undisputably metallic in nature.

    +

    * Why is it that both sodium and magnesium behave as metals, even though the s-band of magnesium is filled?

    +

    This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.

    + ''' + rubric: ''' +
      +
    • Metals tend to be good electronic conductors, meaning that they have a large number of electrons which are able to access empty (mobile) energy states within the material.
    • +
    • Sodium has a half-filled s-band, so there are a number of empty states immediately above the highest occupied energy levels within the band.
    • +
    • Magnesium has a full s-band, but the the s-band and p-band overlap in magnesium. Thus are still a large number of available energy states immediately above the s-band highest occupied energy level.
    • +
    + +

    Please score your response according to how many of the above components you identified:

    + ''' + max_score: 4 + else if cmd == 'save_calibration_essay' + response = + success: true + actual_score: 2 + else if cmd == 'save_grade' + response = + success: true + + return response + + +class PeerGradingProblem + constructor: (backend) -> + @prompt_wrapper = $('.prompt-wrapper') + @backend = backend + + + # get the location of the problem + @location = $('.peer-grading').data('location') + # prevent this code from trying to run + # when we don't have a location + if(!@location) + return + + # get the other elements we want to fill in + @submission_container = $('.submission-container') + @prompt_container = $('.prompt-container') + @rubric_container = $('.rubric-container') + @calibration_panel = $('.calibration-panel') + @grading_panel = $('.grading-panel') + @content_panel = $('.content-panel') + @grading_message = $('.grading-message') + @grading_message.hide() + + @grading_wrapper =$('.grading-wrapper') + @calibration_feedback_panel = $('.calibration-feedback') + @interstitial_page = $('.interstitial-page') + @interstitial_page.hide() + + @error_container = $('.error-container') + + @submission_key_input = $("input[name='submission-key']") + @essay_id_input = $("input[name='essay-id']") + @feedback_area = $('.feedback-area') + + @score_selection_container = $('.score-selection-container') + @score = null + @calibration = null + + @submit_button = $('.submit-button') + @action_button = $('.action-button') + @calibration_feedback_button = $('.calibration-feedback-button') + @interstitial_page_button = $('.interstitial-page-button') + + Collapsible.setCollapsibles(@content_panel) + + # Set up the click event handlers + @action_button.click -> history.back() + @calibration_feedback_button.click => + @calibration_feedback_panel.hide() + @grading_wrapper.show() + @is_calibrated_check() + + @interstitial_page_button.click => + @interstitial_page.hide() + @is_calibrated_check() + + @is_calibrated_check() + + + ########## + # + # Ajax calls to the backend + 
# + ########## + is_calibrated_check: () => + @backend.post('is_student_calibrated', {location: @location}, @calibration_check_callback) + + fetch_calibration_essay: () => + @backend.post('show_calibration_essay', {location: @location}, @render_calibration) + + fetch_submission_essay: () => + @backend.post('get_next_submission', {location: @location}, @render_submission) + + construct_data: () -> + data = + score: @score + location: @location + submission_id: @essay_id_input.val() + submission_key: @submission_key_input.val() + feedback: @feedback_area.val() + return data + + + submit_calibration_essay: ()=> + data = @construct_data() + @backend.post('save_calibration_essay', data, @calibration_callback) + + submit_grade: () => + data = @construct_data() + @backend.post('save_grade', data, @submission_callback) + + + ########## + # + # Callbacks for various events + # + ########## + + # called after we perform an is_student_calibrated check + calibration_check_callback: (response) => + if response.success + # if we haven't been calibrating before + if response.calibrated and (@calibration == null or @calibration == false) + @calibration = false + @fetch_submission_essay() + # If we were calibrating before and no longer need to, + # show the interstitial page + else if response.calibrated and @calibration == true + @calibration = false + @render_interstitial_page() + else + @calibration = true + @fetch_calibration_essay() + else if response.error + @render_error(response.error) + else + @render_error("Error contacting the grading service") + + + # called after we submit a calibration score + calibration_callback: (response) => + if response.success + @render_calibration_feedback(response) + else if response.error + @render_error(response.error) + else + @render_error("Error saving calibration score") + + # called after we submit a submission score + submission_callback: (response) => + if response.success + @is_calibrated_check() + @grading_message.fadeIn() + 
@grading_message.html("

    Grade sent successfully.

    ") + else + if response.error + @render_error(response.error) + else + @render_error("Error occurred while submitting grade") + + # called after a grade is selected on the interface + graded_callback: (event) => + @grading_message.hide() + @score = event.target.value + @show_submit_button() + + + + ########## + # + # Rendering methods and helpers + # + ########## + # renders a calibration essay + render_calibration: (response) => + if response.success + + # load in all the data + @submission_container.html("

    Training Essay

    ") + @render_submission_data(response) + # TODO: indicate that we're in calibration mode + @calibration_panel.addClass('current-state') + @grading_panel.removeClass('current-state') + + # Display the right text + # both versions of the text are written into the template itself + # we only need to show/hide the correct ones at the correct time + @calibration_panel.find('.calibration-text').show() + @grading_panel.find('.calibration-text').show() + @calibration_panel.find('.grading-text').hide() + @grading_panel.find('.grading-text').hide() + + + @submit_button.unbind('click') + @submit_button.click @submit_calibration_essay + + else if response.error + @render_error(response.error) + else + @render_error("An error occurred while retrieving the next calibration essay") + + # Renders a student submission to be graded + render_submission: (response) => + if response.success + @submit_button.hide() + @submission_container.html("

    Submitted Essay

    ") + @render_submission_data(response) + + @calibration_panel.removeClass('current-state') + @grading_panel.addClass('current-state') + + # Display the correct text + # both versions of the text are written into the template itself + # we only need to show/hide the correct ones at the correct time + @calibration_panel.find('.calibration-text').hide() + @grading_panel.find('.calibration-text').hide() + @calibration_panel.find('.grading-text').show() + @grading_panel.find('.grading-text').show() + + @submit_button.unbind('click') + @submit_button.click @submit_grade + else if response.error + @render_error(response.error) + else + @render_error("An error occured when retrieving the next submission.") + + + make_paragraphs: (text) -> + paragraph_split = text.split(/\n\s*\n/) + new_text = '' + for paragraph in paragraph_split + new_text += "

    #{paragraph}

    " + return new_text + + # render common information between calibration and grading + render_submission_data: (response) => + @content_panel.show() + + @submission_container.append(@make_paragraphs(response.student_response)) + @prompt_container.html(response.prompt) + @rubric_container.html(response.rubric) + @submission_key_input.val(response.submission_key) + @essay_id_input.val(response.submission_id) + @setup_score_selection(response.max_score) + + @submit_button.hide() + @action_button.hide() + @calibration_feedback_panel.hide() + + + render_calibration_feedback: (response) => + # display correct grade + @calibration_feedback_panel.slideDown() + calibration_wrapper = $('.calibration-feedback-wrapper') + calibration_wrapper.html("

    The score you gave was: #{@score}. The actual score is: #{response.actual_score}

    ") + + + score = parseInt(@score) + actual_score = parseInt(response.actual_score) + + if score == actual_score + calibration_wrapper.append("

    Congratulations! Your score matches the actual score!

    ") + else + calibration_wrapper.append("

    Please try to understand the grading critera better to be more accurate next time.

    ") + + # disable score selection and submission from the grading interface + $("input[name='score-selection']").attr('disabled', true) + @submit_button.hide() + + render_interstitial_page: () => + @content_panel.hide() + @interstitial_page.show() + + render_error: (error_message) => + @error_container.show() + @calibration_feedback_panel.hide() + @error_container.html(error_message) + @content_panel.hide() + @action_button.show() + + show_submit_button: () => + @submit_button.show() + + setup_score_selection: (max_score) => + # first, get rid of all the old inputs, if any. + @score_selection_container.html('Choose score: ') + + # Now create new labels and inputs for each possible score. + for score in [0..max_score] + id = 'score-' + score + label = """""" + + input = """ + + """ # " fix broken parsing in emacs + @score_selection_container.append(input + label) + + # And now hook up an event handler again + $("input[name='score-selection']").change @graded_callback + + + +mock_backend = false +ajax_url = $('.peer-grading').data('ajax_url') +backend = new PeerGradingProblemBackend(ajax_url, mock_backend) +$(document).ready(() -> new PeerGradingProblem(backend)) diff --git a/lms/static/coffee/src/staff_grading/staff_grading.coffee b/lms/static/coffee/src/staff_grading/staff_grading.coffee new file mode 100644 index 0000000000..d36cf45c64 --- /dev/null +++ b/lms/static/coffee/src/staff_grading/staff_grading.coffee @@ -0,0 +1,404 @@ +# wrap everything in a class in case we want to use inside xmodules later + +get_random_int: (min, max) -> + return Math.floor(Math.random() * (max - min + 1)) + min + +# states +state_grading = "grading" +state_graded = "graded" +state_no_data = "no_data" +state_error = "error" + +class StaffGradingBackend + constructor: (ajax_url, mock_backend) -> + @ajax_url = ajax_url + @mock_backend = mock_backend + if @mock_backend + @mock_cnt = 0 + + mock: (cmd, data) -> + # Return a mock response to cmd and data + # should take a location as an 
argument + if cmd == 'get_next' + @mock_cnt++ + switch data.location + when 'i4x://MITx/3.091x/problem/open_ended_demo1' + response = + success: true + problem_name: 'Problem 1' + num_graded: 3 + min_for_ml: 5 + num_pending: 4 + prompt: ''' +

    S11E3: Metal Bands

    +

    Shown below are schematic band diagrams for two different metals. Both diagrams appear different, yet both of the elements are undisputably metallic in nature.

    + +

    * Why is it that both sodium and magnesium behave as metals, even though the s-band of magnesium is filled?

    +

    This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.

    + ''' + submission: ''' + Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum et Malorum" (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum, "Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32. + +The standard chunk of Lorem Ipsum used since the 1500s is reproduced below for those interested. Sections 1.10.32 and 1.10.33 from "de Finibus Bonorum et Malorum" by Cicero are also reproduced in their exact original form, accompanied by English versions from the 1914 translation by H. Rackham. + ''' + rubric: ''' +
      +
    • Metals tend to be good electronic conductors, meaning that they have a large number of electrons which are able to access empty (mobile) energy states within the material.
    • +
    • Sodium has a half-filled s-band, so there are a number of empty states immediately above the highest occupied energy levels within the band.
    • +
    • Magnesium has a full s-band, but the the s-band and p-band overlap in magnesium. Thus are still a large number of available energy states immediately above the s-band highest occupied energy level.
    • +
    + +

    Please score your response according to how many of the above components you identified:

    + ''' + submission_id: @mock_cnt + max_score: 2 + @mock_cnt % 3 + ml_error_info : 'ML accuracy info: ' + @mock_cnt + when 'i4x://MITx/3.091x/problem/open_ended_demo2' + response = + success: true + problem_name: 'Problem 2' + num_graded: 2 + min_for_ml: 5 + num_pending: 4 + prompt: 'This is a fake second problem' + submission: 'This is the best submission ever! ' + @mock_cnt + rubric: 'I am a rubric for grading things! ' + @mock_cnt + submission_id: @mock_cnt + max_score: 2 + @mock_cnt % 3 + ml_error_info : 'ML accuracy info: ' + @mock_cnt + else + response = + success: false + + + else if cmd == 'save_grade' + console.log("eval: #{data.score} pts, Feedback: #{data.feedback}") + response = + @mock('get_next', {location: data.location}) + # get_problem_list + # should get back a list of problem_ids, problem_names, num_graded, min_for_ml + else if cmd == 'get_problem_list' + @mock_cnt = 1 + response = + success: true + problem_list: [ + {location: 'i4x://MITx/3.091x/problem/open_ended_demo1', \ + problem_name: "Problem 1", num_graded: 3, num_pending: 5, min_for_ml: 10}, + {location: 'i4x://MITx/3.091x/problem/open_ended_demo2', \ + problem_name: "Problem 2", num_graded: 1, num_pending: 5, min_for_ml: 10} + ] + else + response = + success: false + error: 'Unknown command ' + cmd + + if @mock_cnt % 5 == 0 + response = + success: true + message: 'No more submissions' + + + if @mock_cnt % 7 == 0 + response = + success: false + error: 'An error for testing' + + return response + + + post: (cmd, data, callback) -> + if @mock_backend + callback(@mock(cmd, data)) + else + # TODO: replace with postWithPrefix when that's loaded + $.post(@ajax_url + cmd, data, callback) + .error => callback({success: false, error: "Error occured while performing this operation"}) + + +class StaffGrading + constructor: (backend) -> + @backend = backend + + # all the jquery selectors + + @problem_list_container = $('.problem-list-container') + @problem_list = $('.problem-list') + + 
@error_container = $('.error-container') + @message_container = $('.message-container') + + @prompt_name_container = $('.prompt-name') + @prompt_container = $('.prompt-container') + @prompt_wrapper = $('.prompt-wrapper') + + @submission_container = $('.submission-container') + @submission_wrapper = $('.submission-wrapper') + + @rubric_container = $('.rubric-container') + @rubric_wrapper = $('.rubric-wrapper') + @grading_wrapper = $('.grading-wrapper') + + @feedback_area = $('.feedback-area') + @score_selection_container = $('.score-selection-container') + + @submit_button = $('.submit-button') + @action_button = $('.action-button') + @skip_button = $('.skip-button') + + @problem_meta_info = $('.problem-meta-info-container') + @meta_info_wrapper = $('.meta-info-wrapper') + @ml_error_info_container = $('.ml-error-info-container') + + @breadcrumbs = $('.breadcrumbs') + + # model state + @state = state_no_data + @submission_id = null + @prompt = '' + @submission = '' + @rubric = '' + @error_msg = '' + @message = '' + @max_score = 0 + @ml_error_info= '' + @location = '' + @prompt_name = '' + @min_for_ml = 0 + @num_graded = 0 + @num_pending = 0 + + @score = null + @problems = null + + # action handlers + @submit_button.click @submit + # TODO: fix this to do something more intelligent + @action_button.click @submit + @skip_button.click @skip_and_get_next + + # send initial request automatically + @get_problem_list() + + + setup_score_selection: => + # first, get rid of all the old inputs, if any. + @score_selection_container.html('Choose score: ') + + # Now create new labels and inputs for each possible score. 
+ for score in [0..@max_score] + id = 'score-' + score + label = """""" + + input = """ + + """ # " fix broken parsing in emacs + @score_selection_container.append(input + label) + + # And now hook up an event handler again + $("input[name='score-selection']").change @graded_callback + + + set_button_text: (text) => + @action_button.attr('value', text) + + graded_callback: (event) => + @score = event.target.value + @state = state_graded + @message = '' + @render_view() + + ajax_callback: (response) => + # always clear out errors and messages on transition. + @error_msg = '' + @message = '' + + if response.success + if response.problem_list + @problems = response.problem_list + else if response.submission + @data_loaded(response) + else + @no_more(response.message) + else + @error(response.error) + + @render_view() + + get_next_submission: (location) -> + @location = location + @list_view = false + @backend.post('get_next', {location: location}, @ajax_callback) + + skip_and_get_next: () => + data = + score: @score + feedback: @feedback_area.val() + submission_id: @submission_id + location: @location + skipped: true + @backend.post('save_grade', data, @ajax_callback) + + get_problem_list: () -> + @list_view = true + @backend.post('get_problem_list', {}, @ajax_callback) + + submit_and_get_next: () -> + data = + score: @score + feedback: @feedback_area.val() + submission_id: @submission_id + location: @location + + @backend.post('save_grade', data, @ajax_callback) + + error: (msg) -> + @error_msg = msg + @state = state_error + + data_loaded: (response) -> + @prompt = response.prompt + @submission = response.submission + @rubric = response.rubric + @submission_id = response.submission_id + @feedback_area.val('') + @max_score = response.max_score + @score = null + @ml_error_info=response.ml_error_info + @prompt_name = response.problem_name + @num_graded = response.num_graded + @min_for_ml = response.min_for_ml + @num_pending = response.num_pending + @state = 
state_grading + if not @max_score? + @error("No max score specified for submission.") + + no_more: (message) -> + @prompt = null + @prompt_name = '' + @num_graded = 0 + @min_for_ml = 0 + @submission = null + @rubric = null + @ml_error_info = null + @submission_id = null + @message = message + @score = null + @max_score = 0 + @state = state_no_data + + + render_view: () -> + # clear the problem list and breadcrumbs + @problem_list.html('') + @breadcrumbs.html('') + @problem_list_container.toggle(@list_view) + if @backend.mock_backend + @message = @message + "

    NOTE: Mocking backend.

    " + @message_container.html(@message) + @error_container.html(@error_msg) + @message_container.toggle(@message != "") + @error_container.toggle(@error_msg != "") + + + # only show the grading elements when we are not in list view or the state + # is invalid + show_grading_elements = !(@list_view || @state == state_error || + @state == state_no_data) + @prompt_wrapper.toggle(show_grading_elements) + @submission_wrapper.toggle(show_grading_elements) + @rubric_wrapper.toggle(show_grading_elements) + @grading_wrapper.toggle(show_grading_elements) + @meta_info_wrapper.toggle(show_grading_elements) + @action_button.hide() + + if @list_view + @render_list() + else + @render_problem() + + problem_link:(problem) -> + link = $('').attr('href', "javascript:void(0)").append( + "#{problem.problem_name} (#{problem.num_graded} graded, #{problem.num_pending} pending, required to grade #{problem.num_required} more)") + .click => + @get_next_submission problem.location + + make_paragraphs: (text) -> + paragraph_split = text.split(/\n\s*\n/) + new_text = '' + for paragraph in paragraph_split + new_text += "

    #{paragraph}

    " + return new_text + + render_list: () -> + for problem in @problems + @problem_list.append($('
  • ').append(@problem_link(problem))) + + render_problem: () -> + # make the view elements match the state. Idempotent. + show_submit_button = true + show_action_button = true + + problem_list_link = $('').attr('href', 'javascript:void(0);') + .append("< Back to problem list") + .click => @get_problem_list() + + # set up the breadcrumbing + @breadcrumbs.append(problem_list_link) + + + if @state == state_error + @set_button_text('Try loading again') + show_action_button = true + + else if @state == state_grading + @ml_error_info_container.html(@ml_error_info) + meta_list = $(" diff --git a/lms/templates/courseware/instructor_dashboard.html b/lms/templates/courseware/instructor_dashboard.html index 29f1602721..3260ee569b 100644 --- a/lms/templates/courseware/instructor_dashboard.html +++ b/lms/templates/courseware/instructor_dashboard.html @@ -42,6 +42,9 @@ table.stat_table td { a.selectedmode { background-color: yellow; } +textarea { + height: 200px; +} .jvectormap-label { position: absolute; @@ -102,14 +105,17 @@ function goto( mode) Psychometrics | %endif Admin | - Forum Admin + Forum Admin | + Enrollment | + Manage Groups %if settings.MITX_FEATURES.get('ENABLE_INSTRUCTOR_ANALYTICS'): | Analytics %endif ] -
    ${djangopid}
    +
    ${djangopid} + | ${mitx_version}
    @@ -117,6 +123,12 @@ function goto( mode) ##----------------------------------------------------------------------------- %if modeflag.get('Grades'): + + %if offline_grade_log: +

    Pre-computed grades ${offline_grade_log} available: Use? +

    + %endif +

    Gradebook

    @@ -142,6 +154,47 @@ function goto( mode)

    +
    + + %if settings.MITX_FEATURES.get('REMOTE_GRADEBOOK_URL','') and instructor_access: + + <% + rg = course.metadata.get('remote_gradebook',{}) + %> + +

    Export grades to remote gradebook

    +

    The assignments defined for this course should match the ones + stored in the gradebook, for this to work properly!

    + +
      +
    • Gradebook name: ${rg.get('name','None defined!')} +
      +
      + + +
      +
      +
    • +
    • +
      +
      +
    • +
    • Assignment name: +
      +
      + + + +
    • +
    + + %endif + +

    Student-specific grade inspection and adjustment

    +

    edX email address or their username:

    +

    +

    and, if you want to reset the number of attempts for a problem, the urlname of that problem

    +

    %endif ##----------------------------------------------------------------------------- @@ -172,11 +225,22 @@ function goto( mode)

    - + +


    %endif + %if admin_access: +
    +

    + +

    + + +


    + %endif + %if settings.MITX_FEATURES['ENABLE_MANUAL_GIT_RELOAD'] and admin_access:

    @@ -212,9 +276,72 @@ function goto( mode) %endif %endif -

    +##----------------------------------------------------------------------------- +%if modeflag.get('Enrollment'): + +
    +

    + + +

    + Student Email: + + +


    + + %if settings.MITX_FEATURES.get('REMOTE_GRADEBOOK_URL','') and instructor_access: + + <% + rg = course.metadata.get('remote_gradebook',{}) + %> + +

    Pull enrollment from remote gradebook

    +
      +
    • Gradebook name: ${rg.get('name','None defined!')} +
    • Section:
    • +
    + + + + +
    + + %endif + +

    Add students: enter emails, separated by new lines or commas;

    + + + +%endif ##----------------------------------------------------------------------------- + +%if modeflag.get('Manage Groups'): + %if instructor_access: +
    +

    + +

    + Enter usernames or emails for students who should be beta-testers, one per line, or separated by commas. They will get to + see course materials early, as configured via the days_early_for_beta option in the course policy. +

    +

    + + + +

    +
    + %endif +%endif + + + +%if msg: +

    ${msg}

    +%endif +##----------------------------------------------------------------------------- +##----------------------------------------------------------------------------- + %if modeflag.get('Analytics'):

    @@ -373,7 +500,8 @@ function goto( mode) %endif ##----------------------------------------------------------------------------- -%if modeflag.get('Psychometrics') is None: + +%if datatable and modeflag.get('Psychometrics') is None:

    @@ -422,9 +550,6 @@ function goto( mode) ##----------------------------------------------------------------------------- ## always show msg -%if msg: -

    ${msg}

    -%endif ##----------------------------------------------------------------------------- %if modeflag.get('Admin'): diff --git a/lms/templates/courseware/progress.html b/lms/templates/courseware/progress.html index 81268ff081..fb163d112d 100644 --- a/lms/templates/courseware/progress.html +++ b/lms/templates/courseware/progress.html @@ -18,7 +18,7 @@ diff --git a/lms/templates/courseware/progress_graph.js b/lms/templates/courseware/progress_graph.js index 189137ada3..449cad766f 100644 --- a/lms/templates/courseware/progress_graph.js +++ b/lms/templates/courseware/progress_graph.js @@ -1,4 +1,4 @@ -<%page args="grade_summary, grade_cutoffs, graph_div_id, **kwargs"/> +<%page args="grade_summary, grade_cutoffs, graph_div_id, show_grade_breakdown = True, show_grade_cutoffs = True, **kwargs"/> <%! import json import math @@ -70,25 +70,26 @@ $(function () { series = categories.values() overviewBarX = tickIndex extraColorIndex = len(categories) #Keeping track of the next color to use for categories not in categories[] - - for section in grade_summary['grade_breakdown']: - if section['percent'] > 0: - if section['category'] in categories: - color = categories[ section['category'] ]['color'] - else: - color = colors[ extraColorIndex % len(colors) ] - extraColorIndex += 1 - - series.append({ - 'label' : section['category'] + "-grade_breakdown", - 'data' : [ [overviewBarX, section['percent']] ], - 'color' : color - }) - - detail_tooltips[section['category'] + "-grade_breakdown"] = [ section['detail'] ] - ticks += [ [overviewBarX, "Total"] ] - tickIndex += 1 + sectionSpacer + if show_grade_breakdown: + for section in grade_summary['grade_breakdown']: + if section['percent'] > 0: + if section['category'] in categories: + color = categories[ section['category'] ]['color'] + else: + color = colors[ extraColorIndex % len(colors) ] + extraColorIndex += 1 + + series.append({ + 'label' : section['category'] + "-grade_breakdown", + 'data' : [ [overviewBarX, section['percent']] ], + 
'color' : color + }) + + detail_tooltips[section['category'] + "-grade_breakdown"] = [ section['detail'] ] + + ticks += [ [overviewBarX, "Total"] ] + tickIndex += 1 + sectionSpacer totalScore = grade_summary['percent'] detail_tooltips['Dropped Scores'] = dropped_score_tooltips @@ -97,10 +98,14 @@ $(function () { ## ----------------------------- Grade cutoffs ------------------------- ## grade_cutoff_ticks = [ [1, "100%"], [0, "0%"] ] - descending_grades = sorted(grade_cutoffs, key=lambda x: grade_cutoffs[x], reverse=True) - for grade in descending_grades: - percent = grade_cutoffs[grade] - grade_cutoff_ticks.append( [ percent, "{0} {1:.0%}".format(grade, percent) ] ) + if show_grade_cutoffs: + grade_cutoff_ticks = [ [1, "100%"], [0, "0%"] ] + descending_grades = sorted(grade_cutoffs, key=lambda x: grade_cutoffs[x], reverse=True) + for grade in descending_grades: + percent = grade_cutoffs[grade] + grade_cutoff_ticks.append( [ percent, "{0} {1:.0%}".format(grade, percent) ] ) + else: + grade_cutoff_ticks = [ ] %> var series = ${ json.dumps( series ) }; @@ -135,9 +140,11 @@ $(function () { var $grade_detail_graph = $("#${graph_div_id}"); if ($grade_detail_graph.length > 0) { var plot = $.plot($grade_detail_graph, series, options); - //We need to put back the plotting of the percent here - var o = plot.pointOffset({x: ${overviewBarX} , y: ${totalScore}}); - $grade_detail_graph.append('
    ${"{totalscore:.0%}".format(totalscore=totalScore)}
    '); + + %if show_grade_breakdown: + var o = plot.pointOffset({x: ${overviewBarX} , y: ${totalScore}}); + $grade_detail_graph.append('
    ${"{totalscore:.0%}".format(totalscore=totalScore)}
    '); + %endif } var previousPoint = null; diff --git a/lms/templates/courseware/xqa_interface.html b/lms/templates/courseware/xqa_interface.html index 73f7cc6f52..c314cc7fb0 100644 --- a/lms/templates/courseware/xqa_interface.html +++ b/lms/templates/courseware/xqa_interface.html @@ -14,7 +14,7 @@ function sendlog(element_id, edit_link, staff_context){ location: staff_context.location, category : staff_context.category, 'username' : staff_context.user.username, - return : 'query', + 'return' : 'query', format : 'html', email : staff_context.user.email, tag:$('#' + element_id + '_xqa_tag').val(), diff --git a/lms/templates/dashboard.html b/lms/templates/dashboard.html index 177eb276af..8ec58a6a28 100644 --- a/lms/templates/dashboard.html +++ b/lms/templates/dashboard.html @@ -198,86 +198,125 @@ course_target = reverse('about_course', args=[course.id]) %> - -
    -
    -
    -
    -
    -
    -

    ${get_course_about_section(course, 'university')}

    -

    ${course.number} ${course.title}

    -
    -
    -

    + + + + + +

    +
    +

    % if course.has_ended(): - Course Completed - ${course.end_date_text} + Course Completed - ${course.end_date_text} % elif course.has_started(): - Course Started - ${course.start_date_text} + Course Started - ${course.start_date_text} % else: # hasn't started yet - Course Starts - ${course.start_date_text} + Course Starts - ${course.start_date_text} % endif

    -
    - % if course.id in show_courseware_links_for: -

    View Courseware

    - % endif -
    - +

    ${get_course_about_section(course, 'university')}

    +

    ${course.number} ${course.title}

    + + + <% + testcenter_exam_info = course.current_test_center_exam + registration = exam_registrations.get(course.id) + testcenter_register_target = reverse('begin_exam_registration', args=[course.id]) + %> + % if testcenter_exam_info is not None: + + % if registration is None and testcenter_exam_info.is_registering(): +
    + Register for Pearson exam +

    Registration for the Pearson exam is now open and will close on ${testcenter_exam_info.registration_end_date_text}

    +
    + % endif + + % if registration is not None: + % if registration.is_accepted: +
    + Schedule Pearson exam +

    Registration number: ${registration.client_candidate_id}

    +

    Write this down! You’ll need it to schedule your exam.

    +
    + % endif + % if registration.is_rejected: +
    +

    Your registration for the Pearson exam has been rejected. Please see your registration status details. Otherwise contact edX at exam-help@edx.org for further help.

    +
    + % endif + % if not registration.is_accepted and not registration.is_rejected: +
    +

    Your registration for the Pearson exam is pending. Within a few days, you should see a confirmation number here, which can be used to schedule your exam.

    +
    + % endif + % endif + % endif + + <% + cert_status = cert_statuses.get(course.id) + %> + % if course.has_ended() and cert_status: + <% + if cert_status['status'] == 'generating': + status_css_class = 'course-status-certrendering' + elif cert_status['status'] == 'ready': + status_css_class = 'course-status-certavailable' + elif cert_status['status'] == 'notpassing': + status_css_class = 'course-status-certnotavailable' + else: + status_css_class = 'course-status-processing' + %> +
    + + % if cert_status['status'] == 'processing': +

    Final course details are being wrapped up at + this time. Your final standing will be available shortly.

    + % elif cert_status['status'] in ('generating', 'ready', 'notpassing'): +

    Your final grade: + ${"{0:.0f}%".format(float(cert_status['grade'])*100)}. + % if cert_status['status'] == 'notpassing': + Grade required for a certificate: + ${"{0:.0f}%".format(float(course.lowest_passing_grade)*100)}. + % endif +

    + % endif + + % if cert_status['show_disabled_download_button'] or cert_status['show_download_url'] or cert_status['show_survey_button']: + + % endif +
    + + % endif + + % if course.id in show_courseware_links_for: + % if course.has_ended(): + View Archived Course + % else: + View Course + % endif + % endif + Unregister +
    - <% - cert_status = cert_statuses.get(course.id) - %> - % if course.has_ended() and cert_status: - <% - if cert_status['status'] == 'generating': - status_css_class = 'course-status-certrendering' - elif cert_status['status'] == 'ready': - status_css_class = 'course-status-certavailable' - elif cert_status['status'] == 'notpassing': - status_css_class = 'course-status-certnotavailable' - else: - status_css_class = 'course-status-processing' - %> -
    - - % if cert_status['status'] == 'processing': -

    Final course details are being wrapped up at - this time. Your final standing will be available shortly.

    - % elif cert_status['status'] in ('generating', 'ready'): -

    You have received a grade of - ${cert_status['grade']} - in this course.

    - % elif cert_status['status'] == 'notpassing': -

    You did not complete the necessary requirements for - completion of this course.

    - % endif - - % if cert_status['show_disabled_download_button'] or cert_status['show_download_url'] or cert_status['show_survey_button']: - - % endif -
    - - % endif - - Unregister + % endfor % else: diff --git a/lms/templates/feed.rss b/lms/templates/feed.rss index 177c2c5b12..415199141d 100644 --- a/lms/templates/feed.rss +++ b/lms/templates/feed.rss @@ -6,14 +6,41 @@ ## EdX Blog - 2012-10-14T14:08:12-07:00 + 2012-12-19T14:00:12-07:00 + + tag:www.edx.org,2012:Post/10 + 2012-12-19T14:00:00-07:00 + 2012-12-19T14:00:00-07:00 + + edX announces first wave of new courses for Spring 2013 + <img src="${static.url('images/press/releases/edx-logo_240x180.png')}" /> + <p></p> + + + tag:www.edx.org,2012:Post/9 + 2012-12-10T14:00:00-07:00 + 2012-12-10T14:00:00-07:00 + + Georgetown University joins edX + <img src="${static.url('images/press/releases/georgetown-seal_240x180.png')}" /> + <p>Sixth institution to join global movement in year one</p> + + + tag:www.edx.org,2012:Post/8 + 2012-12-04T14:00:00-07:00 + 2012-12-04T14:00:00-07:00 + + Wellesley College joins edX + <img src="${static.url('images/press/releases/wellesley-seal_240x180.png')}" /> + <p>First liberal arts college to join edX</p> + tag:www.edx.org,2012:Post/7 2012-11-12T14:00:00-07:00 2012-11-12T14:00:00-07:00 edX and Massachusetts Community Colleges join in Gates-Funded educational initiative - <img src="${static.url('images/press/releases/mass_seal_240x180.png')}" /> + <img src="${static.url('images/press/releases/mass-seal_240x180.png')}" /> <p></p> diff --git a/lms/templates/footer.html b/lms/templates/footer.html index 96c80d151d..7fe7c18ccc 100644 --- a/lms/templates/footer.html +++ b/lms/templates/footer.html @@ -6,7 +6,7 @@
  • -
  • +
  • @@ -73,33 +73,46 @@
  • -
  • + + +
    + +
      +
    1. - +
      UTx
    2. +
    3. + + +
      + WellesleyX +
      +
      +
    4. +
    5. + + +
      + GeorgetownX +
      +
      +
    -
    - %for course in universities['MITx']: - <%include file="course.html" args="course=course" /> +
      + %for course in courses: +
    • + <%include file="course.html" args="course=course" /> +
    • %endfor -
    -
    - %for course in universities['HarvardX']: - <%include file="course.html" args="course=course" /> - %endfor -
    -
    - %for course in universities['BerkeleyX']: - <%include file="course.html" args="course=course" /> - %endfor -
    +
    @@ -108,6 +121,7 @@

    edX News & Announcements

    + edX MEDIA KIT
    @@ -150,7 +164,7 @@ diff --git a/lms/templates/instructor/staff_grading.html b/lms/templates/instructor/staff_grading.html new file mode 100644 index 0000000000..085480a332 --- /dev/null +++ b/lms/templates/instructor/staff_grading.html @@ -0,0 +1,94 @@ +<%inherit file="/main.html" /> +<%block name="bodyclass">${course.css_class} +<%namespace name='static' file='/static_content.html'/> + +<%block name="headextra"> + <%static:css group='course'/> + + +<%block name="title">${course.number} Staff Grading + +<%include file="/courseware/course_navigation.html" args="active_page='staff_grading'" /> + +<%block name="js_extra"> + <%static:js group='staff_grading'/> + + +
    + +
    +

    Staff grading

    + +
    +
    +
    +
    + + +
    +

    Instructions

    +
    +

    This is the list of problems that current need to be graded in order to train the machine learning models. Each problem needs to be trained separately, and we have indicated the number of student submissions that need to be graded in order for a model to be generated. You can grade more than the minimum required number of submissions--this will improve the accuracy of machine learning, though with diminishing returns. You can see the current accuracy of machine learning while grading.

    +
    + +

    Problem List

    +
      +
    +
    + + + +
    +

    +
    +

    Problem Information

    +
    +
    +

Machine Learning Information

    +
    +
    +
    +
    +

    Question

    +
    +
    +
    +
    +

    Grading Rubric

    +
    +
    +
    + +
    + +
    + +
    + +
    +

    Grading

    + +
    +
    +

    Student Submission

    +
    +
    +
    +
    +

    +

    + +
    + + +
    + + +
    + +
    + +
    +
    diff --git a/lms/templates/open_ended.html b/lms/templates/open_ended.html new file mode 100644 index 0000000000..cda3282a45 --- /dev/null +++ b/lms/templates/open_ended.html @@ -0,0 +1,31 @@ +
    +
    +
    + ${prompt|n} +
    + + +
    +
    + % if state == 'initial': + Unanswered + % elif state in ['done', 'post_assessment'] and correct == 'correct': + Correct + % elif state in ['done', 'post_assessment'] and correct == 'incorrect': + Incorrect + % elif state == 'assessing': + Submitted for grading + % endif + + % if hidden: +
    + % endif +
    + + + + +
    + + +
    diff --git a/lms/templates/open_ended_error.html b/lms/templates/open_ended_error.html new file mode 100644 index 0000000000..58a90f86ef --- /dev/null +++ b/lms/templates/open_ended_error.html @@ -0,0 +1,12 @@ +
    +
    +
    + There was an error with your submission. Please contact course staff. +
    +
    +
    +
    + ${errors} +
    +
    +
    \ No newline at end of file diff --git a/lms/templates/open_ended_evaluation.html b/lms/templates/open_ended_evaluation.html new file mode 100644 index 0000000000..da3f38b6a9 --- /dev/null +++ b/lms/templates/open_ended_evaluation.html @@ -0,0 +1,23 @@ +
    + ${msg|n} +
    +
    + Respond to Feedback +
    +
    +

    How accurate do you find this feedback?

    +
    +
      +
    • +
    • +
    • +
    • +
    • +
    +
    +

    Additional comments:

    + + +
    +
    +
    \ No newline at end of file diff --git a/lms/templates/open_ended_feedback.html b/lms/templates/open_ended_feedback.html new file mode 100644 index 0000000000..d8aa3d1a9e --- /dev/null +++ b/lms/templates/open_ended_feedback.html @@ -0,0 +1,17 @@ +
    +
    Feedback
    +
    +
    +

    Score: ${score}

    + % if grader_type == "ML": +

    Check below for full feedback:

    + % endif +
    +
    +
    +
    + ${ feedback | n} +
    + ${rubric_feedback | n} +
    +
    \ No newline at end of file diff --git a/lms/templates/open_ended_rubric.html b/lms/templates/open_ended_rubric.html new file mode 100644 index 0000000000..9f8a2ece4e --- /dev/null +++ b/lms/templates/open_ended_rubric.html @@ -0,0 +1,30 @@ + + % for i in range(len(rubric_categories)): + <% category = rubric_categories[i] %> + + + % for j in range(len(category['options'])): + <% option = category['options'][j] %> + + % endfor + + % endfor +
    + ${category['description']} + % if category['has_score'] == True: + (Your score: ${category['score']}) + % endif + +
    + ${option['text']} + % if option.has_key('selected'): + % if option['selected'] == True: +
    [${option['points']} points]
    + %else: +
    [${option['points']} points]
    + % endif + % else: +
    [${option['points']} points]
    + %endif +
    +
    \ No newline at end of file diff --git a/lms/templates/peer_grading/peer_grading.html b/lms/templates/peer_grading/peer_grading.html new file mode 100644 index 0000000000..598c803245 --- /dev/null +++ b/lms/templates/peer_grading/peer_grading.html @@ -0,0 +1,39 @@ +<%inherit file="/main.html" /> +<%block name="bodyclass">${course.css_class} +<%namespace name='static' file='/static_content.html'/> + +<%block name="headextra"> + <%static:css group='course'/> + + +<%block name="title">${course.number} Peer Grading + +<%include file="/courseware/course_navigation.html" args="active_page='peer_grading'" /> + +<%block name="js_extra"> + <%static:js group='peer_grading'/> + + +
    +
    +
    ${error_text}
    +

    Peer Grading

    +

    Instructions

    +

Here is a list of problems that need to be peer graded for this course.

    + % if success: + % if len(problem_list) == 0: +
    + Nothing to grade! +
    + %else: + + %endif + %endif +
    +
    diff --git a/lms/templates/peer_grading/peer_grading_problem.html b/lms/templates/peer_grading/peer_grading_problem.html new file mode 100644 index 0000000000..9f23c0f0b1 --- /dev/null +++ b/lms/templates/peer_grading/peer_grading_problem.html @@ -0,0 +1,112 @@ + +<%inherit file="/main.html" /> +<%block name="bodyclass">${course.css_class} +<%namespace name='static' file='/static_content.html'/> + +<%block name="headextra"> + <%static:css group='course'/> + + +<%block name="title">${course.number} Peer Grading. + +<%include file="/courseware/course_navigation.html" args="active_page='peer_grading'" /> + +<%block name="js_extra"> + <%static:js group='peer_grading'/> + + + +
    +
    +
    + +
    +

    Peer Grading

    +
    +
    +

    Learning to Grade

    +
    +

Before you can do any proper peer grading, you first need to understand how your own grading compares to that of the instructor. Once your grades begin to match the instructor's, you will move on to grading your peers!

    +
    +
    +

    You have successfully managed to calibrate your answers to that of the instructors and have moved onto the next step in the peer grading process.

    +
    +
    +
    +

    Grading

    +
    +

You cannot start grading until you have graded a sufficient number of training problems and have been able to demonstrate that your scores closely match those of the instructor.

    +
    +
    +

    Now that you have finished your training, you are now allowed to grade your peers. Please keep in mind that students are allowed to respond to the grades and feedback they receive.

    +
    +
    +
    + +
    +
    +
    Question
    +
    +
    +
    +
    +
    +
    +
    Rubric
    +
    +
    +
    +
    +
    + +
    + + +
    +

    Grading

    + +
    +
    +

    +
    +
    + + +
    +
    +

    +

    + +
    + + +
    + +
    + +
    +
    +
    + +
    +
    + + +
    +

    How did I do?

    +
    +
    + +
    + + +
    +

    Congratulations!

    +

    You have now completed the calibration step. You are now ready to start grading.

    + +
    + + +
    +
    diff --git a/lms/templates/self_assessment_hint.html b/lms/templates/self_assessment_hint.html index 64c45b809e..1adfc69e39 100644 --- a/lms/templates/self_assessment_hint.html +++ b/lms/templates/self_assessment_hint.html @@ -2,6 +2,6 @@
    ${hint_prompt}
    -
  • diff --git a/lms/templates/self_assessment_prompt.html b/lms/templates/self_assessment_prompt.html index 88549e9f56..2ec83ef2a7 100644 --- a/lms/templates/self_assessment_prompt.html +++ b/lms/templates/self_assessment_prompt.html @@ -1,14 +1,16 @@ -
    +
    ${prompt}
    - +
    +
    +
    ${initial_rubric}
    ${initial_hint}
    @@ -16,5 +18,4 @@
    ${initial_message}
    -
    diff --git a/lms/templates/self_assessment_rubric.html b/lms/templates/self_assessment_rubric.html index 5bcb3bba93..2d32ffe8d3 100644 --- a/lms/templates/self_assessment_rubric.html +++ b/lms/templates/self_assessment_rubric.html @@ -1,7 +1,7 @@

    Self-assess your answer with this rubric:

    - ${rubric} + ${rubric | n }
    % if not read_only: diff --git a/lms/templates/signup_modal.html b/lms/templates/signup_modal.html index 96b2e33ac3..22a4a93499 100644 --- a/lms/templates/signup_modal.html +++ b/lms/templates/signup_modal.html @@ -26,7 +26,7 @@ - + % else:

    Welcome ${extauth_email}


    diff --git a/lms/templates/static_templates/faq.html b/lms/templates/static_templates/faq.html index a8f6268bd5..91ac16e90c 100644 --- a/lms/templates/static_templates/faq.html +++ b/lms/templates/static_templates/faq.html @@ -12,77 +12,71 @@ Press Contact + +

    Organization

    -

    What is edX?

    -

    edX is a not-for-profit enterprise of its founding partners, the Massachusetts Institute of Technology (MIT) and Harvard University that offers online learning to on-campus students and to millions of people around the world. To do so, edX is building an open-source online learning platform and hosts an online web portal at www.edx.org for online education.

    -

    EdX currently offers HarvardX, MITx and BerkeleyX classes online for free. Beginning in Summer 2013, edX will also offer UTx (University of Texas) classes online for free. The University of Texas System includes nine universities and six health institutions. The edX institutions aim to extend their collective reach to build a global community of online students. Along with offering online courses, the three universities undertake research on how students learn and how technology can transform learning – both on-campus and online throughout the world.

    +

    What is edX?

    +

    edX is a not-for-profit enterprise of its founding partners, the Massachusetts Institute of Technology (MIT) and Harvard University that offers online learning to on-campus students and to millions of people around the world. To do so, edX is building an open-source online learning platform and hosts an online web portal at www.edx.org for online education.

    +

    EdX currently offers HarvardX, MITx and BerkeleyX classes online for free. Beginning in fall 2013, edX will offer WellesleyX and GeorgetownX classes online for free. The University of Texas System includes nine universities and six health institutions. The edX institutions aim to extend their collective reach to build a global community of online students. Along with offering online courses, the three universities undertake research on how students learn and how technology can transform learning both on-campus and online throughout the world.

    +
    -

    Why is The University of Texas System joining edX?

    -

    Joining edX not only allows UT faculty to showcase their work on a global stage, but also provides UT students the opportunity to take classes from their choice of UT institutions, as well as MIT, Harvard, UC Berkeley and future “X” Universities.

    -

    The UT System closely examined all the alternatives and determined that edX offered the best fit in terms of alignment of mission, platform and revenue model. The strength and reputation of the partner institutions – MIT, Harvard and UC Berkeley – was also a huge consideration. EdX is committed to both blended and online learning and to a non-profit, open source model. It is also governed by a board of academics with a commitment to excellence in learning.

    -
    -
    -

    What will The UT System’s direct participation entail?

    -

    The UT System will begin by offering one course on edX from The University of Texas at Austin in Summer 2013, and four courses in Fall 2013, likely at least one of those courses from one of its health institutions. The UT System is also making a $5 million investment in the edX platform. We will explore, experiment and innovate together.

    -
    -
    -

    Will edX be adding additional X Universities?

    -

    More than 140 institutions from around the world have expressed interest in collaborating with edX since Harvard and MIT announced its creation in May. EdX is focused above all on quality and developing the best not-for-profit model for online education. In addition to providing online courses on the edX platform, the “X University” Consortium will be a forum in which members can share experiences around online learning. Harvard, MIT, UC Berkeley and the UT System will work collaboratively to establish the “X University” Consortium, whose membership will expand to include additional “X Universities” as soon as possible. Each member of the consortium will offer courses on the edX platform as an “X University.” The gathering of many universities’ educational content together on one site will enable learners worldwide to access the course content of any participating university from a single website, and to use a set of online educational tools shared by all participating universities.

    -

    EdX will actively explore the addition of other institutions from around the world to the edX platform, and we look forward to adding more “X Universities” as capacity increases.

    +

    Will edX be adding additional X Universities?

    +

    More than 200 institutions from around the world have expressed interest in collaborating with edX since Harvard and MIT announced its creation in May. EdX is focused above all on quality and developing the best not-for-profit model for online education. In addition to providing online courses on the edX platform, the "X University" Consortium will be a forum in which members can share experiences around online learning. Harvard, MIT, UC Berkeley, the University of Texas system and the other consortium members will work collaboratively to establish the "X University" Consortium, whose membership will expand to include additional "X Universities". Each member of the consortium will offer courses on the edX platform as an "X University." The gathering of many universities' educational content together on one site will enable learners worldwide to access the offered course content of any participating university from a single website, and to use a set of online educational tools shared by all participating universities.

    +

    edX will actively explore the addition of other institutions from around the world to the edX platform, and looks forward to adding more "X Universities."

    Students

    -

    Who can take edX courses? Will there be an admissions process?

    -

    EdX will be available to anyone in the world with an internet connection, and in general, there will not be an admissions process.

    +

    Who can take edX courses? Will there be an admissions process?

    +

    EdX will be available to anyone in the world with an internet connection, and in general, there will not be an admissions process.

    -

    Will certificates be awarded?

    -

    Yes. Online learners who demonstrate mastery of subjects can earn a certificate of completion. Certificates will be issued by edX under the name of the underlying "X University" from where the course originated, i.e. HarvardX, MITx or BerkeleyX. For the courses in Fall 2012, those certificates will be free. There is a plan to charge a modest fee for certificates in the future.

    +

    Will certificates be awarded?

    +

    Yes. Online learners who demonstrate mastery of subjects can earn a certificate of mastery. Certificates will be issued by edX under the name of the underlying "X University" from where the course originated, i.e. HarvardX, MITx or BerkeleyX. For the courses in Fall 2012, those certificates will be free. There is a plan to charge a modest fee for certificates in the future.

    -

    What will the scope of the online courses be? How many? Which faculty?

    -

    Our goal is to offer a wide variety of courses across disciplines. There are currently seven courses offered for Fall 2012.

    +

    What will the scope of the online courses be? How many? Which faculty?

    +

    Our goal is to offer a wide variety of courses across disciplines. There are currently nine courses offered for Fall 2012.

    -

    Who is the learner? Domestic or international? Age range?

    -

    Improving teaching and learning for students on our campuses is one of our primary goals. Beyond that, we don’t have a target group of potential learners, as the goal is to make these courses available to anyone in the world – from any demographic – who has interest in advancing their own knowledge. The only requirement is to have a computer with an internet connection. More than 150,000 students from over 160 countries registered for MITx's first course, 6.002x: Circuits and Electronics. The age range of students certified in this course was from 14 to 74 years-old.

    +

    Who is the learner? Domestic or international? Age range?

    +

    Improving teaching and learning for students on our campuses is one of our primary goals. Beyond that, we don't have a target group of potential learners, as the goal is to make these courses available to anyone in the world - from any demographic - who has interest in advancing their own knowledge. The only requirement is to have a computer with an internet connection. More than 150,000 students from over 160 countries registered for MITx's first course, 6.002x: Circuits and Electronics. The age range of students certified in this course was from 14 to 74 years-old.

    -

    Will participating universities’ standards apply to all courses offered on the edX platform?

    -

    Yes: the reach changes exponentially, but the rigor remains the same.

    +

    Will participating universities' standards apply to all courses offered on the edX platform?

    +

    Yes: the reach changes exponentially, but the rigor remains the same.

    -

    How do you intend to test whether this approach is improving learning?

    -

    Edx institutions have assembled faculty members who will collect and analyze data to assess results and the impact edX is having on learning.

    +

    How do you intend to test whether this approach is improving learning?

    +

    Edx institutions have assembled faculty members who will collect and analyze data to assess results and the impact edX is having on learning.

    -

    How may I apply to study with edX?

    -

    Simply complete the online signup form. Enrolling will create your unique student record in the edX database, allow you to register for classes, and to receive a certificate on successful completion.

    +

    How may I apply to study with edX?

    +

    Simply complete the online signup form. Enrolling will create your unique student record in the edX database, allow you to register for classes, and to receive a certificate on successful completion.

    -

    How may another university participate in edX?

    -

    If you are from a university interested in discussing edX, please email university@edx.org

    +

    How may another university participate in edX?

    +

    If you are from a university interested in discussing edX, please email university@edx.org

    Technology Platform

    -

    What technology will edX use?

    -

    The edX open-source online learning platform will feature interactive learning designed specifically for the web. Features will include: self-paced learning, online discussion groups, wiki-based collaborative learning, assessment of learning as a student progresses through a course, and online laboratories and other interactive learning tools. The platform will also serve as a laboratory from which data will be gathered to better understand how students learn. Because it is open source, the platform will be continuously improved by a worldwide community of collaborators, with new features added as needs arise.

    -

    The first version of the technology was used in the first MITx course, 6.002x Circuits and Electronics, which launched in Spring, 2012.

    +

    What technology will edX use?

    +

    The edX open-source online learning platform will feature interactive learning designed specifically for the web. Features will include: self-paced learning, online discussion groups, wiki-based collaborative learning, assessment of learning as a student progresses through a course, and online laboratories and other interactive learning tools. The platform will also serve as a laboratory from which data will be gathered to better understand how students learn. Because it is open source, the platform will be continuously improved by a worldwide community of collaborators, with new features added as needs arise.

    +

    The first version of the technology was used in the first MITx course, 6.002x Circuits and Electronics, which launched in Spring, 2012.

    -

    How is this different from what other universities are doing online?

    -

    EdX is a not-for-profit enterprise built upon the shared educational missions of its founding partners, Harvard University and MIT. The edX platform will be available as open source. Also, a primary goal of edX is to improve teaching and learning on campus by experimenting with blended models of learning and by supporting faculty in conducting significant research on how students learn.

    +

    How is this different from what other universities are doing online?

    +

    EdX is a not-for-profit enterprise built upon the shared educational missions of its founding partners, Harvard University and MIT. The edX platform will be available as open source. Also, a primary goal of edX is to improve teaching and learning on campus by experimenting with blended models of learning and by supporting faculty in conducting significant research on how students learn.

    @@ -90,7 +84,6 @@ @@ -98,5 +91,5 @@
    %if user.is_authenticated(): - <%include file="../signup_modal.html" /> +<%include file="../signup_modal.html" /> %endif diff --git a/lms/templates/static_templates/help.html b/lms/templates/static_templates/help.html index 7d1748776c..04c9164289 100644 --- a/lms/templates/static_templates/help.html +++ b/lms/templates/static_templates/help.html @@ -5,40 +5,304 @@ <%block name="title">edX Help +<%block name="js_extra"> + + +

    Help


    -
    -

    I tried to sign up, but it says the username is already taken.

    -

    If you have previously signed up for an MITx account, you already have an edX account and can log in with your existing username and password. If you don’t have an MITx account and received this error, it's possible that someone else has already signed up with that username. Please try a different, more unique username – for example, try adding a random number to the end.

    -
    -
    -

    How will I know that the course I have signed up for has started?

    -

    The start date for each course is listed on the right-hand side of the Course About page.

    -
    -
    -

    I just signed up into edX. I have not received any form of acknowledgement that I have enrolled.

    -

    You should receive a single activation e-mail. If you did not, it may be because:

    -
      -
    • There was a typo in your e-mail address.
    • -
    • The activation e-mail was caught by your spam filter. Please check your spam folder.
    • -
    • You may be using an older browser. We recommend downloading the current version of Firefox or Chrome.
    • -
    • JavaScript is disabled in your browser. Please confirm it is enabled.
    • -
    • If you run into issues, try recreating your account. There is no need to do anything about the old account, if any. If it is not activated through the link in the e-mail, it will disappear later.
    • -
    -
    -
    +
    +
    +

    edX Basics

    +
    +

    How do I sign up to take a class?

    +
    +

    Simply create an edX account (it's free) and then register for the course of your choice (also free). Follow the prompts on the edX website.

    +
    +
    +
    +

    What does it cost to take a class? Is this really free?

    +
    +

    EdX courses are free for everyone. All you need is an Internet connection.

    +
    +
    +
    +

    What happens after I sign up for a course?

    +
    +

    You will receive an activation email. Follow the prompts in that email to activate your account. You will need to log in each time you access your course(s). Once the course begins, it’s time to hit the virtual books. You can access the lectures, homework, tutorials, etc., for each week, one week at a time.

    +
    +
    +
    +

    Who can take an edX course?

    +
    +

    You, your mom, your little brother, your grandfather -- anyone with Internet access can take an edX course. Free.

    +
    +
    +
    +

    Are the courses only offered in English?

    +
    +

    Some edX courses include a translation of the lecture in the text bar to the right of the video. Some have the specific option of requesting a course in other languages. Please check your course to determine foreign language options.

    +
    +
    +
    +

    When will there be more courses on other subjects?

    +
    +

    We are continually reviewing and creating courses to add to the edX platform. Please check the website for future course announcements. You can also "friend" edX on Facebook – you’ll receive updates and announcements.

    +
    +
    +
    +

    How can I help edX?

    +
    +

    You may not realize it, but just by taking a course you are helping edX. That’s because the edX platform has been specifically designed to not only teach, but also gather data about learning. EdX will utilize this data to find out how to improve education online and on-campus.

    +
    +
    +
    +

    When does my course start and/or finish?

    +
    +

    You can find the start and stop dates for each course on each course description page.

    +
    +
    +
    +

    Is there a walk-through of a sample course session?

    +
    +

    There are video introductions for every course that will give you a good sense of how the course works and what to expect.

    +
    +
    +
    +

    I don't have the prerequisites for a course that I am interested in. Can I still take the course?

    +
    +

    We do not check students for prerequisites, so you are allowed to attempt the course. However, if you do not know prerequisite subjects before taking a class, you will have to learn the prerequisite material on your own over the semester, which can be an extremely difficult task.

    +
    +
    +
    +

    What happens if I have to quit a course, are there any penalties, will I be able to take another course in the future?

    +
    +

You may unregister from an edX course at any time, there are absolutely no penalties associated with incomplete edX studies, and you may register for the same course (provided we are still offering it) at a later time.

    +
    +
    +
    -
    -

    Help email

    +
    +

    The Classes

    +
    +

    How much work will I have to do to pass my course?

    +
    +

    The projected hours of study required for each course are described on the specific course description page.

    +
    +
    +
    +

    What should I do before I take a course (prerequisites)?

    +
    +

    Each course is different – some have prerequisites, and some don’t. Take a look at your specific course’s recommended prerequisites. If you do not have a particular prerequisite, you may still take the course.

    +
    +
    +
    +

    What books should I read? (I am interested in reading materials before the class starts).

    +
    +

    Take a look at the specific course prerequisites. All required academic materials will be provided during the course, within the browser. Some of the course descriptions may list additional resources. For supplemental reading material before or during the course, you can post a question on the course’s Discussion Forum to ask your online coursemates for suggestions.

    +
    +
    +
    +

    Can I download the book for my course?

    +
    +

    EdX book content may only be viewed within the browser, and downloading it violates copyright laws. If you need or want a hard copy of the book, we recommend that you purchase a copy.

    +
    +
    +
    +

    Can I take more than one course at a time?

    +
    +

    You may take multiple edX courses, however we recommend checking the requirements on each course description page to determine your available study hours and the demands of the intended courses.

    +
    +
    +
    +

    How do I log in to take an edX class?

    +
    +

    Once you sign up for a course and activate your account, click on the "Log In" button on the edx.org home page. You will need to type in your email address and edX password each time you log in.

    +
    +
    +
    +

    What time is the class?

    +
    +

    EdX classes take place at your convenience. Prefer to sleep in and study late? No worries. Videos and problem sets are available 24 hours a day, which means you can watch video and complete work whenever you have spare time. You simply log in to your course via the Internet and work through the course material, one week at a time.

    +
    +
    +
    +

    If I miss a week, how does this affect my grade?

    +
    +

    It is certainly possible to pass an edX course if you miss a week; however, coursework is progressive, so you should review and study what you may have missed. You can check your progress dashboard in the course to see your course average along the way if you have any concerns.

    +
    +
    +
    +

    How can I meet/find other students?

    +
    +

    All edX courses have Discussion Forums where you can chat with and help each other within the framework of the Honor Code.

    +
    +
    +
    +

    How can I talk to professors, fellows and teaching assistants?

    +
    +

    The Discussion Forums are the best place to reach out to the edX teaching team for your class, and you don’t have to wait in line or rearrange your schedule to fit your professor’s – just post your questions. The response isn’t always immediate, but it’s usually pretty darned quick.

    +
    +
    +
    + +
    +

    Getting Help

    +
    +

    Can I re-take a course?

    +
    +

Good news: there are unlimited "mulligans" in edX. You may re-take edX courses as often as you wish. Your performance in any particular offering of a course will not affect your standing in future offerings of any edX course, including future offerings of the same course.

    +
    +
    +
    +

    Enrollment for a course that I am interested in is open, but the course has already started. Can I still enroll?

    +
    +

    Yes, but you will not be able to turn in any assignments or exams that have already been due. If it is early in the course, you might still be able to earn enough points for a certificate, but you will have to check with the course in question in order to find out more.

    +
    +
    +
    +

    Is there an exam at the end?

    +
    +

    Different courses have slightly different structures. Please check the course material description to see if there is a final exam or final project.

    +
    +
    +
    +

    Will the same courses be offered again in the future?

    +
    +

    Existing edX courses will be re-offered, and more courses added.

    +
    +
    +
    +

    Will I get a certificate for taking an edX course?

    +
    +

    Online learners who receive a passing grade for a course will receive a certificate of mastery from edX and the underlying X University that offered the course. For example, a certificate of mastery for MITx’s 6.002x Circuits & Electronics will come from edX and MITx.

    +
    +
    +
    +

    How are edX certificates delivered?

    +
    +

    EdX certificates are delivered online through edx.org. So be sure to check your email in the weeks following the final grading – you will be able to download and print your certificate.

    +
    +
    +
    +

    What is the difference between a proctored certificate and an honor code certificate?

    +
    +

    A proctored certificate is given to students who take and pass an exam under proctored conditions. An honor-code certificate is given to students who have completed all of the necessary online coursework associated with a course and have signed the edX honor code .

    +
    +
    +
    +

    Yes. The requirements for both certificates can be independently satisfied.

    +
    +

    It is certainly possible to pass an edX course if you miss a week; however, coursework is progressive, so you should review and study what you may have missed. You can check your progress dashboard in the course to see your course average along the way if you have any concerns.

    +
    +
    +
    +

    Will my grade be shown on my certificate?

    +
    +

    No. Grades are not displayed on either honor code or proctored certificates.

    +
    +
    +
    +

    How can I talk to professors, fellows and teaching assistants?

    +
    +

    The Discussion Forums are the best place to reach out to the edX teaching team for your class, and you don’t have to wait in line or rearrange your schedule to fit your professor’s – just post your questions. The response isn’t always immediate, but it’s usually pretty darned quick.

    +
    +
    +
    +

    The only certificates distributed with grades by edX were for the initial prototype course.

    +
    +

    You may unregister from an edX course at any time, there are absolutely no penalties associated with incomplete edX studies, and you may register for the same course (provided we are still offering it) at a later time.

    +
    +
    +
    +

    Will my university accept my edX coursework for credit?

    +
    +

    Each educational institution makes its own decision regarding whether to accept edX coursework for credit. Check with your university for its policy.

    +
    +
    +
    +

    I lost my edX certificate – can you resend it to me?

    +
    +

    Please log back in to your account to find certificates from the same profile page where they were originally posted. You will be able to re-print your certificate from there.

    +
    +
    +
    + +
    +

    edX & Open Source

    +
    +

    What’s open source?

    +
    +

    Open source is a philosophy that generally refers to making software freely available for use or modification as users see fit. In exchange for use of the software, users generally add their contributions to the software, making it a public collaboration. The edX platform will be made available as open source code in order to allow world talent to improve and share it on an ongoing basis.

    +
    +
    +
    +

    When/how can I get the open-source platform technology?

    +
    +

    We are still building the edX technology platform and will be making announcements in the future about its availability.

    +
    +
    +
    + +
    +

    Other Help Questions - Account Questions

    +
    +

    My username is taken.

    +
    +

    Now’s your chance to be creative: please try a different, more unique username – for example, try adding a random number to the end.

    +
    +
    +
    +

    Why does my password show on my course login page?

    +
    +

    Oops! This may be because of the way you created your account. For example, you may have mistakenly typed your password into the login box.

    +
    +
    +
    +

    I am having login problems (password/email unrecognized).

    +
    +

    Please check your browser’s settings to make sure that you have the current version of Firefox or Chrome, and then try logging in again. If you find access impossible, you may simply create a new account using an alternate email address – the old, unused account will disappear later.

    +
    +
    +
    +

    I did not receive an activation email.

    +
    +

    If you did not receive an activation email it may be because:

    +
      +
    • There was a typo in your email address.
    • +
    • Your spam filter may have caught the activation email. Please check your spam folder.
    • +
    • You may be using an older browser. We recommend downloading the current version of Firefox or Chrome.
    • +
    • JavaScript is disabled in your browser. Please check your browser settings and confirm that JavaScript is enabled.
    • +
    +

    If you continue to have problems activating your account, we recommend that you try creating a new account. There is no need to do anything about the old account. If it is not activated through the link in the email, it will disappear later.

    +
    +
    +
    +

    Can I delete my account?

    +
    +

    There’s no need to delete your account. An old, unused edX account with no course completions associated with it will disappear.

    +
    +
    +
    +

    I am experiencing problems with the display. E.g., There are tools missing from the course display, or I am unable to view video.

    +
    +

    Please check your browser and settings. We recommend downloading the current version of Firefox or Chrome. Alternatively, you may re-register with a different email account. There is no need to delete the old account, as it will disappear if unused.

    +
    +
    +
    + +
    + + -
    diff --git a/lms/templates/static_templates/jobs.html b/lms/templates/static_templates/jobs.html index 15fcbfcdca..f2752a0939 100644 --- a/lms/templates/static_templates/jobs.html +++ b/lms/templates/static_templates/jobs.html @@ -5,7 +5,8 @@

    Do You Want to Change the Future of Education?

    -
    + +
    @@ -27,23 +28,133 @@
    -
    + +
    +
    +
    -

    We're hiring!

    -

    Are you passionate? Want to help change the world? Good, you've found the right company! We're growing and our team needs the best and brightest in creating the next evolution in interactive online education.

    -

    Want to apply to edX?

    -

    Send your resume and cover letter to jobs@edx.org.

    -

    Note: We'll review each and every resume but please note you may not get a response due to the volume of inquiries.

    +

    EdX is looking to add new talent to our team!

    +

    Our mission is to give a world-class education to everyone, everywhere, regardless of gender, income or social status.

    +

    Today, EdX.org, a not-for-profit, provides hundreds of thousands of people from around the globe with access to free education.  We offer amazing quality classes by the best professors from the best schools. We enable our members to uncover a new passion that will transform their lives and their communities.

    +

    Around the world-from coast to coast, in over 192 countries, people are making the decision to take one or several of our courses. As we continue to grow our operations, we are looking for talented, passionate people with great ideas to join the edX team. We aim to create an environment that is supportive, diverse, and as fun as our brand. If you're results-oriented, dedicated, and ready to contribute to an unparalleled member experience for our community, we really want you to apply.

    +

    As part of the edX team, you’ll receive:

    +
      +
    • Competitive compensation
    • +
    • Generous benefits package
    • +
    • Free lunch every day
    • +
    • A great working experience where everyone cares
    • +
    +

    While we appreciate every applicant's interest, only those under consideration will be contacted. We regret that phone calls will not be accepted.

    +
    + +
    +
    +

    INSTRUCTIONAL DESIGNER — CONTRACT OPPORTUNITY

    +

    The Instructional Designer will work collaboratively with the edX content and engineering teams to plan, develop and deliver highly engaging and media rich online courses. The Instructional Designer will be a flexible thinker, able to determine and apply sound pedagogical strategies to unique situations and a diverse set of academic disciplines.

    +

    Responsibilities:

    +
      +
    • Work with the video production team, product managers and course staff on the implementation of instructional design approaches in the development of media and other course materials.
    • +
    • Based on course staff and faculty input, articulate learning objectives and align them to design strategies and assessments.
    • +
    • Develop flipped classroom instructional strategies in coordination with community college faculty.
    • +
    • Produce clear and instructionally effective copy, instructional text, and audio and video scripts
    • +
    • Identify and deploy instructional design best practices for edX course staff and faculty as needed.
    • +
    • Create course communication style guides. Train and coach teaching staff on best practices for communication and discussion management.
    • +
    • Serve as a liaison to instructional design teams based at X universities.
    • +
    • Consult on peer review processes to be used by learners in selected courses.
    • +
    • Ability to apply game-based learning theory and design into selected courses as appropriate.
    • +
    • Use learning analytics and metrics to inform course design and revision process.
    • +
    • Collaborate with key research and learning sciences stakeholders at edX and partner institutions for the development of best practices for MOOC teaching and learning and course design.
    • +
    • Support the development of pilot courses and modules used for sponsored research initiatives.
    • +
    +

    Qualifications:

    +
      +
    • Master's Degree in Educational Technology, Instructional Design or related field. Experience in higher education with additional experience in a start-up or research environment preferable.
    • +
    • Excellent interpersonal and communication (written and verbal), project management, problem-solving and time management skills. The ability to be flexible with projects and to work on multiple courses essential. Ability to meet deadlines and manage expectations of constituents.
    • +
    • Capacity to develop new and relevant technology skills. Experience using game theory design and learning analytics to inform instructional design decisions and strategy.
    • +
    • Technical Skills: Video and screencasting experience. LMS Platform experience, xml, HTML, CSS, Adobe Design Suite, Camtasia or Captivate experience. Experience with web 2.0 collaboration tools.
    • +
    +

    Eligible candidates will be invited to respond to an Instructional Design task based on current or future edX course development needs.

    +

    If you are interested in this position, please send an email to jobs@edx.org.

    +
    +
    + +
    +
    +

    MEMBER SERVICES MANAGER

    +

    The edX Member Services Manager is responsible for both defining support best practices and directly supporting edX members by handling or routing issues that come in from our websites, email and social media tools.  We are looking for a passionate person to help us define and own this experience. While this is a Manager level position, we see this candidate quickly moving through the ranks, leading a larger team of employees over time. This staff member will be running our fast growth support organization.

    +

    Responsibilities:

    +
      +
    • Define and rollout leading technology, best practices and policies to support a growing team of member care representatives.
    • +
    • Provide reports and visibility into member care metrics.
    • +
    • Identify a staffing plan that mirrors growth and work to grow the team with passionate, member-first focused staff.
    • +
    • Manage member services staff to predefined service levels.
    • +
    • Resolve issues according to edX policies; escalates non-routine issues.
    • +
    • Educate members on edX policies and getting started
    • +
    • May assist new members with edX procedures and processing registration issues.
    • +
    • Provides timely follow-up and resolution to issues.
    • +
    • A passion for doing the right thing - at edX the member is always our top priority
      +
    • +
    +

    Qualifications:

    +
      +
    • 5-8 years in a call center or support team management
    • +
    • Exemplary customer service skills
    • +
    • Experience in creating and rolling out support/service best practices
    • +
    • Solid computer skills – must be fluent with desktop applications and have a basic understanding of web technologies (i.e. basic HTML)
    • +
    • Problem solving - the individual identifies and resolves problems in a timely manner, gathers and analyzes information skillfully and maintains confidentiality.
    • +
    • Interpersonal skills - the individual maintains confidentiality, remains open to others' ideas and exhibits willingness to try new things.
    • +
    • Oral communication - the individual speaks clearly and persuasively in positive or negative situations and demonstrates group presentation skills.
    • +
    • Written communication – the individual edits work for spelling and grammar, presents numerical data effectively and is able to read and interpret written information.
    • +
    • Adaptability - the individual adapts to changes in the work environment, manages competing demands and is able to deal with frequent change, delays or unexpected events.
    • +
    • Dependability - the individual is consistently at work and on time, follows instructions, responds to management direction and solicits feedback to improve performance.
    • +
    • College degree
    • +
    +

    If you are interested in this position, please send an email to jobs@edx.org.

    +
    +
    + +
    +
    +

    DIRECTOR OF PR AND COMMUNICATIONS

    +

    The edX Director of PR & Communications is responsible for creating and executing all PR strategy and providing company-wide leadership to help create and refine the edX core messages and identity as the revolutionary global leader in both on-campus and worldwide education. The Director will design and direct a communications program that conveys cohesive and compelling information about edX's mission, activities, personnel and products while establishing a distinct identity for edX as the leader in online education for both students and learning institutions.

    +

    Responsibilities:

    +
      +
    • Develop and execute goals and strategy for a comprehensive external and internal communications program focused on driving student engagement around courses and institutional adoption of the edX learning platform.
    • +
    • Work with media, either directly or through our agency of record, to establish edX as the industry leader in global learning.
    • +
    • Work with key influencers including government officials on a global scale to ensure the edX mission, content and tools are embraced and supported worldwide.
    • +
    • Work with marketing colleagues to co-develop and/or monitor and evaluate the content and delivery of all communications messages and collateral.
    • +
    • Initiate and/or plan thought leadership events developed to heighten target-audience awareness; participate in meetings and trade shows
    • +
    • Conduct periodic research to determine communications benchmarks
    • +
    • Inform employees about edX's vision, values, policies, and strategies to enable them to perform their jobs efficiently and drive morale.
    • +
    • Work with and manage existing communications team to effectively meet strategic goals.
    • +
    +

    Qualifications:

    +
      +
    • Ten years of experience in PR and communications
    • +
    • Ability to work creatively and provide company-wide leadership in a fast-paced, dynamic start-up environment required
    • +
    • Adaptability - the individual adapts to changes in the work environment, manages competing demands and is able to deal with frequent change, delays or unexpected events.
    • +
    • Experience in working in successful consumer-focused startups preferred
    • +
    • PR agency experience in setting strategy for complex multichannel, multinational organizations a plus.
    • +
    • Extensive writing experience and simply amazing oral, written, and interpersonal communications skills
    • +
    • B.A./B.S. in communications or related field
    • +
    +

    If you are interested in this position, please send an email to jobs@edx.org.

    +
    +
    +
    +
    - - - - +

    Positions

    +

    How to Apply

    E-mail your resume, cover letter and any other materials to jobs@edx.org

    Our Location

    diff --git a/lms/templates/static_templates/media-kit.html b/lms/templates/static_templates/media-kit.html new file mode 100644 index 0000000000..458cfb8e15 --- /dev/null +++ b/lms/templates/static_templates/media-kit.html @@ -0,0 +1,111 @@ +<%namespace name='static' file='../static_content.html'/> +<%inherit file="../main.html" /> + +<%block name="title">edX Media Kit + +
    +

    edX Media Kit

    + +
    + +
    +
    +
    +

    Welcome to the edX Media Kit

    +
    + +
    +

    Need images for a news story? Feel free to download high-resolution versions of the photos below by clicking on the thumbnail. Please credit edX in your use.

    +

    We’ve included visual guidelines on how to use the edX logo within the download zip which also includes Adobe Illustrator and eps versions of the logo.

    +

    For more information about edX, please contact Dan O'Connell Associate Director of Communications via oconnell@edx.org.

    +
    + + + +
    + +
    +
    +

    The edX Media Library

    +
    + +
    + +
    +
    +
    +
    + +<%block name="js_extra"> + + \ No newline at end of file diff --git a/lms/templates/static_templates/press_releases/Georgetown_joins_edX.html b/lms/templates/static_templates/press_releases/Georgetown_joins_edX.html new file mode 100644 index 0000000000..310a4ced5e --- /dev/null +++ b/lms/templates/static_templates/press_releases/Georgetown_joins_edX.html @@ -0,0 +1,73 @@ +<%! from django.core.urlresolvers import reverse %> +<%inherit file="../../main.html" /> + +<%namespace name='static' file='../../static_content.html'/> + +<%block name="title">Georgetown University joins edX +
    + + +
    + +
    +

    Georgetown University joins edX

    +
    +
    +

    Georgetown becomes sixth institution to join global movement in year one, Broadens course options and brings its unique mission-driven perspective to the world of online learning

    + +

    CAMBRIDGE, MA — December 10, 2012 — EdX, the not-for-profit online learning initiative founded by Harvard University and the Massachusetts Institute of Technology (MIT), announced today the addition of Georgetown University to its group of educational leaders who are focused on providing a category-leading, quality higher education experience to the global online community.

    + +

    “It is a privilege to partner with edX and this extraordinary collection of universities,” said Dr. John J. DeGioia, President of Georgetown University. “Our Catholic and Jesuit identity compels us to work at the frontiers of excellence in higher education, and we see in this partnership an exciting opportunity to more fully realize this mission. Not only will it enrich our capacity to serve our global family–beyond our campuses here in Washington, D.C.–but it will also allow us to extend the applications of our research and our scholarship.”

    + +

    Georgetown University, the nation’s oldest Catholic and Jesuit university, is one of the world’s leading academic and research institutions, offering a unique educational experience that prepares the next generation of global citizens to lead and make a difference in the world. Students receive a world-class learning experience focused on educating the whole person through exposure to different faiths, cultures and beliefs. Georgetown University will provide a series of GeorgetownX courses to the open source platform and broaden the course offerings available on edx.org.

    + +

    “We welcome Georgetown University to edX,” said Anant Agarwal, President of edX. “Georgetown has a long history of research and educational excellence, with a demonstrated commitment to the arts and sciences, foreign service, law, medicine, public policy, business, and nursing and health studies. Georgetown, with its distinguished presence around the world including a School of Foreign Service campus in Qatar, shares with edX a global perspective and a mission to expand educational opportunities.”

    + +

    Through edX, the “X Universities” will provide interactive education wherever there is access to the Internet. They will enhance teaching and learning through research about how students learn, and how technologies and game-like experiences can facilitate effective teaching both on-campus and online. The University of California, Berkeley joined edX in July, the University of Texas System joined in October, and Wellesley College joined earlier in December.

    + +

    “Georgetown University is an excellent addition to edX,” said MIT President L. Rafael Reif. “It brings important strength in many areas of scholarship and has long had an especially powerful voice in public life and discourse. The edX community stands to benefit greatly from what Georgetown will offer.”

    + +

    “EdX is an innovation that will expand access to high-quality educational content for millions around the world while helping us better understand how technology can improve the academic experience for students in classrooms across our campuses,” said Harvard President Drew Faust. “Georgetown’s commitment to technology enhanced learning, its excellence in education, and its long history as an institution dedicated to public service make it a welcome addition to edX.”

    + +

    GeorgetownX will offer courses on edX beginning in the fall of 2013. All of the courses will be hosted from edX’s innovative platform at www.edx.org.

    + +

    About edX

    + +

    edX is a not-for-profit enterprise of its founding partners Harvard University and the Massachusetts Institute of Technology that features learning designed specifically for interactive study via the web. Based on a long history of collaboration and their shared educational missions the founders are creating a new online-learning experience. Anant Agarwal, former Director of MIT’s Computer Science and Artificial Intelligence Laboratory, serves as the first president of edX. Along with offering online courses, the institutions will use edX to research how students learn and how technology can transform learning-both on-campus and worldwide. EdX is based in Cambridge, Massachusetts and is governed by MIT and Harvard.

    + +

    About Georgetown University

    + +

    Georgetown University is the oldest Catholic and Jesuit university in America, founded in 1789 by Archbishop John Carroll. Georgetown today is a major student-centered, international, research university offering respected undergraduate, graduate and professional programs from its home in Washington, D.C. For more information about Georgetown University, visit www.georgetown.edu.

    + +
    +

    Contact: Brad Baker

    +

    BBaker@webershandwick.com

    +

    617-520-7043

    +
    +
    + + +
    +
    +
    diff --git a/lms/templates/static_templates/press_releases/Spring_2013_course_announcements.html b/lms/templates/static_templates/press_releases/Spring_2013_course_announcements.html new file mode 100644 index 0000000000..77e7beb5f7 --- /dev/null +++ b/lms/templates/static_templates/press_releases/Spring_2013_course_announcements.html @@ -0,0 +1,75 @@ +<%! from django.core.urlresolvers import reverse %> +<%inherit file="../../main.html" /> + +<%namespace name='static' file='../../static_content.html'/> + +<%block name="title">EdX expands platform, announces first wave of courses for spring 2013 +
    + + +
    +
    +

    EdX expands platform, announces first wave of courses for spring 2013

    +
    + +
    +

    Leading minds from top universities to offer world-wide MOOC courses on statistics, history, justice, and poverty

    + +

    CAMBRIDGE, MA – December 19, 2012 —EdX, the not-for-profit online learning initiative founded by Harvard University and the Massachusetts Institute of Technology (MIT), announced today its initial spring 2013 schedule including its first set of courses in the humanities and social sciences – introductory courses with wide, global appeal. In its second semester, edX expands its online courses to a variety of subjects ranging from the ancient Greek hero to the riddle of world poverty, all taught by experts at some of the world’s leading universities. EdX is also bringing back several courses from its popular offerings in the fall semester.

    + +

    “EdX is both revolutionizing and democratizing education,” said Anant Agarwal, President of edX. “In just eight months we’ve attracted more than half a million unique users from around the world to our learning portal. Now, with these spring courses we are entering a new era – and are poised to touch millions of lives with the best courses from the best faculty at the best institutions in the world.”

    + +

    Building on the success of its initial offerings, edX is broadening the courses on its innovative educational platform. In its second semester – now open for registration – edX continues with courses from some of the world’s most esteemed faculty from UC Berkeley, Harvard and MIT. Spring 2013 courses include:

    + + + +

    “I'm delighted to have my Justice course on edX,” said Michael Sandel, Ann T. and Robert M. Bass Professor of Government at Harvard University, “where students everywhere will be able to engage in a global dialogue about the big moral and civic questions of our time.”

    + +

    In addition to these new courses, edX is bringing back several courses from the popular fall 2012 semester: Introduction to Computer Science and Programming; Introduction to Solid State Chemistry; Introduction to Artificial Intelligence; Software as a Service I; Software as a Service II; Foundations of Computer Graphics.

    + +

    This spring also features Harvard's Copyright, taught by Harvard Law School professor William Fisher III, former law clerk to Justice Thurgood Marshall and expert on the hotly debated U.S. copyright system, which will explore the current law of copyright and the ongoing debates concerning how that law should be reformed. Copyright will be offered as an experimental course, taking advantage of different combinations and uses of teaching materials, educational technologies, and the edX platform. 500 learners will be selected through an open application process that will run through January 3rd 2013.

    + +

    These new courses would not be possible without the contributions of key edX institutions, including UC Berkeley, which is the inaugural chair of the “X University” consortium and major contributor to the platform. All of the courses will be hosted on edX’s innovative platform at www.edx.org and are open for registration as of today. EdX expects to announce a second set of spring 2013 courses in the future.

    + +

    About edX

    + +

    EdX is a not-for-profit enterprise of its founding partners Harvard University and the Massachusetts Institute of Technology focused on transforming online and on-campus learning through groundbreaking methodologies, game-like experiences and cutting-edge research. EdX provides inspirational and transformative knowledge to students of all ages, social status, and income who form worldwide communities of learners. EdX uses its open source technology to transcend physical and social borders. We’re focused on people, not profit. EdX is based in Cambridge, Massachusetts in the USA.

    + +
    +

    Contact: Brad Baker

    +

    BBaker@webershandwick.com

    +

    617-520-7260

    +
    + + +
    +
    +
    diff --git a/lms/templates/static_templates/press_releases/Wellesley_College_joins_edX.html b/lms/templates/static_templates/press_releases/Wellesley_College_joins_edX.html new file mode 100644 index 0000000000..5e25114d3a --- /dev/null +++ b/lms/templates/static_templates/press_releases/Wellesley_College_joins_edX.html @@ -0,0 +1,73 @@ +<%! from django.core.urlresolvers import reverse %> +<%inherit file="../../main.html" /> + +<%namespace name='static' file='../../static_content.html'/> + +<%block name="title">Wellesley College joins edX +
    + + +
    +
    +

    Wellesley College becomes first liberal arts college to join edX

    +
    +
    +

    Wellesley joins edX to advance learning collaborative, broadens course options while bringing a unique small classroom experience to the world of massive open online courses

    + +

    CAMBRIDGE, MA – December 04, 2012 — edX, the online learning initiative founded by Harvard University and the Massachusetts Institute of Technology (MIT) and launched in May, announced today the addition of Wellesley College to its group of educational leaders who are focused on providing a category-leading quality higher education experience to the global online community. Wellesley College is the first liberal arts college to join edX—and the first women’s college to offer massive open online courses (MOOCs). Wellesley College will provide a series of WellesleyX courses to the platform that are unique to the College and broaden the course offerings on edx.org.

    + +

    According to H. Kim Bottomly, President of Wellesley College, WellesleyX provides an opportunity for the College to impact the future of higher education. “Wellesley is ready to contribute our liberal arts perspective to help shape online education, particularly as colleges work to figure out how to bring the small classroom experience to the online learning landscape. We are convinced that Wellesley and its outstanding faculty have the creativity and vision to take on this challenge.”

    + +

    Bottomly added, “This is a grand experiment, and what we learn will benefit Wellesley students as well as students all over the world.”

    + +

    Regarded as one of the world’s finest colleges, Wellesley is known for cultivating generations of women leaders; its pedagogical innovation; and its commitment to highly personalized, discussion-based learning. With the launch of WellesleyX, the College will open access to its rigorous courses and distinguished faculty to anyone with an internet connection.

    + +

    “We are excited that Wellesley College has chosen to join with edX,” said Anant Agarwal, President of edX. “Wellesley’s long history of educating women leaders in diplomacy, the arts, science and business provides a unique strength. We look forward to working alongside the Wellesley faculty to extend their reach to hundreds of thousands of women and men around the world.”

    + +

    Through edX, the “X Universities” will provide interactive education wherever there is access to the Internet and will enhance teaching and learning through research about how students learn, and how technologies can facilitate effective teaching both on-campus and online. The University of California, Berkeley joined edX in July and the University of Texas System joined in October.

    + +

    “Wellesley College is a welcome addition to edX and our efforts to fully realize the potential of online education for students on campus and online,” said Harvard President Drew Faust. “As an institution that has provided an outstanding educational experience to many thousands of women for over 100 years, Wellesley brings to edX both a unique academic perspective and a commitment to excellence in education.”

    + +

    “Wellesley College's decision to join the edX platform is excellent news for edX and for the platform's growing number of users around the world,” said MIT President L. Rafael Reif. “Wellesley brings a distinctive history that will further enrich the efforts we are making to tailor instruction to the different ways by which people learn.”

    + +

    WellesleyX will offer four courses on edX beginning in the fall of 2013. All of the courses will be hosted from edX’s innovative platform at www.edx.org.

    + +

    About edX

    + +

    edX is a not-for-profit enterprise of its founding partners Harvard University and the Massachusetts Institute of Technology that features learning designed specifically for interactive study via the web. Based on a long history of collaboration and their shared educational missions the founders are creating a new online-learning experience. Anant Agarwal, former Director of MIT’s Computer Science and Artificial Intelligence Laboratory, serves as the first president of edX. Along with offering online courses, the institutions will use edX to research how students learn and how technology can transform learning—both on-campus and worldwide. EdX is based in Cambridge, Massachusetts and is governed by MIT and Harvard.

    + +

    About Wellesley College

    + +

    Since 1875, Wellesley College has been the preeminent liberal arts college for women. Known for its intellectual rigor and its remarkable track record for the cultivation of women leaders in every arena, Wellesley—only 12 miles from Boston—is home to some 2300 undergraduates from every state and 75 countries.

    + +
    +

    Contact: Amanda Keane

    +

    akeane@webershandwick.com

    +

    617-520-7260

    +
    +
    + + +
    +
    +
    diff --git a/lms/templates/test_center_register.html b/lms/templates/test_center_register.html new file mode 100644 index 0000000000..f6c53c0e89 --- /dev/null +++ b/lms/templates/test_center_register.html @@ -0,0 +1,480 @@ +<%! + from django.core.urlresolvers import reverse + from courseware.courses import course_image_url, get_course_about_section + from courseware.access import has_access + from certificates.models import CertificateStatuses +%> +<%inherit file="main.html" /> + +<%namespace name='static' file='static_content.html'/> + +<%block name="title">Pearson VUE Test Center Proctoring - Registration + +<%block name="js_extra"> + + + +
    + +
    +
    +
    +

    ${get_course_about_section(course, 'university')} ${course.number} ${course.title}

    + + % if registration: +

    Your Pearson VUE Proctored Exam Registration

    + % else: +

    Register for a Pearson VUE Proctored Exam

    + % endif +
    +
    +
    + + <% + exam_help_href = "mailto:exam-help@edx.org?subject=Pearson VUE Exam - " + get_course_about_section(course, 'university') + " - " + course.number + %> + + % if registration: + + % if registration.is_accepted: +
    +

    Your registration for the Pearson exam has been processed

    +

    Your registration number is ${registration.client_candidate_id}. (Write this down! You’ll need it to schedule your exam.)

    + Schedule Pearson exam +
    + % endif + + % if registration.demographics_is_rejected: +
    +

    Your demographic information contained an error and was rejected

    +

    Please check the information you provided, and correct the errors noted below. +

    + % endif + + % if registration.registration_is_rejected: +
    +

    Your registration for the Pearson exam has been rejected

    +

    Please see your registration status details for more information.

    +
    + % endif + + % if registration.is_pending: +
    +

    Your registration for the Pearson exam is pending

    +

    Once your information is processed, it will be forwarded to Pearson and you will be able to schedule an exam.

    +
    + % endif + + % endif + +
    +
    + +
    + % if exam_info.is_registering(): +
    + % else: + + +
    +

    Registration for this Pearson exam is closed

    +

    Your previous information is available below; however, you may not edit any of the information. +

    + % endif + + % if registration: +

    + Please use the following form if you need to update your demographic information used in your Pearson VUE Proctored Exam. Required fields are noted by bold text and an asterisk (*). +

    + % else: +

    + Please provide the following demographic information to register for a Pearson VUE Proctored Exam. Required fields are noted by bold text and an asterisk (*). +

    + % endif + + + + + + + +
    +
    + + +
      +
    1. + + +
    2. +
    3. + + +
    4. +
    5. + + +
    6. +
    7. + + +
    8. +
    9. + + +
    10. +
    +
    + +
    + + +
      +
    1. + + +
    2. +
    3. +
      + + +
      +
      + + +
      +
    4. +
    5. + + +
    6. +
    7. +
      + + +
      +
      + + +
      +
      + + +
      +
    8. +
    +
    + +
    + + +
      +
    1. +
      + + +
      +
      + + +
      +
      + + +
      +
    2. +
    3. +
      + + +
      +
      + + +
      +
    4. +
    5. + + +
    6. +
    +
    +
    + + % if registration: + % if registration.accommodation_request and len(registration.accommodation_request) > 0: +
    + % endif + % else: +
    + % endif + + % if registration: + % if registration.accommodation_request and len(registration.accommodation_request) > 0: +

    Note: Your previous accommodation request below needs to be reviewed in detail and will add a significant delay to your registration process.

    + % endif + % else: +

    Note: Accommodation requests are not part of your demographic information and cannot be changed once submitted. Accommodation requests, which are reviewed on a case-by-case basis, will add a significant delay to the registration process.

    + % endif + +
    + + +
      + % if registration: + % if registration.accommodation_request and len(registration.accommodation_request) > 0: + + % endif + % else: +
    1. + + +
    2. + % endif +
    +
    +
    + +
    + % if registration: + + Cancel Update + % else: + + Cancel Registration + % endif + +
    +

    +
      +
      +
      + + + % if registration: + % if registration.accommodation_request and len(registration.accommodation_request) > 0: + + % endif + % else: + Special (ADA) Accommodations + % endif +
      + + +
      diff --git a/lms/templates/university_profile/georgetownx.html b/lms/templates/university_profile/georgetownx.html new file mode 100644 index 0000000000..a519746c4c --- /dev/null +++ b/lms/templates/university_profile/georgetownx.html @@ -0,0 +1,24 @@ +<%inherit file="base.html" /> +<%namespace name='static' file='../static_content.html'/> + +<%block name="title">GeorgetownX + +<%block name="university_header"> + + + + +<%block name="university_description"> +

      Georgetown University, the nation’s oldest Catholic and Jesuit university, is one of the world’s leading academic and research institutions, offering a unique educational experience that prepares the next generation of global citizens to lead and make a difference in the world. Students receive a world-class learning experience focused on educating the whole person through exposure to different faiths, cultures and beliefs.

      + + +${parent.body()} diff --git a/lms/templates/university_profile/wellesleyx.html b/lms/templates/university_profile/wellesleyx.html new file mode 100644 index 0000000000..55264d90d0 --- /dev/null +++ b/lms/templates/university_profile/wellesleyx.html @@ -0,0 +1,24 @@ +<%inherit file="base.html" /> +<%namespace name='static' file='../static_content.html'/> + +<%block name="title">WellesleyX + +<%block name="university_header"> + + + + +<%block name="university_description"> +

      Since 1875, Wellesley College has been the preeminent liberal arts college for women. Known for its intellectual rigor and its remarkable track record for the cultivation of women leaders in every arena, Wellesley—only 12 miles from Boston—is home to some 2300 undergraduates from every state and 75 countries.

      + + +${parent.body()} diff --git a/lms/templates/video.html b/lms/templates/video.html index 5c041d5c70..4d4df8c3c7 100644 --- a/lms/templates/video.html +++ b/lms/templates/video.html @@ -2,17 +2,21 @@

      ${display_name}

      % endif -
      -
      -
      -
      -
      -
      - -
      -
      +%if settings.MITX_FEATURES['STUB_VIDEO_FOR_TESTING']: +
      +%else: +
      +
      +
      +
      +
      +
      +
      +
      +
      -
      +%endif + % if source:

      Download video here.

      diff --git a/lms/urls.py b/lms/urls.py index 529396c20e..cab0533f89 100644 --- a/lms/urls.py +++ b/lms/urls.py @@ -37,12 +37,17 @@ urlpatterns = ('', url(r'^event$', 'track.views.user_track'), url(r'^t/(?P