-${preview}
\ No newline at end of file
+${preview}
+
diff --git a/cms/templates/overview.html b/cms/templates/overview.html
index 2a46908c55..a20531200e 100644
--- a/cms/templates/overview.html
+++ b/cms/templates/overview.html
@@ -122,7 +122,7 @@
% for section in sections:
diff --git a/cms/templates/unit.html b/cms/templates/unit.html
index 0599411a67..ef94d51576 100644
--- a/cms/templates/unit.html
+++ b/cms/templates/unit.html
@@ -5,6 +5,7 @@
<%block name="title">CMS Unit%block>
<%block name="jsextra">
+
%block>
<%block name="content">
@@ -46,20 +54,43 @@
% endfor
- % for type, templates in sorted(component_templates.items()):
-
- % endfor
+ % for type, templates in sorted(component_templates.items()):
+
+
Select ${type} component type:
+
+
+
Cancel
+
+ % endfor
diff --git a/cms/templates/widgets/problem-edit.html b/cms/templates/widgets/problem-edit.html
new file mode 100644
index 0000000000..c263cad5ed
--- /dev/null
+++ b/cms/templates/widgets/problem-edit.html
@@ -0,0 +1,83 @@
+<%include file="metadata-edit.html" />
+
+
+ %if markdown != '' or data == '
\n\n':
+
+
+ %endif
+
+
+
+
+
diff --git a/common/djangoapps/mitxmako/shortcuts.py b/common/djangoapps/mitxmako/shortcuts.py
index 181d3befd5..ebeb0fc180 100644
--- a/common/djangoapps/mitxmako/shortcuts.py
+++ b/common/djangoapps/mitxmako/shortcuts.py
@@ -12,10 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import logging
-
-log = logging.getLogger("mitx." + __name__)
-
from django.template import Context
from django.http import HttpResponse
diff --git a/common/djangoapps/mitxmako/template.py b/common/djangoapps/mitxmako/template.py
index 56096fe173..947dc8c1a4 100644
--- a/common/djangoapps/mitxmako/template.py
+++ b/common/djangoapps/mitxmako/template.py
@@ -54,5 +54,4 @@ class Template(MakoTemplate):
context_dictionary['MITX_ROOT_URL'] = settings.MITX_ROOT_URL
context_dictionary['django_context'] = context_instance
- return super(Template, self).render(**context_dictionary)
-
+ return super(Template, self).render_unicode(**context_dictionary)
diff --git a/common/djangoapps/student/models.py b/common/djangoapps/student/models.py
index 0eded21df1..4932e579a7 100644
--- a/common/djangoapps/student/models.py
+++ b/common/djangoapps/student/models.py
@@ -36,10 +36,12 @@ file and check it in at the same time as your model changes. To do that,
3. Add the migration file created in mitx/common/djangoapps/student/migrations/
"""
from datetime import datetime
+import hashlib
import json
import logging
import uuid
+
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
@@ -47,7 +49,6 @@ from django.db.models.signals import post_save
from django.dispatch import receiver
import comment_client as cc
-from django_comment_client.models import Role
log = logging.getLogger(__name__)
@@ -125,9 +126,9 @@ class UserProfile(models.Model):
self.meta = json.dumps(js)
class TestCenterUser(models.Model):
- """This is our representation of the User for in-person testing, and
+ """This is our representation of the User for in-person testing, and
specifically for Pearson at this point. A few things to note:
-
+
* Pearson only supports Latin-1, so we have to make sure that the data we
capture here will work with that encoding.
* While we have a lot of this demographic data in UserProfile, it's much
@@ -135,9 +136,9 @@ class TestCenterUser(models.Model):
UserProfile, but we'll need to have a step where people who are signing
up re-enter their demographic data into the fields we specify.
* Users are only created here if they register to take an exam in person.
-
+
The field names and lengths are modeled on the conventions and constraints
- of Pearson's data import system, including oddities such as suffix having
+ of Pearson's data import system, including oddities such as suffix having
a limit of 255 while last_name only gets 50.
"""
# Our own record keeping...
@@ -148,21 +149,21 @@ class TestCenterUser(models.Model):
# and is something Pearson needs to know to manage updates. Unlike
# updated_at, this will not get incremented when we do a batch data import.
user_updated_at = models.DateTimeField(db_index=True)
-
+
# Unique ID given to us for this User by the Testing Center. It's null when
# we first create the User entry, and is assigned by Pearson later.
candidate_id = models.IntegerField(null=True, db_index=True)
-
+
# Unique ID we assign our user for a the Test Center.
client_candidate_id = models.CharField(max_length=50, db_index=True)
-
+
# Name
first_name = models.CharField(max_length=30, db_index=True)
last_name = models.CharField(max_length=50, db_index=True)
middle_name = models.CharField(max_length=30, blank=True)
suffix = models.CharField(max_length=255, blank=True)
salutation = models.CharField(max_length=50, blank=True)
-
+
# Address
address_1 = models.CharField(max_length=40)
address_2 = models.CharField(max_length=40, blank=True)
@@ -175,7 +176,7 @@ class TestCenterUser(models.Model):
postal_code = models.CharField(max_length=16, blank=True, db_index=True)
# country is a ISO 3166-1 alpha-3 country code (e.g. "USA", "CAN", "MNG")
country = models.CharField(max_length=3, db_index=True)
-
+
# Phone
phone = models.CharField(max_length=35)
extension = models.CharField(max_length=8, blank=True, db_index=True)
@@ -183,14 +184,27 @@ class TestCenterUser(models.Model):
fax = models.CharField(max_length=35, blank=True)
# fax_country_code required *if* fax is present.
fax_country_code = models.CharField(max_length=3, blank=True)
-
+
# Company
company_name = models.CharField(max_length=50, blank=True)
-
+
@property
def email(self):
return self.user.email
+def unique_id_for_user(user):
+ """
+ Return a unique id for a user, suitable for inserting into
+ e.g. personalized survey links.
+ """
+ # include the secret key as a salt, and to make the ids unique accross
+ # different LMS installs.
+ h = hashlib.md5()
+ h.update(settings.SECRET_KEY)
+ h.update(str(user.id))
+ return h.hexdigest()
+
+
## TODO: Should be renamed to generic UserGroup, and possibly
# Given an optional field for type of group
class UserTestGroup(models.Model):
@@ -247,15 +261,6 @@ class CourseEnrollment(models.Model):
return "[CourseEnrollment] %s: %s (%s)" % (self.user, self.course_id, self.created)
-@receiver(post_save, sender=CourseEnrollment)
-def assign_default_role(sender, instance, **kwargs):
- if instance.user.is_staff:
- role = Role.objects.get_or_create(course_id=instance.course_id, name="Moderator")[0]
- else:
- role = Role.objects.get_or_create(course_id=instance.course_id, name="Student")[0]
-
- logging.info("assign_default_role: adding %s as %s" % (instance.user, role))
- instance.user.roles.add(role)
#cache_relation(User.profile)
@@ -363,10 +368,10 @@ def replicate_user_save(sender, **kwargs):
# @receiver(post_save, sender=CourseEnrollment)
def replicate_enrollment_save(sender, **kwargs):
- """This is called when a Student enrolls in a course. It has to do the
+ """This is called when a Student enrolls in a course. It has to do the
following:
- 1. Make sure the User is copied into the Course DB. It may already exist
+ 1. Make sure the User is copied into the Course DB. It may already exist
(someone deleting and re-adding a course). This has to happen first or
the foreign key constraint breaks.
2. Replicate the CourseEnrollment.
@@ -410,9 +415,9 @@ USER_FIELDS_TO_COPY = ["id", "username", "first_name", "last_name", "email",
def replicate_user(portal_user, course_db_name):
"""Replicate a User to the correct Course DB. This is more complicated than
- it should be because Askbot extends the auth_user table and adds its own
+ it should be because Askbot extends the auth_user table and adds its own
fields. So we need to only push changes to the standard fields and leave
- the rest alone so that Askbot changes at the Course DB level don't get
+ the rest alone so that Askbot changes at the Course DB level don't get
overridden.
"""
try:
@@ -457,7 +462,7 @@ def is_valid_course_id(course_id):
"""Right now, the only database that's not a course database is 'default'.
I had nicer checking in here originally -- it would scan the courses that
were in the system and only let you choose that. But it was annoying to run
- tests with, since we don't have course data for some for our course test
+ tests with, since we don't have course data for some for our course test
databases. Hence the lazy version.
"""
return course_id != 'default'
diff --git a/common/djangoapps/student/tests.py b/common/djangoapps/student/tests.py
index cde95153fd..4c7c9e2592 100644
--- a/common/djangoapps/student/tests.py
+++ b/common/djangoapps/student/tests.py
@@ -6,11 +6,16 @@ Replace this with more appropriate tests for your application.
"""
import logging
from datetime import datetime
+from hashlib import sha1
from django.test import TestCase
+from mock import patch, Mock
from nose.plugins.skip import SkipTest
-from .models import User, UserProfile, CourseEnrollment, replicate_user, USER_FIELDS_TO_COPY
+from .models import (User, UserProfile, CourseEnrollment,
+ replicate_user, USER_FIELDS_TO_COPY,
+ unique_id_for_user)
+from .views import process_survey_link, _cert_info
COURSE_1 = 'edX/toy/2012_Fall'
COURSE_2 = 'edx/full/6.002_Spring_2012'
@@ -55,7 +60,7 @@ class ReplicationTest(TestCase):
# This hasattr lameness is here because we don't want this test to be
# triggered when we're being run by CMS tests (Askbot doesn't exist
# there, so the test will fail).
- #
+ #
# seen_response_count isn't a field we care about, so it shouldn't have
# been copied over.
if hasattr(portal_user, 'seen_response_count'):
@@ -74,7 +79,7 @@ class ReplicationTest(TestCase):
# During this entire time, the user data should never have made it over
# to COURSE_2
- self.assertRaises(User.DoesNotExist,
+ self.assertRaises(User.DoesNotExist,
User.objects.using(COURSE_2).get,
id=portal_user.id)
@@ -108,19 +113,19 @@ class ReplicationTest(TestCase):
# Grab all the copies we expect
course_user = User.objects.using(COURSE_1).get(id=portal_user.id)
self.assertEquals(portal_user, course_user)
- self.assertRaises(User.DoesNotExist,
+ self.assertRaises(User.DoesNotExist,
User.objects.using(COURSE_2).get,
id=portal_user.id)
course_enrollment = CourseEnrollment.objects.using(COURSE_1).get(id=portal_enrollment.id)
self.assertEquals(portal_enrollment, course_enrollment)
- self.assertRaises(CourseEnrollment.DoesNotExist,
+ self.assertRaises(CourseEnrollment.DoesNotExist,
CourseEnrollment.objects.using(COURSE_2).get,
id=portal_enrollment.id)
course_user_profile = UserProfile.objects.using(COURSE_1).get(id=portal_user_profile.id)
self.assertEquals(portal_user_profile, course_user_profile)
- self.assertRaises(UserProfile.DoesNotExist,
+ self.assertRaises(UserProfile.DoesNotExist,
UserProfile.objects.using(COURSE_2).get,
id=portal_user_profile.id)
@@ -174,30 +179,112 @@ class ReplicationTest(TestCase):
portal_user.save()
portal_user_profile.gender = 'm'
portal_user_profile.save()
-
- # Grab all the copies we expect, and make sure it doesn't end up in
+
+ # Grab all the copies we expect, and make sure it doesn't end up in
# places we don't expect.
course_user = User.objects.using(COURSE_1).get(id=portal_user.id)
self.assertEquals(portal_user, course_user)
- self.assertRaises(User.DoesNotExist,
+ self.assertRaises(User.DoesNotExist,
User.objects.using(COURSE_2).get,
id=portal_user.id)
course_enrollment = CourseEnrollment.objects.using(COURSE_1).get(id=portal_enrollment.id)
self.assertEquals(portal_enrollment, course_enrollment)
- self.assertRaises(CourseEnrollment.DoesNotExist,
+ self.assertRaises(CourseEnrollment.DoesNotExist,
CourseEnrollment.objects.using(COURSE_2).get,
id=portal_enrollment.id)
course_user_profile = UserProfile.objects.using(COURSE_1).get(id=portal_user_profile.id)
self.assertEquals(portal_user_profile, course_user_profile)
- self.assertRaises(UserProfile.DoesNotExist,
+ self.assertRaises(UserProfile.DoesNotExist,
UserProfile.objects.using(COURSE_2).get,
id=portal_user_profile.id)
+class CourseEndingTest(TestCase):
+ """Test things related to course endings: certificates, surveys, etc"""
+ def test_process_survey_link(self):
+ username = "fred"
+ user = Mock(username=username)
+ id = unique_id_for_user(user)
+ link1 = "http://www.mysurvey.com"
+ self.assertEqual(process_survey_link(link1, user), link1)
+ link2 = "http://www.mysurvey.com?unique={UNIQUE_ID}"
+ link2_expected = "http://www.mysurvey.com?unique={UNIQUE_ID}".format(UNIQUE_ID=id)
+ self.assertEqual(process_survey_link(link2, user), link2_expected)
+ def test_cert_info(self):
+ user = Mock(username="fred")
+ survey_url = "http://a_survey.com"
+ course = Mock(end_of_course_survey_url=survey_url)
+ self.assertEqual(_cert_info(user, course, None),
+ {'status': 'processing',
+ 'show_disabled_download_button': False,
+ 'show_download_url': False,
+ 'show_survey_button': False,})
+ cert_status = {'status': 'unavailable'}
+ self.assertEqual(_cert_info(user, course, cert_status),
+ {'status': 'processing',
+ 'show_disabled_download_button': False,
+ 'show_download_url': False,
+ 'show_survey_button': False})
+
+ cert_status = {'status': 'generating', 'grade': '67'}
+ self.assertEqual(_cert_info(user, course, cert_status),
+ {'status': 'generating',
+ 'show_disabled_download_button': True,
+ 'show_download_url': False,
+ 'show_survey_button': True,
+ 'survey_url': survey_url,
+ 'grade': '67'
+ })
+
+ cert_status = {'status': 'regenerating', 'grade': '67'}
+ self.assertEqual(_cert_info(user, course, cert_status),
+ {'status': 'generating',
+ 'show_disabled_download_button': True,
+ 'show_download_url': False,
+ 'show_survey_button': True,
+ 'survey_url': survey_url,
+ 'grade': '67'
+ })
+
+ download_url = 'http://s3.edx/cert'
+ cert_status = {'status': 'downloadable', 'grade': '67',
+ 'download_url': download_url}
+ self.assertEqual(_cert_info(user, course, cert_status),
+ {'status': 'ready',
+ 'show_disabled_download_button': False,
+ 'show_download_url': True,
+ 'download_url': download_url,
+ 'show_survey_button': True,
+ 'survey_url': survey_url,
+ 'grade': '67'
+ })
+
+ cert_status = {'status': 'notpassing', 'grade': '67',
+ 'download_url': download_url}
+ self.assertEqual(_cert_info(user, course, cert_status),
+ {'status': 'notpassing',
+ 'show_disabled_download_button': False,
+ 'show_download_url': False,
+ 'show_survey_button': True,
+ 'survey_url': survey_url,
+ 'grade': '67'
+ })
+
+ # Test a course that doesn't have a survey specified
+ course2 = Mock(end_of_course_survey_url=None)
+ cert_status = {'status': 'notpassing', 'grade': '67',
+ 'download_url': download_url}
+ self.assertEqual(_cert_info(user, course2, cert_status),
+ {'status': 'notpassing',
+ 'show_disabled_download_button': False,
+ 'show_download_url': False,
+ 'show_survey_button': False,
+ 'grade': '67'
+ })
diff --git a/common/djangoapps/student/views.py b/common/djangoapps/student/views.py
index e7562f83d0..06c59d7937 100644
--- a/common/djangoapps/student/views.py
+++ b/common/djangoapps/student/views.py
@@ -28,7 +28,7 @@ from django.core.cache import cache
from django_future.csrf import ensure_csrf_cookie, csrf_exempt
from student.models import (Registration, UserProfile,
PendingNameChange, PendingEmailChange,
- CourseEnrollment)
+ CourseEnrollment, unique_id_for_user)
from certificates.models import CertificateStatuses, certificate_status_for_student
@@ -39,7 +39,8 @@ from xmodule.modulestore.exceptions import ItemNotFoundError
from datetime import date
from collections import namedtuple
-from courseware.courses import get_courses_by_university
+
+from courseware.courses import get_courses
from courseware.access import has_access
from statsd import statsd
@@ -68,31 +69,26 @@ def index(request, extra_context={}, user=None):
extra_context is used to allow immediate display of certain modal windows, eg signup,
as used by external_auth.
'''
- feed_data = cache.get("students_index_rss_feed_data")
- if feed_data == None:
- if hasattr(settings, 'RSS_URL'):
- feed_data = urllib.urlopen(settings.RSS_URL).read()
- else:
- feed_data = render_to_string("feed.rss", None)
- cache.set("students_index_rss_feed_data", feed_data, settings.RSS_TIMEOUT)
-
- feed = feedparser.parse(feed_data)
- entries = feed['entries'][0:3]
- for entry in entries:
- soup = BeautifulSoup(entry.description)
- entry.image = soup.img['src'] if soup.img else None
- entry.summary = soup.getText()
# The course selection work is done in courseware.courses.
domain = settings.MITX_FEATURES.get('FORCE_UNIVERSITY_DOMAIN') # normally False
if domain==False: # do explicit check, because domain=None is valid
domain = request.META.get('HTTP_HOST')
- universities = get_courses_by_university(None,
- domain=domain)
- context = {'universities': universities, 'entries': entries}
+
+ courses = get_courses(None, domain=domain)
+
+ # Sort courses by how far are they from they start day
+ key = lambda course: course.metadata['days_to_start']
+ courses = sorted(courses, key=key, reverse=True)
+
+ # Get the 3 most recent news
+ top_news = _get_news(top=3)
+
+ context = {'courses': courses, 'news': top_news}
context.update(extra_context)
return render_to_response('index.html', context)
+
def course_from_id(course_id):
"""Return the CourseDescriptor corresponding to this course_id"""
course_loc = CourseDescriptor.id_to_location(course_id)
@@ -107,9 +103,9 @@ def get_date_for_press(publish_date):
# strip off extra months, and just use the first:
date = re.sub(multimonth_pattern, ", ", publish_date)
if re.search(day_pattern, date):
- date = datetime.datetime.strptime(date, "%B %d, %Y")
- else:
- date = datetime.datetime.strptime(date, "%B, %Y")
+ date = datetime.datetime.strptime(date, "%B %d, %Y")
+ else:
+ date = datetime.datetime.strptime(date, "%B, %Y")
return date
def press(request):
@@ -127,6 +123,87 @@ def press(request):
return render_to_response('static_templates/press.html', {'articles': articles})
+def process_survey_link(survey_link, user):
+ """
+ If {UNIQUE_ID} appears in the link, replace it with a unique id for the user.
+ Currently, this is sha1(user.username). Otherwise, return survey_link.
+ """
+ return survey_link.format(UNIQUE_ID=unique_id_for_user(user))
+
+
+def cert_info(user, course):
+ """
+ Get the certificate info needed to render the dashboard section for the given
+ student and course. Returns a dictionary with keys:
+
+ 'status': one of 'generating', 'ready', 'notpassing', 'processing'
+ 'show_download_url': bool
+ 'download_url': url, only present if show_download_url is True
+ 'show_disabled_download_button': bool -- true if state is 'generating'
+ 'show_survey_button': bool
+ 'survey_url': url, only if show_survey_button is True
+ 'grade': if status is not 'processing'
+ """
+ if not course.has_ended():
+ return {}
+
+ return _cert_info(user, course, certificate_status_for_student(user, course.id))
+
+def _cert_info(user, course, cert_status):
+ """
+ Implements the logic for cert_info -- split out for testing.
+ """
+ default_status = 'processing'
+
+ default_info = {'status': default_status,
+ 'show_disabled_download_button': False,
+ 'show_download_url': False,
+ 'show_survey_button': False}
+
+ if cert_status is None:
+ return default_info
+
+ # simplify the status for the template using this lookup table
+ template_state = {
+ CertificateStatuses.generating: 'generating',
+ CertificateStatuses.regenerating: 'generating',
+ CertificateStatuses.downloadable: 'ready',
+ CertificateStatuses.notpassing: 'notpassing',
+ }
+
+ status = template_state.get(cert_status['status'], default_status)
+
+ d = {'status': status,
+ 'show_download_url': status == 'ready',
+ 'show_disabled_download_button': status == 'generating',}
+
+ if (status in ('generating', 'ready', 'notpassing') and
+ course.end_of_course_survey_url is not None):
+ d.update({
+ 'show_survey_button': True,
+ 'survey_url': process_survey_link(course.end_of_course_survey_url, user)})
+ else:
+ d['show_survey_button'] = False
+
+ if status == 'ready':
+ if 'download_url' not in cert_status:
+ log.warning("User %s has a downloadable cert for %s, but no download url",
+ user.username, course.id)
+ return default_info
+ else:
+ d['download_url'] = cert_status['download_url']
+
+ if status in ('generating', 'ready', 'notpassing'):
+ if 'grade' not in cert_status:
+ # Note: as of 11/20/2012, we know there are students in this state-- cs169.1x,
+ # who need to be regraded (we weren't tracking 'notpassing' at first).
+ # We can add a log.warning here once we think it shouldn't happen.
+ return default_info
+ else:
+ d['grade'] = cert_status['grade']
+
+ return d
+
@login_required
@ensure_csrf_cookie
def dashboard(request):
@@ -160,12 +237,10 @@ def dashboard(request):
show_courseware_links_for = frozenset(course.id for course in courses
if has_access(request.user, course, 'load'))
- # TODO: workaround to not have to zip courses and certificates in the template
- # since before there is a migration to certificates
- if settings.MITX_FEATURES.get('CERTIFICATES_ENABLED'):
- cert_statuses = { course.id: certificate_status_for_student(request.user, course.id) for course in courses}
- else:
- cert_statuses = {}
+ cert_statuses = { course.id: cert_info(request.user, course) for course in courses}
+
+ # Get the 3 most recent news
+ top_news = _get_news(top=3)
context = {'courses': courses,
'message': message,
@@ -173,6 +248,7 @@ def dashboard(request):
'errored_courses': errored_courses,
'show_courseware_links_for' : show_courseware_links_for,
'cert_statuses': cert_statuses,
+ 'news': top_news,
}
return render_to_response('dashboard.html', context)
@@ -262,6 +338,14 @@ def change_enrollment(request):
return {'success': False, 'error': 'We weren\'t able to unenroll you. Please try again.'}
+@ensure_csrf_cookie
+def accounts_login(request, error=""):
+
+
+ return render_to_response('accounts_login.html', { 'error': error })
+
+
+
# Need different levels of logging
@ensure_csrf_cookie
def login_user(request, error=""):
@@ -820,3 +904,24 @@ def test_center_login(request):
return redirect('/courses/MITx/6.002x/2012_Fall/courseware/Final_Exam/Final_Exam_Fall_2012/')
else:
return HttpResponseForbidden()
+
+
+def _get_news(top=None):
+ "Return the n top news items on settings.RSS_URL"
+
+ feed_data = cache.get("students_index_rss_feed_data")
+ if feed_data == None:
+ if hasattr(settings, 'RSS_URL'):
+ feed_data = urllib.urlopen(settings.RSS_URL).read()
+ else:
+ feed_data = render_to_string("feed.rss", None)
+ cache.set("students_index_rss_feed_data", feed_data, settings.RSS_TIMEOUT)
+
+ feed = feedparser.parse(feed_data)
+ entries = feed['entries'][0:top] # all entries if top is None
+ for entry in entries:
+ soup = BeautifulSoup(entry.description)
+ entry.image = soup.img['src'] if soup.img else None
+ entry.summary = soup.getText()
+
+ return entries
diff --git a/common/djangoapps/track/migrations/0001_initial.py b/common/djangoapps/track/migrations/0001_initial.py
new file mode 100644
index 0000000000..0546203cf8
--- /dev/null
+++ b/common/djangoapps/track/migrations/0001_initial.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+import datetime
+from south.db import db
+from south.v2 import SchemaMigration
+from django.db import models
+
+
+class Migration(SchemaMigration):
+
+ def forwards(self, orm):
+ # Adding model 'TrackingLog'
+ db.create_table('track_trackinglog', (
+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
+ ('dtcreated', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
+ ('username', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
+ ('ip', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
+ ('event_source', self.gf('django.db.models.fields.CharField')(max_length=32)),
+ ('event_type', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
+ ('event', self.gf('django.db.models.fields.TextField')(blank=True)),
+ ('agent', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
+ ('page', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)),
+ ('time', self.gf('django.db.models.fields.DateTimeField')()),
+ ))
+ db.send_create_signal('track', ['TrackingLog'])
+
+
+ def backwards(self, orm):
+ # Deleting model 'TrackingLog'
+ db.delete_table('track_trackinglog')
+
+
+ models = {
+ 'track.trackinglog': {
+ 'Meta': {'object_name': 'TrackingLog'},
+ 'agent': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
+ 'dtcreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
+ 'event': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
+ 'event_source': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
+ 'event_type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'ip': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
+ 'page': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
+ 'time': ('django.db.models.fields.DateTimeField', [], {}),
+ 'username': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'})
+ }
+ }
+
+ complete_apps = ['track']
\ No newline at end of file
diff --git a/common/djangoapps/track/migrations/0002_auto__add_field_trackinglog_host__chg_field_trackinglog_event_type__ch.py b/common/djangoapps/track/migrations/0002_auto__add_field_trackinglog_host__chg_field_trackinglog_event_type__ch.py
new file mode 100644
index 0000000000..4c73aa3bfd
--- /dev/null
+++ b/common/djangoapps/track/migrations/0002_auto__add_field_trackinglog_host__chg_field_trackinglog_event_type__ch.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+import datetime
+from south.db import db
+from south.v2 import SchemaMigration
+from django.db import models
+
+
+class Migration(SchemaMigration):
+
+ def forwards(self, orm):
+ # Adding field 'TrackingLog.host'
+ db.add_column('track_trackinglog', 'host',
+ self.gf('django.db.models.fields.CharField')(default='', max_length=64, blank=True),
+ keep_default=False)
+
+
+ # Changing field 'TrackingLog.event_type'
+ db.alter_column('track_trackinglog', 'event_type', self.gf('django.db.models.fields.CharField')(max_length=512))
+
+ # Changing field 'TrackingLog.page'
+ db.alter_column('track_trackinglog', 'page', self.gf('django.db.models.fields.CharField')(max_length=512, null=True))
+
+ def backwards(self, orm):
+ # Deleting field 'TrackingLog.host'
+ db.delete_column('track_trackinglog', 'host')
+
+
+ # Changing field 'TrackingLog.event_type'
+ db.alter_column('track_trackinglog', 'event_type', self.gf('django.db.models.fields.CharField')(max_length=32))
+
+ # Changing field 'TrackingLog.page'
+ db.alter_column('track_trackinglog', 'page', self.gf('django.db.models.fields.CharField')(max_length=32, null=True))
+
+ models = {
+ 'track.trackinglog': {
+ 'Meta': {'object_name': 'TrackingLog'},
+ 'agent': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
+ 'dtcreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
+ 'event': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
+ 'event_source': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
+ 'event_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
+ 'host': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'ip': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
+ 'page': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
+ 'time': ('django.db.models.fields.DateTimeField', [], {}),
+ 'username': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'})
+ }
+ }
+
+ complete_apps = ['track']
\ No newline at end of file
diff --git a/common/djangoapps/track/migrations/__init__.py b/common/djangoapps/track/migrations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/common/djangoapps/track/models.py b/common/djangoapps/track/models.py
index 401fa2832f..dfdf7a0558 100644
--- a/common/djangoapps/track/models.py
+++ b/common/djangoapps/track/models.py
@@ -7,11 +7,12 @@ class TrackingLog(models.Model):
username = models.CharField(max_length=32,blank=True)
ip = models.CharField(max_length=32,blank=True)
event_source = models.CharField(max_length=32)
- event_type = models.CharField(max_length=32,blank=True)
+ event_type = models.CharField(max_length=512,blank=True)
event = models.TextField(blank=True)
agent = models.CharField(max_length=256,blank=True)
- page = models.CharField(max_length=32,blank=True,null=True)
+ page = models.CharField(max_length=512,blank=True,null=True)
time = models.DateTimeField('event time')
+ host = models.CharField(max_length=64,blank=True)
def __unicode__(self):
s = "[%s] %s@%s: %s | %s | %s | %s" % (self.time, self.username, self.ip, self.event_source,
diff --git a/common/djangoapps/track/views.py b/common/djangoapps/track/views.py
index 434e75a63f..54bd476799 100644
--- a/common/djangoapps/track/views.py
+++ b/common/djangoapps/track/views.py
@@ -17,7 +17,7 @@ from track.models import TrackingLog
log = logging.getLogger("tracking")
-LOGFIELDS = ['username','ip','event_source','event_type','event','agent','page','time']
+LOGFIELDS = ['username','ip','event_source','event_type','event','agent','page','time','host']
def log_event(event):
event_str = json.dumps(event)
@@ -58,6 +58,7 @@ def user_track(request):
"agent": agent,
"page": request.GET['page'],
"time": datetime.datetime.utcnow().isoformat(),
+ "host": request.META['SERVER_NAME'],
}
log_event(event)
return HttpResponse('success')
@@ -83,6 +84,7 @@ def server_track(request, event_type, event, page=None):
"agent": agent,
"page": page,
"time": datetime.datetime.utcnow().isoformat(),
+ "host": request.META['SERVER_NAME'],
}
if event_type.startswith("/event_logs") and request.user.is_staff: # don't log
diff --git a/common/djangoapps/util/json_request.py b/common/djangoapps/util/json_request.py
index f1989b01ff..4beff7bdc8 100644
--- a/common/djangoapps/util/json_request.py
+++ b/common/djangoapps/util/json_request.py
@@ -4,6 +4,11 @@ import json
def expect_json(view_function):
+ """
+ View decorator for simplifying handing of requests that expect json. If the request's
+ CONTENT_TYPE is application/json, parses the json dict from request.body, and updates
+ request.POST with the contents.
+ """
@wraps(view_function)
def expect_json_with_cloned_request(request, *args, **kwargs):
# cdodge: fix postback errors in CMS. The POST 'content-type' header can include additional information
diff --git a/common/lib/capa/.coveragerc b/common/lib/capa/.coveragerc
index 6af3218f75..149a4c860a 100644
--- a/common/lib/capa/.coveragerc
+++ b/common/lib/capa/.coveragerc
@@ -7,6 +7,7 @@ source = common/lib/capa
ignore_errors = True
[html]
+title = Capa Python Test Coverage Report
directory = reports/common/lib/capa/cover
[xml]
diff --git a/common/lib/capa/capa/capa_problem.py b/common/lib/capa/capa/capa_problem.py
index 451891d067..efc96fc717 100644
--- a/common/lib/capa/capa/capa_problem.py
+++ b/common/lib/capa/capa/capa_problem.py
@@ -33,6 +33,7 @@ from xml.sax.saxutils import unescape
import chem
import chem.chemcalc
import chem.chemtools
+import chem.miller
import calc
from correctmap import CorrectMap
@@ -52,7 +53,7 @@ response_tag_dict = dict([(x.response_tag, x) for x in responsetypes.__all__])
solution_tags = ['solution']
# these get captured as student responses
-response_properties = ["codeparam", "responseparam", "answer"]
+response_properties = ["codeparam", "responseparam", "answer", "openendedparam"]
# special problem tags which should be turned into innocuous HTML
html_transforms = {'problem': {'tag': 'div'},
@@ -67,10 +68,11 @@ global_context = {'random': random,
'calc': calc,
'eia': eia,
'chemcalc': chem.chemcalc,
- 'chemtools': chem.chemtools}
+ 'chemtools': chem.chemtools,
+ 'miller': chem.miller}
# These should be removed from HTML output, including all subelements
-html_problem_semantics = ["codeparam", "responseparam", "answer", "script", "hintgroup"]
+html_problem_semantics = ["codeparam", "responseparam", "answer", "script", "hintgroup", "openendedparam","openendedrubric"]
log = logging.getLogger('mitx.' + __name__)
@@ -184,6 +186,24 @@ class LoncapaProblem(object):
maxscore += responder.get_max_score()
return maxscore
+ def message_post(self,event_info):
+ """
+ Handle an ajax post that contains feedback on feedback
+ Returns a boolean success variable
+ Note: This only allows for feedback to be posted back to the grading controller for the first
+ open ended response problem on each page. Multiple problems will cause some sync issues.
+ TODO: Handle multiple problems on one page sync issues.
+ """
+ success=False
+ message = "Could not find a valid responder."
+ log.debug("in lcp")
+ for responder in self.responders.values():
+ if hasattr(responder, 'handle_message_post'):
+ success, message = responder.handle_message_post(event_info)
+ if success:
+ break
+ return success, message
+
def get_score(self):
"""
Compute score for this problem. The score is the number of points awarded.
diff --git a/common/lib/capa/capa/chem/miller.py b/common/lib/capa/capa/chem/miller.py
new file mode 100644
index 0000000000..4c10e60ecc
--- /dev/null
+++ b/common/lib/capa/capa/chem/miller.py
@@ -0,0 +1,267 @@
+""" Calculation of Miller indices """
+
+import numpy as np
+import math
+import fractions as fr
+import decimal
+import json
+
+
+def lcm(a, b):
+ """
+ Returns least common multiple of a, b
+
+ Args:
+ a, b: floats
+
+ Returns:
+ float
+ """
+ return a * b / fr.gcd(a, b)
+
+
+def segment_to_fraction(distance):
+ """
+ Converts lengths of which the plane cuts the axes to fraction.
+
+ Tries convert distance to closest nicest fraction with denominator less or
+ equal than 10. It is
+ purely for simplicity and clarity of learning purposes. Jenny: 'In typical
+ courses students usually do not encounter indices any higher than 6'.
+
+ If distance is not a number (numpy nan), it means that plane is parallel to
+ axis or contains it. Inverted fraction to nan (nan is 1/0) = 0 / 1 is
+ returned
+
+ Generally (special cases):
+
+ a) if distance is smaller than some constant, e.g. 0.01011,
+ then the fraction's denominator is usually much greater than 10.
+
+ b) Also, if student will set point on 0.66 -> 1/3, so it is 333 plane,
+ But if he will slightly move the mouse and click on 0.65 -> it will be
+ (16,15,16) plane. That's why we are doing adjustments for points coordinates,
+ to the closest tick, tick + tick / 2 value. And now UI sends to server only
+ values multiple to 0.05 (half of tick). Same rounding is implemented for
+ unittests.
+
+ But if one will want to calculate miller indices with exact coordinates and
+ with nice fractions (which produce small Miller indices), he may want shift
+ to new origin if segments are like S = (0.015, > 0.05, >0.05) - close to zero
+ in one coordinate. He may update S to (0, >0.05, >0.05) and shift origin.
+ In this way he can receive nice small fractions. Also there can be a
+ degenerate case when S = (0.015, 0.012, >0.05) - if we update S to (0, 0, >0.05) -
+ it is a line. This case should be considered separately. Small nice Miller
+ numbers and possibility to create very small segments can not be implemented
+ at same time).
+
+
+ Args:
+ distance: float distance that plane cuts on axis, it must not be 0.
+ Distance is multiple of 0.05.
+
+ Returns:
+ Inverted fraction.
+ 0 / 1 if distance is nan
+
+ """
+ if np.isnan(distance):
+ return fr.Fraction(0, 1)
+ else:
+ fract = fr.Fraction(distance).limit_denominator(10)
+ return fr.Fraction(fract.denominator, fract.numerator)
+
+
+def sub_miller(segments):
+ '''
+ Calculates Miller indices from segments.
+
+ Algorithm:
+
+ 1. Obtain inverted fraction from segments
+
+ 2. Find common denominator of inverted fractions
+
+ 3. Lead fractions to common denominator and throws denominator away.
+
+ 4. Return obtained values.
+
+ Args:
+ List of 3 floats, meaning distances that plane cuts on x, y, z axes.
+ No float equals zero, which means that the plane does not intersect the origin,
+ i.e. the shift of origin has already been done.
+
+ Returns:
+ String that represents Miller indices, e.g: (-6,3,-6) or (2,2,2)
+ '''
+ fracts = [segment_to_fraction(segment) for segment in segments]
+ common_denominator = reduce(lcm, [fract.denominator for fract in fracts])
+ miller = ([fract.numerator * math.fabs(common_denominator) /
+ fract.denominator for fract in fracts])
+ return'(' + ','.join(map(str, map(decimal.Decimal, miller))) + ')'
+
+
+def miller(points):
+ """
+ Calculates Miller indices from points.
+
+ Algorithm:
+
+ 1. Calculate normal vector to a plane that goes through all points.
+
+ 2. Set origin.
+
+ 3. Create Cartesian coordinate system (Ccs).
+
+ 4. Find the lengths of segments of which the plane cuts the axes. Equation
+ of a line for axes: Origin + (Coordinate_vector - Origin) * parameter.
+
+ 5. If plane goes through Origin:
+
+ a) Find new random origin: find unit cube vertex, not crossed by a plane.
+
+ b) Repeat 2-4.
+
+ c) Fix signs of segments after Origin shift. This means to consider
+ original directions of axes. E.g.: Origin was 0,0,0 and became
+ new_origin. If new_origin has same Y coordinate as Origin, then segment
+ does not change its sign. But if new_origin has another Y coordinate than
+ origin (was 0, became 1), than segment has to change its sign (it now
+ lies on negative side of Y axis). New Origin 0 value of X or Y or Z
+ coordinate means that segment does not change sign, 1 value -> does
+ change. So new sign is (1 - 2 * new_origin): 0 -> 1, 1 -> -1
+
+ 6. Run function that calculates miller indices from segments.
+
+ Args:
+ List of points. Each point is list of float coordinates. Order of
+ coordinates in point's list: x, y, z. Points are different!
+
+ Returns:
+ String that represents Miller indices, e.g: (-6,3,-6) or (2,2,2)
+ """
+
+ N = np.cross(points[1] - points[0], points[2] - points[0])
+ O = np.array([0, 0, 0])
+ P = points[0] # point of plane
+ Ccs = map(np.array, [[1.0, 0, 0], [0, 1.0, 0], [0, 0, 1.0]])
+ segments = ([np.dot(P - O, N) / np.dot(ort, N) if np.dot(ort, N) != 0 else
+ np.nan for ort in Ccs])
+ if any(x == 0 for x in segments): # Plane goes through origin.
+ vertices = [ # top:
+ np.array([1.0, 1.0, 1.0]),
+ np.array([0.0, 0.0, 1.0]),
+ np.array([1.0, 0.0, 1.0]),
+ np.array([0.0, 1.0, 1.0]),
+ # bottom, except 0,0,0:
+ np.array([1.0, 0.0, 0.0]),
+ np.array([0.0, 1.0, 0.0]),
+ np.array([1.0, 1.0, 1.0]),
+ ]
+ for vertex in vertices:
+ if np.dot(vertex - O, N) != 0: # vertex not in plane
+ new_origin = vertex
+ break
+ # obtain new axes with center in new origin
+ X = np.array([1 - new_origin[0], new_origin[1], new_origin[2]])
+ Y = np.array([new_origin[0], 1 - new_origin[1], new_origin[2]])
+ Z = np.array([new_origin[0], new_origin[1], 1 - new_origin[2]])
+ new_Ccs = [X - new_origin, Y - new_origin, Z - new_origin]
+ segments = ([np.dot(P - new_origin, N) / np.dot(ort, N) if
+ np.dot(ort, N) != 0 else np.nan for ort in new_Ccs])
+ # fix signs of indices: 0 -> 1, 1 -> -1 (
+ segments = (1 - 2 * new_origin) * segments
+
+ return sub_miller(segments)
+
+
+def grade(user_input, correct_answer):
+ '''
+ Grade crystallography problem.
+
+ Returns true if lattices are the same and Miller indices are same or minus
+ same. E.g. (2,2,2) = (2, 2, 2) or (-2, -2, -2). Because sign depends only
+ on student's selection of origin.
+
+ Args:
+ user_input, correct_answer: json. Format:
+
+ user_input: {"lattice":"sc","points":[["0.77","0.00","1.00"],
+ ["0.78","1.00","0.00"],["0.00","1.00","0.72"]]}
+
+ correct_answer: {'miller': '(00-1)', 'lattice': 'bcc'}
+
+ "lattice" is one of: "", "sc", "bcc", "fcc"
+
+ Returns:
+ True or false.
+ '''
+ def negative(m):
+ """
+ Change sign of Miller indices.
+
+ Args:
+ m: string with meaning of Miller indices. E.g.:
+ (-6,3,-6) -> (6, -3, 6)
+
+ Returns:
+ String with changed signs.
+ """
+ output = ''
+ i = 1
+ while i in range(1, len(m) - 1):
+ if m[i] in (',', ' '):
+ output += m[i]
+ elif m[i] not in ('-', '0'):
+ output += '-' + m[i]
+ elif m[i] == '0':
+ output += m[i]
+ else:
+ i += 1
+ output += m[i]
+ i += 1
+ return '(' + output + ')'
+
+ def round0_25(point):
+ """
+ Rounds point coordinates to the closest 0.05 value.
+
+ Args:
+ point: list of float coordinates. Order of coordinates: x, y, z.
+
+ Returns:
+ list of coordinates rounded to the closest 0.05 value
+ """
+ rounded_points = []
+ for coord in point:
+ base = math.floor(coord * 10)
+ fractional_part = (coord * 10 - base)
+ aliquot0_25 = math.floor(fractional_part / 0.25)
+ if aliquot0_25 == 0.0:
+ rounded_points.append(base / 10)
+ if aliquot0_25 in (1.0, 2.0):
+ rounded_points.append(base / 10 + 0.05)
+ if aliquot0_25 == 3.0:
+ rounded_points.append(base / 10 + 0.1)
+ return rounded_points
+
+ user_answer = json.loads(user_input)
+
+ if user_answer['lattice'] != correct_answer['lattice']:
+ return False
+
+ points = [map(float, p) for p in user_answer['points']]
+
+ if len(points) < 3:
+ return False
+
+ # round point to the closest 0.05 value
+ points = [round0_25(point) for point in points]
+
+ points = [np.array(point) for point in points]
+ # print miller(points), (correct_answer['miller'].replace(' ', ''),
+ # negative(correct_answer['miller']).replace(' ', ''))
+ if miller(points) in (correct_answer['miller'].replace(' ', ''), negative(correct_answer['miller']).replace(' ', '')):
+ return True
+
+ return False
diff --git a/common/lib/capa/capa/chem/tests.py b/common/lib/capa/capa/chem/tests.py
index 34d903ec1d..571526f915 100644
--- a/common/lib/capa/capa/chem/tests.py
+++ b/common/lib/capa/capa/chem/tests.py
@@ -1,13 +1,15 @@
import codecs
from fractions import Fraction
-from pyparsing import ParseException
import unittest
from chemcalc import (compare_chemical_expression, divide_chemical_expression,
render_to_html, chemical_equations_equal)
+import miller
+
local_debug = None
+
def log(s, output_type=None):
if local_debug:
print s
@@ -37,7 +39,6 @@ class Test_Compare_Equations(unittest.TestCase):
self.assertFalse(chemical_equations_equal('2H2 + O2 -> H2O2',
'2O2 + 2H2 -> 2H2O2'))
-
def test_different_arrows(self):
self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2',
'2O2 + 2H2 -> 2H2O2'))
@@ -56,7 +57,6 @@ class Test_Compare_Equations(unittest.TestCase):
self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2',
'O2 + H2 -> H2O2', exact=True))
-
def test_syntax_errors(self):
self.assertFalse(chemical_equations_equal('H2 + O2 a-> H2O2',
'2O2 + 2H2 -> 2H2O2'))
@@ -311,7 +311,6 @@ class Test_Render_Equations(unittest.TestCase):
log(out + ' ------- ' + correct, 'html')
self.assertEqual(out, correct)
-
def test_render_eq3(self):
s = "H^+ + OH^- <= H2O" # unsupported arrow
out = render_to_html(s)
@@ -320,10 +319,148 @@ class Test_Render_Equations(unittest.TestCase):
self.assertEqual(out, correct)
+class Test_Crystallography_Miller(unittest.TestCase):
+ ''' Tests for crystallography grade function.'''
+
+ def test_empty_points(self):
+ user_input = '{"lattice": "bcc", "points": []}'
+ self.assertFalse(miller.grade(user_input, {'miller': '(2,2,2)', 'lattice': 'bcc'}))
+
+ def test_only_one_point(self):
+ user_input = '{"lattice": "bcc", "points": [["0.50", "0.00", "0.00"]]}'
+ self.assertFalse(miller.grade(user_input, {'miller': '(2,2,2)', 'lattice': 'bcc'}))
+
+ def test_only_two_points(self):
+ user_input = '{"lattice": "bcc", "points": [["0.50", "0.00", "0.00"], ["0.00", "0.50", "0.00"]]}'
+ self.assertFalse(miller.grade(user_input, {'miller': '(2,2,2)', 'lattice': 'bcc'}))
+
+ def test_1(self):
+ user_input = '{"lattice": "bcc", "points": [["0.50", "0.00", "0.00"], ["0.00", "0.50", "0.00"], ["0.00", "0.00", "0.50"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(2,2,2)', 'lattice': 'bcc'}))
+
+ def test_2(self):
+ user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "0.00"], ["0.00", "1.00", "0.00"], ["0.00", "0.00", "1.00"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(1,1,1)', 'lattice': 'bcc'}))
+
+ def test_3(self):
+ user_input = '{"lattice": "bcc", "points": [["1.00", "0.50", "1.00"], ["1.00", "1.00", "0.50"], ["0.50", "1.00", "1.00"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(2,2,2)', 'lattice': 'bcc'}))
+
+ def test_4(self):
+ user_input = '{"lattice": "bcc", "points": [["0.33", "1.00", "0.00"], ["0.00", "0.664", "0.00"], ["0.00", "1.00", "0.33"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(-3, 3, -3)', 'lattice': 'bcc'}))
+
+ def test_5(self):
+ """ Returns true only in case the point coordinates are exact.
+ But if they are rounded to the closest 0.05 value it is not true"""
+ user_input = '{"lattice": "bcc", "points": [["0.33", "1.00", "0.00"], ["0.00", "0.33", "0.00"], ["0.00", "1.00", "0.33"]]}'
+ self.assertFalse(miller.grade(user_input, {'miller': '(-6,3,-6)', 'lattice': 'bcc'}))
+
+ def test_6(self):
+ user_input = '{"lattice": "bcc", "points": [["0.00", "0.25", "0.00"], ["0.25", "0.00", "0.00"], ["0.00", "0.00", "0.25"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(4,4,4)', 'lattice': 'bcc'}))
+
+ def test_7(self): # goes through origin
+ user_input = '{"lattice": "bcc", "points": [["0.00", "1.00", "0.00"], ["1.00", "0.00", "0.00"], ["0.50", "1.00", "0.00"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(0,0,-1)', 'lattice': 'bcc'}))
+
+ def test_8(self):
+ user_input = '{"lattice": "bcc", "points": [["0.00", "1.00", "0.50"], ["1.00", "0.00", "0.50"], ["0.50", "1.00", "0.50"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(0,0,2)', 'lattice': 'bcc'}))
+
+ def test_9(self):
+ user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "1.00"], ["0.00", "1.00", "1.00"], ["1.00", "0.00", "0.00"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(1,1,0)', 'lattice': 'bcc'}))
+
+ def test_10(self):
+ user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "1.00"], ["0.00", "0.00", "0.00"], ["0.00", "1.00", "1.00"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(1,1,-1)', 'lattice': 'bcc'}))
+
+ def test_11(self):
+ user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "0.50"], ["1.00", "1.00", "0.00"], ["0.00", "1.00", "0.00"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(0,1,2)', 'lattice': 'bcc'}))
+
+ def test_12(self):
+ user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "0.50"], ["0.00", "0.00", "0.50"], ["1.00", "1.00", "1.00"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(0,1,-2)', 'lattice': 'bcc'}))
+
+ def test_13(self):
+ user_input = '{"lattice": "bcc", "points": [["0.50", "0.00", "0.00"], ["0.50", "1.00", "0.00"], ["0.00", "0.00", "1.00"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(2,0,1)', 'lattice': 'bcc'}))
+
+ def test_14(self):
+ user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["0.00", "0.00", "1.00"], ["0.50", "1.00", "0.00"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(2,-1,0)', 'lattice': 'bcc'}))
+
+ def test_15(self):
+ user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "1.00", "0.00"], ["0.00", "1.00", "1.00"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(1,-1,1)', 'lattice': 'bcc'}))
+
+ def test_16(self):
+ user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "0.00"], ["0.00", "1.00", "0.00"], ["1.00", "1.00", "1.00"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(1,1,-1)', 'lattice': 'bcc'}))
+
+ def test_17(self):
+ user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "0.00", "1.00"], ["1.00", "1.00", "0.00"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(-1,1,1)', 'lattice': 'bcc'}))
+
+ def test_18(self):
+ user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "1.00", "0.00"], ["0.00", "1.00", "1.00"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(1,-1,1)', 'lattice': 'bcc'}))
+
+ def test_19(self):
+ user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "1.00", "0.00"], ["0.00", "0.00", "1.00"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(-1,1,0)', 'lattice': 'bcc'}))
+
+ def test_20(self):
+ user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "0.00"], ["1.00", "1.00", "0.00"], ["0.00", "0.00", "1.00"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(1,0,1)', 'lattice': 'bcc'}))
+
+ def test_21(self):
+ user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["0.00", "1.00", "0.00"], ["1.00", "0.00", "1.00"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(-1,0,1)', 'lattice': 'bcc'}))
+
+ def test_22(self):
+ user_input = '{"lattice": "bcc", "points": [["0.00", "1.00", "0.00"], ["1.00", "1.00", "0.00"], ["0.00", "0.00", "1.00"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(0,1,1)', 'lattice': 'bcc'}))
+
+ def test_23(self):
+ user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "0.00", "0.00"], ["1.00", "1.00", "1.00"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(0,-1,1)', 'lattice': 'bcc'}))
+
+ def test_24(self):
+ user_input = '{"lattice": "bcc", "points": [["0.66", "0.00", "0.00"], ["0.00", "0.66", "0.00"], ["0.00", "0.00", "0.66"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(3,3,3)', 'lattice': 'bcc'}))
+
+ def test_25(self):
+ user_input = u'{"lattice":"","points":[["0.00","0.00","0.01"],["1.00","1.00","0.01"],["0.00","1.00","1.00"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(1,-1,1)', 'lattice': ''}))
+
+ def test_26(self):
+ user_input = u'{"lattice":"","points":[["0.00","0.01","0.00"],["1.00","0.00","0.00"],["0.00","0.00","1.00"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(0,-1,0)', 'lattice': ''}))
+
+ def test_27(self):
+ """ rounding to 0.35"""
+ user_input = u'{"lattice":"","points":[["0.33","0.00","0.00"],["0.00","0.33","0.00"],["0.00","0.00","0.33"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(3,3,3)', 'lattice': ''}))
+
+ def test_28(self):
+ """ rounding to 0.30"""
+ user_input = u'{"lattice":"","points":[["0.30","0.00","0.00"],["0.00","0.30","0.00"],["0.00","0.00","0.30"]]}'
+ self.assertTrue(miller.grade(user_input, {'miller': '(10,10,10)', 'lattice': ''}))
+
+ def test_wrong_lattice(self):
+ user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "0.00", "0.00"], ["1.00", "1.00", "1.00"]]}'
+ self.assertFalse(miller.grade(user_input, {'miller': '(3,3,3)', 'lattice': 'fcc'}))
+
def suite():
- testcases = [Test_Compare_Expressions, Test_Divide_Expressions, Test_Render_Equations]
+ testcases = [Test_Compare_Expressions,
+ Test_Divide_Expressions,
+ Test_Render_Equations,
+ Test_Crystallography_Miller]
suites = []
for testcase in testcases:
suites.append(unittest.TestLoader().loadTestsFromTestCase(testcase))
diff --git a/common/lib/capa/capa/inputtypes.py b/common/lib/capa/capa/inputtypes.py
index 0b2250f98d..e3eb47acc5 100644
--- a/common/lib/capa/capa/inputtypes.py
+++ b/common/lib/capa/capa/inputtypes.py
@@ -671,18 +671,15 @@ class Crystallography(InputTypeBase):
"""
Note: height, width are required.
"""
- return [Attribute('size', None),
- Attribute('height'),
+ return [Attribute('height'),
Attribute('width'),
-
- # can probably be removed (textline should prob be always-hidden)
- Attribute('hidden', ''),
]
registry.register(Crystallography)
# -------------------------------------------------------------------------
+
class VseprInput(InputTypeBase):
"""
Input for molecular geometry--show possible structures, let student
@@ -736,3 +733,53 @@ class ChemicalEquationInput(InputTypeBase):
return {'previewer': '/static/js/capa/chemical_equation_preview.js',}
registry.register(ChemicalEquationInput)
+
+#-----------------------------------------------------------------------------
+
+class OpenEndedInput(InputTypeBase):
+ """
+ A text area input for code--uses codemirror, does syntax highlighting, special tab handling,
+ etc.
+ """
+
+ template = "openendedinput.html"
+ tags = ['openendedinput']
+
+ # pulled out for testing
+ submitted_msg = ("Feedback not yet available. Reload to check again. "
+ "Once the problem is graded, this message will be "
+ "replaced with the grader's feedback.")
+
+ @classmethod
+ def get_attributes(cls):
+ """
+ Convert options to a convenient format.
+ """
+ return [Attribute('rows', '30'),
+ Attribute('cols', '80'),
+ Attribute('hidden', ''),
+ ]
+
+ def setup(self):
+ """
+ Implement special logic: handle queueing state, and default input.
+ """
+ # if no student input yet, then use the default input given by the problem
+ if not self.value:
+ self.value = self.xml.text
+
+ # Check if problem has been queued
+ self.queue_len = 0
+ # Flag indicating that the problem has been queued, 'msg' is length of queue
+ if self.status == 'incomplete':
+ self.status = 'queued'
+ self.queue_len = self.msg
+ self.msg = self.submitted_msg
+
+ def _extra_context(self):
+ """Defined queue_len, add it """
+ return {'queue_len': self.queue_len,}
+
+registry.register(OpenEndedInput)
+
+#-----------------------------------------------------------------------------
diff --git a/common/lib/capa/capa/responsetypes.py b/common/lib/capa/capa/responsetypes.py
index 418ee9d8ae..16dc15297d 100644
--- a/common/lib/capa/capa/responsetypes.py
+++ b/common/lib/capa/capa/responsetypes.py
@@ -8,22 +8,25 @@ Used by capa_problem.py
'''
# standard library imports
+import abc
import cgi
+import hashlib
import inspect
import json
import logging
import numbers
import numpy
+import os
import random
import re
import requests
-import traceback
-import hashlib
-import abc
-import os
import subprocess
+import traceback
import xml.sax.saxutils as saxutils
+from collections import namedtuple
+from shapely.geometry import Point, MultiPoint
+
# specific library imports
from calc import evaluator, UndefinedVariable
from correctmap import CorrectMap
@@ -1104,6 +1107,15 @@ class SymbolicResponse(CustomResponse):
#-----------------------------------------------------------------------------
+"""
+valid: Flag indicating valid score_msg format (Boolean)
+correct: Correctness of submission (Boolean)
+score: Points to be assigned (numeric, can be float)
+msg: Message from grader to display to student (string)
+"""
+ScoreMessage = namedtuple('ScoreMessage',
+ ['valid', 'correct', 'points', 'msg'])
+
class CodeResponse(LoncapaResponse):
"""
@@ -1149,7 +1161,7 @@ class CodeResponse(LoncapaResponse):
else:
self._parse_coderesponse_xml(codeparam)
- def _parse_coderesponse_xml(self,codeparam):
+ def _parse_coderesponse_xml(self, codeparam):
'''
Parse the new CodeResponse XML format. When successful, sets:
self.initial_display
@@ -1161,17 +1173,9 @@ class CodeResponse(LoncapaResponse):
grader_payload = grader_payload.text if grader_payload is not None else ''
self.payload = {'grader_payload': grader_payload}
- answer_display = codeparam.find('answer_display')
- if answer_display is not None:
- self.answer = answer_display.text
- else:
- self.answer = 'No answer provided.'
-
- initial_display = codeparam.find('initial_display')
- if initial_display is not None:
- self.initial_display = initial_display.text
- else:
- self.initial_display = ''
+ self.initial_display = find_with_default(codeparam, 'initial_display', '')
+ self.answer = find_with_default(codeparam, 'answer_display',
+ 'No answer provided.')
def _parse_externalresponse_xml(self):
'''
@@ -1325,8 +1329,6 @@ class CodeResponse(LoncapaResponse):
# Sanity check on returned points
if points < 0:
points = 0
- elif points > self.maxpoints[self.answer_id]:
- points = self.maxpoints[self.answer_id]
# Queuestate is consumed
oldcmap.set(self.answer_id, npoints=points, correctness=correctness,
msg=msg.replace(' ', ' '), queuestate=None)
@@ -1734,15 +1736,38 @@ class ImageResponse(LoncapaResponse):
which produces an [x,y] coordinate pair. The click is correct if it falls
within a region specified. This region is a union of rectangles.
- Lon-CAPA requires that each
has a inside it. That
- doesn't make sense to me (Ike). Instead, let's have it such that
- should contain one or more stanzas. Each should specify
- a rectangle, given as an attribute, defining the correct answer.
+ Lon-CAPA requires that each has a inside it.
+ That doesn't make sense to me (Ike). Instead, let's have it such that
+ should contain one or more stanzas.
+ Each should specify a rectangle(s) or region(s), given as an
+ attribute, defining the correct answer.
+
+
+
+ Regions is list of lists [region1, region2, region3, ...] where regionN
+ is disordered list of points: [[1,1], [100,100], [50,50], [20, 70]].
+
+ If there is only one region in the list, simpler notation can be used:
+ regions="[[10,10], [30,30], [10, 30], [30, 10]]" (without explicitly
+ setting outer list)
+
+ Returns:
+ True, if click is inside any region or rectangle. Otherwise False.
"""
snippets = [{'snippet': '''
-
-
-
+
+
+
+
+
'''}]
response_tag = 'imageresponse'
@@ -1750,19 +1775,17 @@ class ImageResponse(LoncapaResponse):
def setup_response(self):
self.ielements = self.inputfields
- self.answer_ids = [ie.get('id') for ie in self.ielements]
+ self.answer_ids = [ie.get('id') for ie in self.ielements]
def get_score(self, student_answers):
correct_map = CorrectMap()
expectedset = self.get_answers()
-
- for aid in self.answer_ids: # loop through IDs of fields in our stanza
- given = student_answers[aid] # this should be a string of the form '[x,y]'
-
+ for aid in self.answer_ids: # loop through IDs of
+ # fields in our stanza
+ given = student_answers[aid] # this should be a string of the form '[x,y]'
correct_map.set(aid, 'incorrect')
- if not given: # No answer to parse. Mark as incorrect and move on
+ if not given: # No answer to parse. Mark as incorrect and move on
continue
-
# parse given answer
m = re.match('\[([0-9]+),([0-9]+)]', given.strip().replace(' ', ''))
if not m:
@@ -1770,28 +1793,481 @@ class ImageResponse(LoncapaResponse):
'error grading %s (input=%s)' % (aid, given))
(gx, gy) = [int(x) for x in m.groups()]
- # Check whether given point lies in any of the solution rectangles
- solution_rectangles = expectedset[aid].split(';')
- for solution_rectangle in solution_rectangles:
- # parse expected answer
- # TODO: Compile regexp on file load
- m = re.match('[\(\[]([0-9]+),([0-9]+)[\)\]]-[\(\[]([0-9]+),([0-9]+)[\)\]]',
- solution_rectangle.strip().replace(' ', ''))
- if not m:
- msg = 'Error in problem specification! cannot parse rectangle in %s' % (
- etree.tostring(self.ielements[aid], pretty_print=True))
- raise Exception('[capamodule.capa.responsetypes.imageinput] ' + msg)
- (llx, lly, urx, ury) = [int(x) for x in m.groups()]
-
- # answer is correct if (x,y) is within the specified rectangle
- if (llx <= gx <= urx) and (lly <= gy <= ury):
- correct_map.set(aid, 'correct')
- break
+ rectangles, regions = expectedset
+ if rectangles[aid]: # rectangles part - for backward compatibility
+ # Check whether given point lies in any of the solution rectangles
+ solution_rectangles = rectangles[aid].split(';')
+ for solution_rectangle in solution_rectangles:
+ # parse expected answer
+ # TODO: Compile regexp on file load
+ m = re.match('[\(\[]([0-9]+),([0-9]+)[\)\]]-[\(\[]([0-9]+),([0-9]+)[\)\]]',
+ solution_rectangle.strip().replace(' ', ''))
+ if not m:
+ msg = 'Error in problem specification! cannot parse rectangle in %s' % (
+ etree.tostring(self.ielements[aid], pretty_print=True))
+ raise Exception('[capamodule.capa.responsetypes.imageinput] ' + msg)
+ (llx, lly, urx, ury) = [int(x) for x in m.groups()]
+ # answer is correct if (x,y) is within the specified rectangle
+ if (llx <= gx <= urx) and (lly <= gy <= ury):
+ correct_map.set(aid, 'correct')
+ break
+ if correct_map[aid]['correctness'] != 'correct' and regions[aid]:
+ parsed_region = json.loads(regions[aid])
+ if parsed_region:
+ if type(parsed_region[0][0]) != list:
+ # we have [[1,2],[3,4],[5,6]] - single region
+ # instead of [[[1,2],[3,4],[5,6], [[1,2],[3,4],[5,6]]]
+ # or [[[1,2],[3,4],[5,6]]] - multiple regions syntax
+ parsed_region = [parsed_region]
+ for region in parsed_region:
+ polygon = MultiPoint(region).convex_hull
+ if (polygon.type == 'Polygon' and
+ polygon.contains(Point(gx, gy))):
+ correct_map.set(aid, 'correct')
+ break
return correct_map
def get_answers(self):
- return dict([(ie.get('id'), ie.get('rectangle')) for ie in self.ielements])
+ return (dict([(ie.get('id'), ie.get('rectangle')) for ie in self.ielements]),
+ dict([(ie.get('id'), ie.get('regions')) for ie in self.ielements]))
+#-----------------------------------------------------------------------------
+
+class OpenEndedResponse(LoncapaResponse):
+ """
+ Grade student open ended responses using an external grading system,
+ accessed through the xqueue system.
+
+ Expects 'xqueue' dict in ModuleSystem with the following keys that are
+ needed by OpenEndedResponse:
+
+ system.xqueue = { 'interface': XqueueInterface object,
+ 'callback_url': Per-StudentModule callback URL
+ where results are posted (string),
+ }
+
+ External requests are only submitted for student submission grading
+ (i.e. and not for getting reference answers)
+
+ By default, uses the OpenEndedResponse.DEFAULT_QUEUE queue.
+ """
+
+ DEFAULT_QUEUE = 'open-ended'
+ DEFAULT_MESSAGE_QUEUE = 'open-ended-message'
+ response_tag = 'openendedresponse'
+ allowed_inputfields = ['openendedinput']
+ max_inputfields = 1
+
+ def setup_response(self):
+ '''
+ Configure OpenEndedResponse from XML.
+ '''
+ xml = self.xml
+ self.url = xml.get('url', None)
+ self.queue_name = xml.get('queuename', self.DEFAULT_QUEUE)
+ self.message_queue_name = xml.get('message-queuename', self.DEFAULT_MESSAGE_QUEUE)
+
+ # The openendedparam tag encapsulates all grader settings
+ oeparam = self.xml.find('openendedparam')
+ prompt = self.xml.find('prompt')
+ rubric = self.xml.find('openendedrubric')
+
+ #This is needed to attach feedback to specific responses later
+ self.submission_id=None
+ self.grader_id=None
+
+ if oeparam is None:
+ raise ValueError("No oeparam found in problem xml.")
+ if prompt is None:
+ raise ValueError("No prompt found in problem xml.")
+ if rubric is None:
+ raise ValueError("No rubric found in problem xml.")
+
+ self._parse(oeparam, prompt, rubric)
+
+ @staticmethod
+ def stringify_children(node):
+ """
+ Modify code from stringify_children in xmodule. Didn't import directly
+ in order to avoid capa depending on xmodule (seems to be avoided in
+ code)
+ """
+ parts=[node.text if node.text is not None else '']
+ for p in node.getchildren():
+ parts.append(etree.tostring(p, with_tail=True, encoding='unicode'))
+
+ return ' '.join(parts)
+
+ def _parse(self, oeparam, prompt, rubric):
+ '''
+ Parse OpenEndedResponse XML:
+ self.initial_display
+ self.payload - dict containing keys --
+ 'grader' : path to grader settings file, 'problem_id' : id of the problem
+
+ self.answer - What to display when show answer is clicked
+ '''
+ # Note that OpenEndedResponse is agnostic to the specific contents of grader_payload
+ prompt_string = self.stringify_children(prompt)
+ rubric_string = self.stringify_children(rubric)
+
+ grader_payload = oeparam.find('grader_payload')
+ grader_payload = grader_payload.text if grader_payload is not None else ''
+
+ #Update grader payload with student id. If grader payload not json, error.
+ try:
+ parsed_grader_payload = json.loads(grader_payload)
+ # NOTE: self.system.location is valid because the capa_module
+ # __init__ adds it (easiest way to get problem location into
+ # response types)
+ except TypeError, ValueError:
+ log.exception("Grader payload %r is not a json object!", grader_payload)
+
+ self.initial_display = find_with_default(oeparam, 'initial_display', '')
+ self.answer = find_with_default(oeparam, 'answer_display', 'No answer given.')
+
+ parsed_grader_payload.update({
+ 'location' : self.system.location,
+ 'course_id' : self.system.course_id,
+ 'prompt' : prompt_string,
+ 'rubric' : rubric_string,
+ 'initial_display' : self.initial_display,
+ 'answer' : self.answer,
+ })
+
+ updated_grader_payload = json.dumps(parsed_grader_payload)
+
+ self.payload = {'grader_payload': updated_grader_payload}
+
+ try:
+ self.max_score = int(find_with_default(oeparam, 'max_score', 1))
+ except ValueError:
+ self.max_score = 1
+
+ def handle_message_post(self,event_info):
+ """
+ Handles a student message post (a reaction to the grade they received from an open ended grader type)
+ Returns a boolean success/fail and an error message
+ """
+ survey_responses=event_info['survey_responses']
+ for tag in ['feedback', 'submission_id', 'grader_id', 'score']:
+ if tag not in survey_responses:
+ return False, "Could not find needed tag {0}".format(tag)
+ try:
+ submission_id=int(survey_responses['submission_id'])
+ grader_id = int(survey_responses['grader_id'])
+ feedback = str(survey_responses['feedback'].encode('ascii', 'ignore'))
+ score = int(survey_responses['score'])
+ except:
+ error_message=("Could not parse submission id, grader id, "
+ "or feedback from message_post ajax call. Here is the message data: {0}".format(survey_responses))
+ log.exception(error_message)
+ return False, "There was an error saving your feedback. Please contact course staff."
+
+ qinterface = self.system.xqueue['interface']
+ qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat)
+ anonymous_student_id = self.system.anonymous_student_id
+ queuekey = xqueue_interface.make_hashkey(str(self.system.seed) + qtime +
+ anonymous_student_id +
+ self.answer_id)
+
+ xheader = xqueue_interface.make_xheader(
+ lms_callback_url=self.system.xqueue['callback_url'],
+ lms_key=queuekey,
+ queue_name=self.message_queue_name
+ )
+
+ student_info = {'anonymous_student_id': anonymous_student_id,
+ 'submission_time': qtime,
+ }
+ contents= {
+ 'feedback' : feedback,
+ 'submission_id' : submission_id,
+ 'grader_id' : grader_id,
+ 'score': score,
+ 'student_info' : json.dumps(student_info),
+ }
+
+ (error, msg) = qinterface.send_to_queue(header=xheader,
+ body=json.dumps(contents))
+
+ #Convert error to a success value
+ success=True
+ if error:
+ success=False
+
+ return success, "Successfully submitted your feedback."
+
+
+ def get_score(self, student_answers):
+
+ try:
+ submission = student_answers[self.answer_id]
+ except KeyError:
+ msg = ('Cannot get student answer for answer_id: {0}. student_answers {1}'
+ .format(self.answer_id, student_answers))
+ log.exception(msg)
+ raise LoncapaProblemError(msg)
+
+ # Prepare xqueue request
+ #------------------------------------------------------------
+
+ qinterface = self.system.xqueue['interface']
+ qtime = datetime.strftime(datetime.now(), xqueue_interface.dateformat)
+
+ anonymous_student_id = self.system.anonymous_student_id
+
+ # Generate header
+ queuekey = xqueue_interface.make_hashkey(str(self.system.seed) + qtime +
+ anonymous_student_id +
+ self.answer_id)
+
+ xheader = xqueue_interface.make_xheader(lms_callback_url=self.system.xqueue['callback_url'],
+ lms_key=queuekey,
+ queue_name=self.queue_name)
+
+ self.context.update({'submission': submission})
+
+ contents = self.payload.copy()
+
+ # Metadata related to the student submission revealed to the external grader
+ student_info = {'anonymous_student_id': anonymous_student_id,
+ 'submission_time': qtime,
+ }
+
+ #Update contents with student response and student info
+ contents.update({
+ 'student_info': json.dumps(student_info),
+ 'student_response': submission,
+ 'max_score' : self.max_score,
+ })
+
+ # Submit request. When successful, 'msg' is the prior length of the queue
+ (error, msg) = qinterface.send_to_queue(header=xheader,
+ body=json.dumps(contents))
+
+ # State associated with the queueing request
+ queuestate = {'key': queuekey,
+ 'time': qtime,}
+
+ cmap = CorrectMap()
+ if error:
+ cmap.set(self.answer_id, queuestate=None,
+ msg='Unable to deliver your submission to grader. (Reason: {0}.)'
+ ' Please try again later.'.format(msg))
+ else:
+ # Queueing mechanism flags:
+ # 1) Backend: Non-null CorrectMap['queuestate'] indicates that
+ # the problem has been queued
+ # 2) Frontend: correctness='incomplete' eventually trickles down
+ # through inputtypes.textbox and .filesubmission to inform the
+ # browser that the submission is queued (and it could e.g. poll)
+ cmap.set(self.answer_id, queuestate=queuestate,
+ correctness='incomplete', msg=msg)
+
+ return cmap
+
+ def update_score(self, score_msg, oldcmap, queuekey):
+ log.debug(score_msg)
+ score_msg = self._parse_score_msg(score_msg)
+ if not score_msg.valid:
+ oldcmap.set(self.answer_id,
+ msg = 'Invalid grader reply. Please contact the course staff.')
+ return oldcmap
+
+ correctness = 'correct' if score_msg.correct else 'incorrect'
+
+ # TODO: Find out how this is used elsewhere, if any
+ self.context['correct'] = correctness
+
+ # Replace 'oldcmap' with new grading results if queuekey matches. If queuekey
+ # does not match, we keep waiting for the score_msg whose key actually matches
+ if oldcmap.is_right_queuekey(self.answer_id, queuekey):
+ # Sanity check on returned points
+ points = score_msg.points
+ if points < 0:
+ points = 0
+
+ # Queuestate is consumed, so reset it to None
+ oldcmap.set(self.answer_id, npoints=points, correctness=correctness,
+ msg = score_msg.msg.replace(' ', ' '), queuestate=None)
+ else:
+ log.debug('OpenEndedResponse: queuekey {0} does not match for answer_id={1}.'.format(
+ queuekey, self.answer_id))
+
+ return oldcmap
+
+ def get_answers(self):
+ anshtml = '{0}
'.format(self.answer)
+ return {self.answer_id: anshtml}
+
+ def get_initial_display(self):
+ return {self.answer_id: self.initial_display}
+
+ def _convert_longform_feedback_to_html(self, response_items):
+ """
+ Take in a dictionary, and return html strings for display to student.
+ Input:
+ response_items: Dictionary with keys success, feedback.
+ if success is True, feedback should be a dictionary, with keys for
+ types of feedback, and the corresponding feedback values.
+ if success is False, feedback is actually an error string.
+
+ NOTE: this will need to change when we integrate peer grading, because
+ that will have more complex feedback.
+
+ Output:
+ String -- html that can be displayed to the student.
+ """
+
+ # We want to display available feedback in a particular order.
+ # This dictionary specifies which goes first--lower first.
+ priorities = {# These go at the start of the feedback
+ 'spelling': 0,
+ 'grammar': 1,
+ # needs to be after all the other feedback
+ 'markup_text': 3}
+
+ default_priority = 2
+
+ def get_priority(elt):
+ """
+ Args:
+ elt: a tuple of feedback-type, feedback
+ Returns:
+ the priority for this feedback type
+ """
+ return priorities.get(elt[0], default_priority)
+
+
+ def encode_values(feedback_type,value):
+ feedback_type=str(feedback_type).encode('ascii', 'ignore')
+ if not isinstance(value,basestring):
+ value=str(value)
+ value=value.encode('ascii', 'ignore')
+ return feedback_type,value
+
+ def format_feedback(feedback_type, value):
+ feedback_type,value=encode_values(feedback_type,value)
+ feedback= """
+
+ {value}
+
+ """.format(feedback_type=feedback_type, value=value)
+
+ return feedback
+
+ def format_feedback_hidden(feedback_type , value):
+ feedback_type,value=encode_values(feedback_type,value)
+ feedback = """
+
+ {value}
+
+ """.format(feedback_type=feedback_type, value=value)
+ return feedback
+
+
+ # TODO (vshnayder): design and document the details of this format so
+ # that we can do proper escaping here (e.g. are the graders allowed to
+ # include HTML?)
+
+ for tag in ['success', 'feedback', 'submission_id', 'grader_id']:
+ if tag not in response_items:
+ return format_feedback('errors', 'Error getting feedback')
+
+ feedback_items = response_items['feedback']
+ try:
+ feedback = json.loads(feedback_items)
+ except (TypeError, ValueError):
+ log.exception("feedback_items have invalid json %r", feedback_items)
+ return format_feedback('errors', 'Could not parse feedback')
+
+ if response_items['success']:
+ if len(feedback) == 0:
+ return format_feedback('errors', 'No feedback available')
+
+ feedback_lst = sorted(feedback.items(), key=get_priority)
+
+ feedback_list_part1 = u"\n".join(format_feedback(k, v) for k, v in feedback_lst)
+ else:
+ feedback_list_part1 = format_feedback('errors', response_items['feedback'])
+
+ feedback_list_part2=(u"\n".join([format_feedback_hidden(feedback_type,value)
+ for feedback_type,value in response_items.items()
+ if feedback_type in ['submission_id', 'grader_id']]))
+
+ return u"\n".join([feedback_list_part1,feedback_list_part2])
+
+ def _format_feedback(self, response_items):
+ """
+ Input:
+ Dictionary called feedback. Must contain keys seen below.
+ Output:
+ Return error message or feedback template
+ """
+
+ feedback = self._convert_longform_feedback_to_html(response_items)
+
+ if not response_items['success']:
+ return self.system.render_template("open_ended_error.html",
+ {'errors' : feedback})
+
+ feedback_template = self.system.render_template("open_ended_feedback.html", {
+ 'grader_type': response_items['grader_type'],
+ 'score': "{0} / {1}".format(response_items['score'], self.max_score),
+ 'feedback': feedback,
+ })
+
+ return feedback_template
+
+
+ def _parse_score_msg(self, score_msg):
+ """
+ Grader reply is a JSON-dump of the following dict
+ { 'correct': True/False,
+ 'score': Numeric value (floating point is okay) to assign to answer
+ 'msg': grader_msg
+ 'feedback' : feedback from grader
+ }
+
+ Returns (valid_score_msg, correct, score, msg):
+ valid_score_msg: Flag indicating valid score_msg format (Boolean)
+ correct: Correctness of submission (Boolean)
+ score: Points to be assigned (numeric, can be float)
+ """
+ fail = ScoreMessage(valid=False, correct=False, points=0, msg='')
+ try:
+ score_result = json.loads(score_msg)
+ except (TypeError, ValueError):
+ log.error("External grader message should be a JSON-serialized dict."
+ " Received score_msg = {0}".format(score_msg))
+ return fail
+
+ if not isinstance(score_result, dict):
+ log.error("External grader message should be a JSON-serialized dict."
+ " Received score_result = {0}".format(score_result))
+ return fail
+
+
+ for tag in ['score', 'feedback', 'grader_type', 'success', 'grader_id', 'submission_id']:
+ if tag not in score_result:
+ log.error("External grader message is missing required tag: {0}"
+ .format(tag))
+ return fail
+
+ feedback = self._format_feedback(score_result)
+
+ self.submission_id=score_result['submission_id']
+ self.grader_id=score_result['grader_id']
+
+ # HACK: for now, just assume it's correct if you got more than 2/3.
+ # Also assumes that score_result['score'] is an integer.
+ score_ratio = int(score_result['score']) / float(self.max_score)
+ correct = (score_ratio >= 0.66)
+
+ #Currently ignore msg and only return feedback (which takes the place of msg)
+ return ScoreMessage(valid=True, correct=correct,
+ points=score_result['score'], msg=feedback)
#-----------------------------------------------------------------------------
# TEMPORARY: List of all response subclasses
@@ -1810,4 +2286,5 @@ __all__ = [CodeResponse,
ChoiceResponse,
MultipleChoiceResponse,
TrueFalseResponse,
- JavascriptResponse]
+ JavascriptResponse,
+ OpenEndedResponse]
diff --git a/common/lib/capa/capa/templates/crystallography.html b/common/lib/capa/capa/templates/crystallography.html
index 2370f59dd2..8dcbff354b 100644
--- a/common/lib/capa/capa/templates/crystallography.html
+++ b/common/lib/capa/capa/templates/crystallography.html
@@ -1,34 +1,28 @@