diff --git a/courseware/capa/calc.py b/courseware/capa/calc.py index 571f4991d5..63c5c9de01 100644 --- a/courseware/capa/calc.py +++ b/courseware/capa/calc.py @@ -2,6 +2,7 @@ import copy import logging import math import operator +import re import numpy import scipy.constants @@ -10,7 +11,7 @@ from pyparsing import Word, alphas, nums, oneOf, Literal from pyparsing import ZeroOrMore, OneOrMore, StringStart from pyparsing import StringEnd, Optional, Forward from pyparsing import CaselessLiteral, Group, StringEnd -from pyparsing import NoMatch, stringEnd +from pyparsing import NoMatch, stringEnd, alphanums default_functions = {'sin' : numpy.sin, 'cos' : numpy.cos, @@ -35,10 +36,40 @@ default_variables = {'j':numpy.complex(0,1), log = logging.getLogger("mitx.courseware.capa") -def evaluator(variables, functions, string): +class UndefinedVariable(Exception): + def raiseself(self): + ''' Helper so we can use inside of a lambda ''' + raise self + + +general_whitespace = re.compile('[^\w]+') +def check_variables(string, variables): + ''' Confirm the only variables in string are defined. + + Pyparsing uses a left-to-right parser, which makes the more + elegant approach pretty hopeless. + + achar = reduce(lambda a,b:a|b ,map(Literal,alphas)) # Any alphabetic character + undefined_variable = achar + Word(alphanums) + undefined_variable.setParseAction(lambda x:UndefinedVariable("".join(x)).raiseself()) + varnames = varnames | undefined_variable''' + possible_variables = re.split(general_whitespace, string) # List of all alnums in string + bad_variables = list() + for v in possible_variables: + if len(v) == 0: + continue + if v[0] <= '9' and '0' <= v[0]: # Skip things that begin with numbers + continue + if v not in variables: + bad_variables.append(v) + if len(bad_variables)>0: + raise UndefinedVariable(' '.join(bad_variables)) + +def evaluator(variables, functions, string, cs=False): ''' Evaluate an expression. Variables are passed as a dictionary from string to value. 
Unary functions are passed as a dictionary from string to function. Variables must be floats. + cs: Case sensitive TODO: Fix it so we can pass integers and complex numbers in variables dict ''' @@ -51,6 +82,19 @@ def evaluator(variables, functions, string): all_functions = copy.copy(default_functions) all_functions.update(functions) + if not cs: + string_cs = string.lower() + for v in all_variables.keys(): + all_variables[v.lower()]=all_variables[v] + for f in all_functions.keys(): + all_functions[f.lower()]=all_functions[f] + CasedLiteral = CaselessLiteral + else: + string_cs = string + CasedLiteral = Literal + + check_variables(string_cs, set(all_variables.keys()+all_functions.keys())) + if string.strip() == "": return float('nan') ops = { "^" : operator.pow, @@ -137,19 +181,19 @@ def evaluator(variables, functions, string): # We sort the list so that var names (like "e2") match before # mathematical constants (like "e"). This is kind of a hack. all_variables_keys = sorted(all_variables.keys(), key=len, reverse=True) - varnames = sreduce(lambda x,y:x|y, map(lambda x: CaselessLiteral(x), all_variables_keys)) + varnames = sreduce(lambda x,y:x|y, map(lambda x: CasedLiteral(x), all_variables_keys)) varnames.setParseAction(lambda x:map(lambda y:all_variables[y], x)) else: varnames=NoMatch() # Same thing for functions. 
if len(all_functions)>0: - funcnames = sreduce(lambda x,y:x|y, map(lambda x: CaselessLiteral(x), all_functions.keys())) + funcnames = sreduce(lambda x,y:x|y, map(lambda x: CasedLiteral(x), all_functions.keys())) function = funcnames+lpar.suppress()+expr+rpar.suppress() function.setParseAction(func_parse_action) else: function = NoMatch() - atom = number | varnames | lpar+expr+rpar | function + atom = number | function | varnames | lpar+expr+rpar factor << (atom + ZeroOrMore(exp+atom)).setParseAction(exp_parse_action) # 7^6 paritem = factor + ZeroOrMore(Literal('||')+factor) # 5k || 4k paritem=paritem.setParseAction(parallel) diff --git a/courseware/capa/capa_problem.py b/courseware/capa/capa_problem.py index 73ef168cf0..e9778008c1 100644 --- a/courseware/capa/capa_problem.py +++ b/courseware/capa/capa_problem.py @@ -15,7 +15,7 @@ from mako.template import Template from util import contextualize_text from inputtypes import textline, schematic -from responsetypes import numericalresponse, formularesponse, customresponse, schematicresponse +from responsetypes import numericalresponse, formularesponse, customresponse, schematicresponse, StudentInputError import calc import eia diff --git a/courseware/capa/responsetypes.py b/courseware/capa/responsetypes.py index 7f2b61e997..8809d935b1 100644 --- a/courseware/capa/responsetypes.py +++ b/courseware/capa/responsetypes.py @@ -3,8 +3,9 @@ import math import numpy import random import scipy +import traceback -from calc import evaluator +from calc import evaluator, UndefinedVariable from django.conf import settings from util import contextualize_text @@ -84,6 +85,9 @@ class customresponse(object): # be handled by capa_problem return {} +class StudentInputError(Exception): + pass + class formularesponse(object): def __init__(self, xml, context): self.xml = xml @@ -95,6 +99,17 @@ class formularesponse(object): self.answer_id = xml.xpath('//*[@id=$id]//textline/@id', id=xml.get('id'))[0] self.context = context + ts = 
xml.get('type') + if ts == None: + typeslist = [] + else: + typeslist = ts.split(',') + if 'ci' in typeslist: # Case insensitive + self.case_sensitive = False + elif 'cs' in typeslist: # Case sensitive + self.case_sensitive = True + else: # Default + self.case_sensitive = False def grade(self, student_answers): @@ -113,7 +128,16 @@ class formularesponse(object): instructor_variables[str(var)] = value student_variables[str(var)] = value instructor_result = evaluator(instructor_variables,dict(),self.correct_answer) - student_result = evaluator(student_variables,dict(),student_answers[self.answer_id]) + try: + #print student_variables,dict(),student_answers[self.answer_id] + student_result = evaluator(student_variables,dict(), + student_answers[self.answer_id], + cs = self.case_sensitive) + except UndefinedVariable as uv: + raise StudentInputError('Undefined: '+uv.message) + except: + #traceback.print_exc() + raise StudentInputError("Syntax Error") if math.isnan(student_result) or math.isinf(student_result): return {self.answer_id:"incorrect"} if not compare_with_tolerance(student_result, instructor_result, self.tolerance): diff --git a/courseware/modules/capa_module.py b/courseware/modules/capa_module.py index 378c322c42..366d0463e4 100644 --- a/courseware/modules/capa_module.py +++ b/courseware/modules/capa_module.py @@ -21,7 +21,7 @@ from mitxmako.shortcuts import render_to_response, render_to_string from django.http import Http404 from x_module import XModule -from courseware.capa.capa_problem import LoncapaProblem +from courseware.capa.capa_problem import LoncapaProblem, StudentInputError import courseware.content_parser as content_parser log = logging.getLogger("mitx.courseware") @@ -94,6 +94,10 @@ class Module(XModule): if self.max_attempts != None: attempts_str = " ({a}/{m})".format(a=self.attempts, m=self.max_attempts) + # We don't need a "save" button if infinite number of attempts and non-randomized + if self.max_attempts == None and self.rerandomize == 
False: + save_button = False + # Check if explanation is available, and if so, give a link explain="" if self.lcp.done and self.explain_available=='attempted': @@ -277,22 +281,27 @@ class Module(XModule): lcp_id = self.lcp.problem_id filename = self.lcp.filename correct_map = self.lcp.grade_answers(answers) + except StudentInputError as inst: + self.lcp = LoncapaProblem(filename, id=lcp_id, state=old_state) + traceback.print_exc() +# print {'error':sys.exc_info(), +# 'answers':answers, +# 'seed':self.lcp.seed, +# 'filename':self.lcp.filename} + return json.dumps({'success':inst.message}) except: self.lcp = LoncapaProblem(filename, id=lcp_id, state=old_state) traceback.print_exc() - print {'error':sys.exc_info(), - 'answers':answers, - 'seed':self.lcp.seed, - 'filename':self.lcp.filename} - return json.dumps({'success':'syntax'}) + return json.dumps({'success':'Unknown Error'}) + self.attempts = self.attempts + 1 self.lcp.done=True - success = 'finished' + success = 'correct' for i in correct_map: if correct_map[i]!='correct': - success = 'errors' + success = 'incorrect' js=json.dumps({'correct_map' : correct_map, 'success' : success}) diff --git a/courseware/tests.py b/courseware/tests.py index 39edcc0e72..b0d7cad19f 100644 --- a/courseware/tests.py +++ b/courseware/tests.py @@ -32,8 +32,22 @@ class ModelsTest(unittest.TestCase): self.assertTrue(abs(calc.evaluator(variables, functions, "k*T/q-0.025"))<0.001) exception_happened = False try: - evaluator({},{}, "5+7 QWSEKO") + calc.evaluator({},{}, "5+7 QWSEKO") except: exception_happened = True self.assertTrue(exception_happened) - + + try: + calc.evaluator({'r1':5},{}, "r1+r2") + except calc.UndefinedVariable: + pass + + self.assertEqual(calc.evaluator(variables, functions, "r1*r3"), 8.0) + + exception_happened = False + try: + calc.evaluator(variables, functions, "r1*r3", cs=True) + except: + exception_happened = True + self.assertTrue(exception_happened) +