diff --git a/common/lib/capa/capa/capa_problem.py b/common/lib/capa/capa/capa_problem.py index 9a5a15a696..ca78f635e3 100644 --- a/common/lib/capa/capa/capa_problem.py +++ b/common/lib/capa/capa/capa_problem.py @@ -30,6 +30,8 @@ import sys from lxml import etree from xml.sax.saxutils import unescape +import chem +import chem.chemcalc import calc from correctmap import CorrectMap import eia @@ -53,7 +55,8 @@ entry_types = ['textline', 'radiogroup', 'checkboxgroup', 'filesubmission', - 'javascriptinput',] + 'javascriptinput', + 'chemicalequationinput'] # extra things displayed after "show answers" is pressed solution_types = ['solution'] @@ -72,7 +75,8 @@ global_context = {'random': random, 'math': math, 'scipy': scipy, 'calc': calc, - 'eia': eia} + 'eia': eia, + 'chemcalc': chem.chemcalc} # These should be removed from HTML output, including all subelements html_problem_semantics = ["codeparam", "responseparam", "answer", "script", "hintgroup"] @@ -436,7 +440,7 @@ class LoncapaProblem(object): sys.path = original_path + self._extract_system_path(script) stype = script.get('type') - + if stype: if 'javascript' in stype: continue # skip javascript @@ -478,8 +482,8 @@ class LoncapaProblem(object): problemid = problemtree.get('id') # my ID - if problemtree.tag in inputtypes.get_input_xml_tags(): - + if problemtree.tag in inputtypes.registered_input_tags(): + # If this is an inputtype subtree, let it render itself. status = "unsubmitted" msg = '' hint = '' @@ -496,20 +500,17 @@ class LoncapaProblem(object): value = self.student_answers[problemid] # do the rendering - render_object = inputtypes.SimpleInput(system=self.system, - xml=problemtree, - state={'value': value, - 'status': status, - 'id': problemtree.get('id'), - 'feedback': {'message': msg, - 'hint': hint, - 'hintmode': hintmode, - } - }, - use='capa_input') - # function(problemtree, value, status, msg) - # render the special response (textline, schematic,...) 
- return render_object.get_html() + + state = {'value': value, + 'status': status, + 'id': problemtree.get('id'), + 'feedback': {'message': msg, + 'hint': hint, + 'hintmode': hintmode,}} + + input_type_cls = inputtypes.get_class_for_tag(problemtree.tag) + the_input = input_type_cls(self.system, problemtree, state) + return the_input.get_html() # let each Response render itself if problemtree in self.responders: diff --git a/common/lib/capa/capa/chem/__init__.py b/common/lib/capa/capa/chem/__init__.py new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/common/lib/capa/capa/chem/__init__.py @@ -0,0 +1 @@ + diff --git a/common/lib/capa/capa/chem/chemcalc.py b/common/lib/capa/capa/chem/chemcalc.py new file mode 100644 index 0000000000..389e688cf4 --- /dev/null +++ b/common/lib/capa/capa/chem/chemcalc.py @@ -0,0 +1,433 @@ +from __future__ import division +import copy +from fractions import Fraction +import logging +import math +import operator +import re +import numpy +import numbers +import scipy.constants + +from pyparsing import (Literal, Keyword, Word, nums, StringEnd, Optional, + Forward, OneOrMore, ParseException) +import nltk +from nltk.tree import Tree + +ARROWS = ('<->', '->') + +## Defines a simple pyparsing tokenizer for chemical equations +elements = ['Ac','Ag','Al','Am','Ar','As','At','Au','B','Ba','Be', + 'Bh','Bi','Bk','Br','C','Ca','Cd','Ce','Cf','Cl','Cm', + 'Cn','Co','Cr','Cs','Cu','Db','Ds','Dy','Er','Es','Eu', + 'F','Fe','Fl','Fm','Fr','Ga','Gd','Ge','H','He','Hf', + 'Hg','Ho','Hs','I','In','Ir','K','Kr','La','Li','Lr', + 'Lu','Lv','Md','Mg','Mn','Mo','Mt','N','Na','Nb','Nd', + 'Ne','Ni','No','Np','O','Os','P','Pa','Pb','Pd','Pm', + 'Po','Pr','Pt','Pu','Ra','Rb','Re','Rf','Rg','Rh','Rn', + 'Ru','S','Sb','Sc','Se','Sg','Si','Sm','Sn','Sr','Ta', + 'Tb','Tc','Te','Th','Ti','Tl','Tm','U','Uuo','Uup', + 'Uus','Uut','V','W','Xe','Y','Yb','Zn','Zr'] +digits = map(str, range(10)) +symbols = list("[](){}^+-/") +phases = ["(s)", "(l)", "(g)", 
"(aq)"] +tokens = reduce(lambda a, b: a ^ b, map(Literal, elements + digits + symbols + phases)) +tokenizer = OneOrMore(tokens) + StringEnd() + + +def _orjoin(l): + return "'" + "' | '".join(l) + "'" + +## Defines an NLTK parser for tokenized expressions +grammar = """ + S -> multimolecule | multimolecule '+' S + multimolecule -> count molecule | molecule + count -> number | number '/' number + molecule -> unphased | unphased phase + unphased -> group | paren_group_round | paren_group_square + element -> """ + _orjoin(elements) + """ + digit -> """ + _orjoin(digits) + """ + phase -> """ + _orjoin(phases) + """ + number -> digit | digit number + group -> suffixed | suffixed group + paren_group_round -> '(' group ')' + paren_group_square -> '[' group ']' + plus_minus -> '+' | '-' + number_suffix -> number + ion_suffix -> '^' number plus_minus | '^' plus_minus + suffix -> number_suffix | number_suffix ion_suffix | ion_suffix + unsuffixed -> element | paren_group_round | paren_group_square + + suffixed -> unsuffixed | unsuffixed suffix +""" +parser = nltk.ChartParser(nltk.parse_cfg(grammar)) + + +def _clean_parse_tree(tree): + ''' The parse tree contains a lot of redundant + nodes. E.g. paren_groups have groups as children, etc. This will + clean up the tree. + ''' + def unparse_number(n): + ''' Go from a number parse tree to a number ''' + if len(n) == 1: + rv = n[0][0] + else: + rv = n[0][0] + unparse_number(n[1]) + return rv + + def null_tag(n): + ''' Remove a tag ''' + return n[0] + + def ion_suffix(n): + '''1. "if" part handles special case + 2. 
"else" part is general behaviour ''' + + if n[1:][0].node == 'number' and n[1:][0][0][0] == '1': + # if suffix is explicitly 1, like ^1- + # strip 1, leave only sign: ^- + return nltk.tree.Tree(n.node, n[2:]) + else: + return nltk.tree.Tree(n.node, n[1:]) + + dispatch = {'number': lambda x: nltk.tree.Tree("number", [unparse_number(x)]), + 'unphased': null_tag, + 'unsuffixed': null_tag, + 'number_suffix': lambda x: nltk.tree.Tree('number_suffix', [unparse_number(x[0])]), + 'suffixed': lambda x: len(x) > 1 and x or x[0], + 'ion_suffix': ion_suffix, + 'paren_group_square': lambda x: nltk.tree.Tree(x.node, x[1]), + 'paren_group_round': lambda x: nltk.tree.Tree(x.node, x[1])} + + if type(tree) == str: + return tree + + old_node = None + ## This loop means that if a node is processed, and returns a child, + ## the child will be processed. + while tree.node in dispatch and tree.node != old_node: + old_node = tree.node + tree = dispatch[tree.node](tree) + + children = [] + for child in tree: + child = _clean_parse_tree(child) + children.append(child) + + tree = nltk.tree.Tree(tree.node, children) + + return tree + + +def _merge_children(tree, tags): + ''' nltk, by documentation, cannot do arbitrary length + groups. Instead of: + (group 1 2 3 4) + It has to handle this recursively: + (group 1 (group 2 (group 3 (group 4)))) + We do the cleanup of converting from the latter to the former. + ''' + if tree is None: + # There was a problem--shouldn't have empty trees (NOTE: see this with input e.g. 'H2O(', or 'Xe+'). + # Haven't grokked the code to tell if this is indeed the right thing to do. 
+ raise ParseException("Shouldn't have empty trees") + + if type(tree) == str: + return tree + + merged_children = [] + done = False + #print '00000', tree + ## Merge current tag + while not done: + done = True + for child in tree: + if type(child) == nltk.tree.Tree and child.node == tree.node and tree.node in tags: + merged_children = merged_children + list(child) + done = False + else: + merged_children = merged_children + [child] + tree = nltk.tree.Tree(tree.node, merged_children) + merged_children = [] + #print '======',tree + + # And recurse + children = [] + for child in tree: + children.append(_merge_children(child, tags)) + + #return tree + return nltk.tree.Tree(tree.node, children) + + +def _render_to_html(tree): + ''' Renders a cleaned tree to HTML ''' + + def molecule_count(tree, children): + # If an integer, return that integer + if len(tree) == 1: + return tree[0][0] + # If a fraction, return the fraction + if len(tree) == 3: + return " {num}{den} ".format(num=tree[0][0], den=tree[2][0]) + return "Error" + + def subscript(tree, children): + return "{sub}".format(sub=children) + + def superscript(tree, children): + return "{sup}".format(sup=children) + + def round_brackets(tree, children): + return "({insider})".format(insider=children) + + def square_brackets(tree, children): + return "[{insider}]".format(insider=children) + + dispatch = {'count': molecule_count, + 'number_suffix': subscript, + 'ion_suffix': superscript, + 'paren_group_round': round_brackets, + 'paren_group_square': square_brackets} + + if type(tree) == str: + return tree + else: + children = "".join(map(_render_to_html, tree)) + if tree.node in dispatch: + return dispatch[tree.node](tree, children) + else: + return children.replace(' ', '') + + + +def render_to_html(eq): + ''' + Render a chemical equation string to html. + + Renders each molecule separately, and returns invalid input wrapped in a . 
+ ''' + def err(s): + "Render as an error span" + return '{0}'.format(s) + + def render_arrow(arrow): + """Turn text arrows into pretty ones""" + if arrow == '->': + return u'\u2192' + if arrow == '<->': + return u'\u2194' + + # this won't be reached unless we add more arrow types, but keep it to avoid explosions when + # that happens. + return arrow + + def render_expression(ex): + """ + Render a chemical expression--no arrows. + """ + try: + return _render_to_html(_get_final_tree(ex)) + except ParseException: + return err(ex) + + def spanify(s): + return u'{0}'.format(s) + + left, arrow, right = split_on_arrow(eq) + if arrow == '': + # only one side + return spanify(render_expression(left)) + + + return spanify(render_expression(left) + render_arrow(arrow) + render_expression(right)) + + +def _get_final_tree(s): + ''' + Return final tree after merge and clean. + + Raises pyparsing.ParseException if s is invalid. + ''' + tokenized = tokenizer.parseString(s) + parsed = parser.parse(tokenized) + merged = _merge_children(parsed, {'S','group'}) + final = _clean_parse_tree(merged) + return final + + +def _check_equality(tuple1, tuple2): + ''' return True if tuples of multimolecules are equal ''' + list1 = list(tuple1) + list2 = list(tuple2) + + # Hypo: trees where are levels count+molecule vs just molecule + # cannot be sorted properly (tested on test_complex_additivity) + # But without factors and phases sorting seems to work. + + # Also for lists of multimolecules without factors and phases + # sorting seems to work fine. + list1.sort() + list2.sort() + return list1 == list2 + + +def compare_chemical_expression(s1, s2, ignore_state=False): + ''' It does comparison between two expressions. 
+ It uses divide_chemical_expression and check if division is 1 + ''' + return divide_chemical_expression(s1, s2, ignore_state) == 1 + + +def divide_chemical_expression(s1, s2, ignore_state=False): + '''Compare two chemical expressions for equivalence up to a multiplicative factor: + + - If they are not the same chemicals, returns False. + - If they are the same, "divide" s1 by s2 to returns a factor x such that s1 / s2 == x as a Fraction object. + - if ignore_state is True, ignores phases when doing the comparison. + + Examples: + divide_chemical_expression("H2O", "3H2O") -> Fraction(1,3) + divide_chemical_expression("3H2O", "H2O") -> 3 # actually Fraction(3, 1), but compares == to 3. + divide_chemical_expression("2H2O(s) + 2CO2", "H2O(s)+CO2") -> 2 + divide_chemical_expression("H2O(s) + CO2", "3H2O(s)+2CO2") -> False + + Implementation sketch: + - extract factors and phases to standalone lists, + - compare expressions without factors and phases, + - divide lists of factors for each other and check + for equality of every element in list, + - return result of factor division + + ''' + + # parsed final trees + treedic = {} + treedic['1'] = _get_final_tree(s1) + treedic['2'] = _get_final_tree(s2) + + # strip phases and factors + # collect factors in list + for i in ('1', '2'): + treedic[i + ' cleaned_mm_list'] = [] + treedic[i + ' factors'] = [] + treedic[i + ' phases'] = [] + for el in treedic[i].subtrees(filter=lambda t: t.node == 'multimolecule'): + count_subtree = [t for t in el.subtrees() if t.node == 'count'] + group_subtree = [t for t in el.subtrees() if t.node == 'group'] + phase_subtree = [t for t in el.subtrees() if t.node == 'phase'] + if count_subtree: + if len(count_subtree[0]) > 1: + treedic[i + ' factors'].append( + int(count_subtree[0][0][0]) / + int(count_subtree[0][2][0])) + else: + treedic[i + ' factors'].append(int(count_subtree[0][0][0])) + else: + treedic[i + ' factors'].append(1.0) + if phase_subtree: + treedic[i + ' 
phases'].append(phase_subtree[0][0]) + else: + treedic[i + ' phases'].append(' ') + treedic[i + ' cleaned_mm_list'].append( + Tree('multimolecule', [Tree('molecule', group_subtree)])) + + # order of factors and phases must mirror the order of multimolecules, + # use 'decorate, sort, undecorate' pattern + treedic['1 cleaned_mm_list'], treedic['1 factors'], treedic['1 phases'] = zip( + *sorted(zip(treedic['1 cleaned_mm_list'], treedic['1 factors'], treedic['1 phases']))) + + treedic['2 cleaned_mm_list'], treedic['2 factors'], treedic['2 phases'] = zip( + *sorted(zip(treedic['2 cleaned_mm_list'], treedic['2 factors'], treedic['2 phases']))) + + # check if expressions are correct without factors + if not _check_equality(treedic['1 cleaned_mm_list'], treedic['2 cleaned_mm_list']): + return False + + # phases are ruled by ingore_state flag + if not ignore_state: # phases matters + if treedic['1 phases'] != treedic['2 phases']: + return False + + if any(map(lambda x, y: x / y - treedic['1 factors'][0] / treedic['2 factors'][0], + treedic['1 factors'], treedic['2 factors'])): + # factors are not proportional + return False + else: + # return ratio + return Fraction(treedic['1 factors'][0] / treedic['2 factors'][0]) + + +def split_on_arrow(eq): + """ + Split a string on an arrow. Returns left, arrow, right. If there is no arrow, returns the + entire eq in left, and '' in arrow and right. + + Return left, arrow, right. + """ + # order matters -- need to try <-> first + for arrow in ARROWS: + left, a, right = eq.partition(arrow) + if a != '': + return left, a, right + + return eq, '', '' + + +def chemical_equations_equal(eq1, eq2, exact=False): + """ + Check whether two chemical equations are the same. (equations have arrows) + + If exact is False, then they are considered equal if they differ by a + constant factor. + + arrows matter: -> and <-> are different. + + e.g. 
+ chemical_equations_equal('H2 + O2 -> H2O2', 'O2 + H2 -> H2O2') -> True + chemical_equations_equal('H2 + O2 -> H2O2', 'O2 + 2H2 -> H2O2') -> False + + chemical_equations_equal('H2 + O2 -> H2O2', 'O2 + H2 <-> H2O2') -> False + + chemical_equations_equal('H2 + O2 -> H2O2', '2 H2 + 2 O2 -> 2 H2O2') -> True + chemical_equations_equal('H2 + O2 -> H2O2', '2 H2 + 2 O2 -> 2 H2O2', exact=True) -> False + + + If there's a syntax error, we return False. + """ + + left1, arrow1, right1 = split_on_arrow(eq1) + left2, arrow2, right2 = split_on_arrow(eq2) + + if arrow1 == '' or arrow2 == '': + return False + + # TODO: may want to be able to give student helpful feedback about why things didn't work. + if arrow1 != arrow2: + # arrows don't match + return False + + try: + factor_left = divide_chemical_expression(left1, left2) + if not factor_left: + # left sides don't match + return False + + factor_right = divide_chemical_expression(right1, right2) + if not factor_right: + # right sides don't match + return False + + if factor_left != factor_right: + # factors don't match (molecule counts to add up) + return False + + if exact and factor_left != 1: + # want an exact match. + return False + + return True + except ParseException: + # Don't want external users to have to deal with parsing exceptions. Just return False. + return False diff --git a/common/lib/capa/capa/chem/tests.py b/common/lib/capa/capa/chem/tests.py new file mode 100644 index 0000000000..34d903ec1d --- /dev/null +++ b/common/lib/capa/capa/chem/tests.py @@ -0,0 +1,336 @@ +import codecs +from fractions import Fraction +from pyparsing import ParseException +import unittest + +from chemcalc import (compare_chemical_expression, divide_chemical_expression, + render_to_html, chemical_equations_equal) + +local_debug = None + +def log(s, output_type=None): + if local_debug: + print s + if output_type == 'html': + f.write(s + '\n
\n') + + +class Test_Compare_Equations(unittest.TestCase): + def test_simple_equation(self): + self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2', + 'O2 + H2 -> H2O2')) + # left sides don't match + self.assertFalse(chemical_equations_equal('H2 + O2 -> H2O2', + 'O2 + 2H2 -> H2O2')) + # right sides don't match + self.assertFalse(chemical_equations_equal('H2 + O2 -> H2O2', + 'O2 + H2 -> H2O')) + + # factors don't match + self.assertFalse(chemical_equations_equal('H2 + O2 -> H2O2', + 'O2 + H2 -> 2H2O2')) + + def test_different_factor(self): + self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2', + '2O2 + 2H2 -> 2H2O2')) + + self.assertFalse(chemical_equations_equal('2H2 + O2 -> H2O2', + '2O2 + 2H2 -> 2H2O2')) + + + def test_different_arrows(self): + self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2', + '2O2 + 2H2 -> 2H2O2')) + + self.assertFalse(chemical_equations_equal('H2 + O2 -> H2O2', + 'O2 + H2 <-> 2H2O2')) + + def test_exact_match(self): + self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2', + '2O2 + 2H2 -> 2H2O2')) + + self.assertFalse(chemical_equations_equal('H2 + O2 -> H2O2', + '2O2 + 2H2 -> 2H2O2', exact=True)) + + # order still doesn't matter + self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2', + 'O2 + H2 -> H2O2', exact=True)) + + + def test_syntax_errors(self): + self.assertFalse(chemical_equations_equal('H2 + O2 a-> H2O2', + '2O2 + 2H2 -> 2H2O2')) + + self.assertFalse(chemical_equations_equal('H2O( -> H2O2', + 'H2O -> H2O2')) + + + self.assertFalse(chemical_equations_equal('H2 + O2 ==> H2O2', # strange arrow + '2O2 + 2H2 -> 2H2O2')) + + +class Test_Compare_Expressions(unittest.TestCase): + + def test_compare_incorrect_order_of_atoms_in_molecule(self): + self.assertFalse(compare_chemical_expression("H2O + CO2", "O2C + OH2")) + + def test_compare_same_order_no_phases_no_factors_no_ions(self): + self.assertTrue(compare_chemical_expression("H2O + CO2", "CO2+H2O")) + + def 
test_compare_different_order_no_phases_no_factors_no_ions(self): + self.assertTrue(compare_chemical_expression("H2O + CO2", "CO2 + H2O")) + + def test_compare_different_order_three_multimolecule(self): + self.assertTrue(compare_chemical_expression("H2O + Fe(OH)3 + CO2", "CO2 + H2O + Fe(OH)3")) + + def test_compare_same_factors(self): + self.assertTrue(compare_chemical_expression("3H2O + 2CO2", "2CO2 + 3H2O ")) + + def test_compare_different_factors(self): + self.assertFalse(compare_chemical_expression("2H2O + 3CO2", "2CO2 + 3H2O ")) + + def test_compare_correct_ions(self): + self.assertTrue(compare_chemical_expression("H^+ + OH^-", " OH^- + H^+ ")) + + def test_compare_wrong_ions(self): + self.assertFalse(compare_chemical_expression("H^+ + OH^-", " OH^- + H^- ")) + + def test_compare_parent_groups_ions(self): + self.assertTrue(compare_chemical_expression("Fe(OH)^2- + (OH)^-", " (OH)^- + Fe(OH)^2- ")) + + def test_compare_correct_factors_ions_and_one(self): + self.assertTrue(compare_chemical_expression("3H^+ + 2OH^-", " 2OH^- + 3H^+ ")) + + def test_compare_wrong_factors_ions(self): + self.assertFalse(compare_chemical_expression("2H^+ + 3OH^-", " 2OH^- + 3H^+ ")) + + def test_compare_float_factors(self): + self.assertTrue(compare_chemical_expression("7/2H^+ + 3/5OH^-", " 3/5OH^- + 7/2H^+ ")) + + # Phases tests + def test_compare_phases_ignored(self): + self.assertTrue(compare_chemical_expression( + "H2O(s) + CO2", "H2O+CO2", ignore_state=True)) + + def test_compare_phases_not_ignored_explicitly(self): + self.assertFalse(compare_chemical_expression( + "H2O(s) + CO2", "H2O+CO2", ignore_state=False)) + + def test_compare_phases_not_ignored(self): # same as previous + self.assertFalse(compare_chemical_expression( + "H2O(s) + CO2", "H2O+CO2")) + + def test_compare_phases_not_ignored_explicitly(self): + self.assertTrue(compare_chemical_expression( + "H2O(s) + CO2", "H2O(s)+CO2", ignore_state=False)) + + # all in one cases + def test_complex_additivity(self): + 
self.assertTrue(compare_chemical_expression( + "5(H1H212)^70010- + 2H20 + 7/2HCl + H2O", + "7/2HCl + 2H20 + H2O + 5(H1H212)^70010-")) + + def test_complex_additivity_wrong(self): + self.assertFalse(compare_chemical_expression( + "5(H1H212)^70010- + 2H20 + 7/2HCl + H2O", + "2H20 + 7/2HCl + H2O + 5(H1H212)^70011-")) + + def test_complex_all_grammar(self): + self.assertTrue(compare_chemical_expression( + "5[Ni(NH3)4]^2+ + 5/2SO4^2-", + "5/2SO4^2- + 5[Ni(NH3)4]^2+")) + + # special cases + + def test_compare_one_superscript_explicitly_set(self): + self.assertTrue(compare_chemical_expression("H^+ + OH^1-", " OH^- + H^+ ")) + + def test_compare_equal_factors_differently_set(self): + self.assertTrue(compare_chemical_expression("6/2H^+ + OH^-", " OH^- + 3H^+ ")) + + def test_compare_one_subscript_explicitly_set(self): + self.assertFalse(compare_chemical_expression("H2 + CO2", "H2 + C102")) + + +class Test_Divide_Expressions(unittest.TestCase): + ''' as compare_ use divide_, + tests here must consider different + division (not equality) cases ''' + + def test_divide_by_zero(self): + self.assertFalse(divide_chemical_expression( + "0H2O", "H2O")) + + def test_divide_wrong_factors(self): + self.assertFalse(divide_chemical_expression( + "5(H1H212)^70010- + 10H2O", "5H2O + 10(H1H212)^70010-")) + + def test_divide_right(self): + self.assertEqual(divide_chemical_expression( + "5(H1H212)^70010- + 10H2O", "10H2O + 5(H1H212)^70010-"), 1) + + def test_divide_wrong_reagents(self): + self.assertFalse(divide_chemical_expression( + "H2O + CO2", "CO2")) + + def test_divide_right_simple(self): + self.assertEqual(divide_chemical_expression( + "H2O + CO2", "H2O+CO2"), 1) + + def test_divide_right_phases(self): + self.assertEqual(divide_chemical_expression( + "H2O(s) + CO2", "2H2O(s)+2CO2"), Fraction(1, 2)) + + def test_divide_right_phases_other_order(self): + self.assertEqual(divide_chemical_expression( + "2H2O(s) + 2CO2", "H2O(s)+CO2"), 2) + + def test_divide_wrong_phases(self): + 
self.assertFalse(divide_chemical_expression( + "H2O(s) + CO2", "2H2O+2CO2(s)")) + + def test_divide_wrong_phases_but_phases_ignored(self): + self.assertEqual(divide_chemical_expression( + "H2O(s) + CO2", "2H2O+2CO2(s)", ignore_state=True), Fraction(1, 2)) + + def test_divide_order(self): + self.assertEqual(divide_chemical_expression( + "2CO2 + H2O", "2H2O+4CO2"), Fraction(1, 2)) + + def test_divide_fract_to_int(self): + self.assertEqual(divide_chemical_expression( + "3/2CO2 + H2O", "2H2O+3CO2"), Fraction(1, 2)) + + def test_divide_fract_to_frac(self): + self.assertEqual(divide_chemical_expression( + "3/4CO2 + H2O", "2H2O+9/6CO2"), Fraction(1, 2)) + + def test_divide_fract_to_frac_wrog(self): + self.assertFalse(divide_chemical_expression( + "6/2CO2 + H2O", "2H2O+9/6CO2"), 2) + + +class Test_Render_Equations(unittest.TestCase): + + def test_render1(self): + s = "H2O + CO2" + out = render_to_html(s) + correct = u'H2O+CO2' + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render_uncorrect_reaction(self): + s = "O2C + OH2" + out = render_to_html(s) + correct = u'O2C+OH2' + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render2(self): + s = "CO2 + H2O + Fe(OH)3" + out = render_to_html(s) + correct = u'CO2+H2O+Fe(OH)3' + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render3(self): + s = "3H2O + 2CO2" + out = render_to_html(s) + correct = u'3H2O+2CO2' + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render4(self): + s = "H^+ + OH^-" + out = render_to_html(s) + correct = u'H++OH-' + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render5(self): + s = "Fe(OH)^2- + (OH)^-" + out = render_to_html(s) + correct = u'Fe(OH)2-+(OH)-' + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render6(self): + s = "7/2H^+ + 3/5OH^-" + out = render_to_html(s) 
+ correct = u'72H++35OH-' + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render7(self): + s = "5(H1H212)^70010- + 2H2O + 7/2HCl + H2O" + out = render_to_html(s) + correct = u'5(H1H212)70010-+2H2O+72HCl+H2O' + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render8(self): + s = "H2O(s) + CO2" + out = render_to_html(s) + correct = u'H2O(s)+CO2' + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render9(self): + s = "5[Ni(NH3)4]^2+ + 5/2SO4^2-" + #import ipdb; ipdb.set_trace() + out = render_to_html(s) + correct = u'5[Ni(NH3)4]2++52SO42-' + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render_error(self): + s = "5.2H20" + out = render_to_html(s) + correct = u'5.2H20' + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render_simple_brackets(self): + s = "(Ar)" + out = render_to_html(s) + correct = u'(Ar)' + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render_eq1(self): + s = "H^+ + OH^- -> H2O" + out = render_to_html(s) + correct = u'H++OH-\u2192H2O' + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + def test_render_eq2(self): + s = "H^+ + OH^- <-> H2O" + out = render_to_html(s) + correct = u'H++OH-\u2194H2O' + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + + def test_render_eq3(self): + s = "H^+ + OH^- <= H2O" # unsupported arrow + out = render_to_html(s) + correct = u'H^+ + OH^- <= H2O' + log(out + ' ------- ' + correct, 'html') + self.assertEqual(out, correct) + + + +def suite(): + + testcases = [Test_Compare_Expressions, Test_Divide_Expressions, Test_Render_Equations] + suites = [] + for testcase in testcases: + suites.append(unittest.TestLoader().loadTestsFromTestCase(testcase)) + return unittest.TestSuite(suites) + +if __name__ == "__main__": + local_debug = True + 
with codecs.open('render.html', 'w', encoding='utf-8') as f: + unittest.TextTestRunner(verbosity=2).run(suite()) + # open render.html to look at rendered equations diff --git a/common/lib/capa/capa/inputtypes.py b/common/lib/capa/capa/inputtypes.py index 466adcbf01..220c606daf 100644 --- a/common/lib/capa/capa/inputtypes.py +++ b/common/lib/capa/capa/inputtypes.py @@ -37,102 +37,174 @@ import xml.sax.saxutils as saxutils log = logging.getLogger('mitx.' + __name__) +######################################################################### -def get_input_xml_tags(): - ''' Eventually, this will be for all registered input types ''' - return SimpleInput.get_xml_tags() +_TAGS_TO_CLASSES = {} + +def register_input_class(cls): + """ + Register cls as a supported input type. It is expected to have the same constructor as + InputTypeBase, and to define cls.tags as a list of tags that it implements. + + If an already-registered input type has claimed one of those tags, will raise ValueError. + + If there are no tags in cls.tags, will also raise ValueError. + """ + + # Do all checks and complain before changing any state. + if len(cls.tags) == 0: + raise ValueError("No supported tags for class {0}".format(cls.__name__)) + + for t in cls.tags: + if t in _TAGS_TO_CLASSES: + other_cls = _TAGS_TO_CLASSES[t] + if cls == other_cls: + # registering the same class multiple times seems silly, but ok + continue + raise ValueError("Tag {0} already registered by class {1}. Can't register for class {2}" + .format(t, other_cls.__name__, cls.__name__)) + + # Ok, should be good to change state now. + for t in cls.tags: + _TAGS_TO_CLASSES[t] = cls + +def registered_input_tags(): + """ + Get a list of all the xml tags that map to known input types. + """ + return _TAGS_TO_CLASSES.keys() -class SimpleInput():# XModule - ''' - Type for simple inputs -- plain HTML with a form element - ''' +def get_class_for_tag(tag): + """ + For any tag in registered_input_tags(), return the corresponding class. 
Otherwise, will raise KeyError. + """ + return _TAGS_TO_CLASSES[tag] - # Maps tags to functions - xml_tags = {} - def __init__(self, system, xml, item_id=None, track_url=None, state=None, use='capa_input'): - ''' - Instantiate a SimpleInput class. Arguments: +class InputTypeBase(object): + """ + Abstract base class for input types. + """ - - system : ModuleSystem instance which provides OS, rendering, and user context + template = None + + def __init__(self, system, xml, state): + """ + Instantiate an InputType class. Arguments: + + - system : ModuleSystem instance which provides OS, rendering, and user context. Specifically, must + have a render_template function. - xml : Element tree of this Input element - - item_id : id for this input element (assigned by capa_problem.LoncapProblem) - string - - track_url : URL used for tracking - string - state : a dictionary with optional keys: - * Value - * ID - * Status (answered, unanswered, unsubmitted) - * Feedback (dictionary containing keys for hints, errors, or other - feedback from previous attempt) - - use : - ''' + * 'value' + * 'id' + * 'status' (answered, unanswered, unsubmitted) + * 'feedback' (dictionary containing keys for hints, errors, or other + feedback from previous attempt. Specifically 'message', 'hint', 'hintmode'. If 'hintmode' + is 'always', the hint is always displayed.) + """ self.xml = xml self.tag = xml.tag self.system = system - if not state: - state = {} - ## NOTE: ID should only come from one place. - ## If it comes from multiple, we use state first, XML second, and parameter - ## third. Since we don't make this guarantee, we can swap this around in - ## the future if there's a more logical order. - if item_id: - self.id = item_id + ## NOTE: ID should only come from one place. If it comes from multiple, + ## we use state first, XML second (in case the xml changed, but we have + ## existing state with an old id). 
Since we don't make this guarantee, + ## we can swap this around in the future if there's a more logical + ## order. - if xml.get('id'): - self.id = xml.get('id') - - if 'id' in state: - self.id = state['id'] + self.id = state.get('id', xml.get('id')) + if self.id is None: + raise ValueError("input id state is None. xml is {0}".format(etree.tostring(xml))) self.value = state.get('value', '') - self.msg = '' - feedback = state.get('feedback') - if feedback is not None: - self.msg = feedback.get('message', '') - self.hint = feedback.get('hint', '') - self.hintmode = feedback.get('hintmode', None) + feedback = state.get('feedback', {}) + self.msg = feedback.get('message', '') + self.hint = feedback.get('hint', '') + self.hintmode = feedback.get('hintmode', None) - # put hint above msg if to be displayed - if self.hintmode == 'always': - # TODO: is the '.' in
below a bug? - self.msg = self.hint + ('
' if self.msg else '') + self.msg + # put hint above msg if it should be displayed + if self.hintmode == 'always': + self.msg = self.hint + ('
' if self.msg else '') + self.msg - self.status = 'unanswered' - if 'status' in state: - self.status = state['status'] + self.status = state.get('status', 'unanswered') - @classmethod - def get_xml_tags(c): - return c.xml_tags.keys() + def _get_render_context(self): + """ + Abstract method. Subclasses should implement to return the dictionary + of keys needed to render their template. - @classmethod - def get_uses(c): - return ['capa_input', 'capa_transform'] - - def get_html(self): - return self.xml_tags[self.tag](self.xml, self.value, - self.status, self.system.render_template, self.msg) - - -def register_render_function(fn, names=None, cls=SimpleInput): - if names is None: - SimpleInput.xml_tags[fn.__name__] = fn - else: + (Separate from get_html to faciliate testing of logic separately from the rendering) + """ raise NotImplementedError - def wrapped(): - return fn - return wrapped + def get_html(self): + """ + Return a the html for this input, as an etree element. + """ + if self.template is None: + raise NotImplementedError("no rendering template specified for class {0}".format(self.__class__)) + + html = self.system.render_template(self.template, self._get_render_context()) + return etree.XML(html) + + +## TODO: Remove once refactor is complete +def make_class_for_render_function(fn): + """ + Take an old-style render function, return a new-style input class. + """ + + class Impl(InputTypeBase): + """ + Inherit all the constructor logic from InputTypeBase... + """ + tags = [fn.__name__] + def get_html(self): + """...delegate to the render function to do the work""" + return fn(self.xml, self.value, self.status, self.system.render_template, self.msg) + + # don't want all the classes to be called Impl (confuses register_input_class). + Impl.__name__ = fn.__name__.capitalize() + return Impl + + +def _reg(fn): + """ + Register an old-style inputtype render function as a new-style subclass of InputTypeBase. 
+ This will go away once converting all input types to the new format is complete. (TODO) + """ + register_input_class(make_class_for_render_function(fn)) + #----------------------------------------------------------------------------- -@register_render_function +class OptionInput(InputTypeBase): + """ + Input type for selecting and Select option input type. + + Example: + + The location of the sky + """ + + template = "optioninput.html" + tags = ['optioninput'] + + def _get_render_context(self): + return _optioninput(self.xml, self.value, self.status, self.system.render_template, self.msg) + + def optioninput(element, value, status, render_template, msg=''): + context = _optioninput(element, value, status, render_template, msg) + html = render_template("optioninput.html", context) + return etree.XML(html) + +def _optioninput(element, value, status, render_template, msg=''): """ Select option input type. @@ -146,12 +218,14 @@ def optioninput(element, value, status, render_template, msg=''): raise Exception( "[courseware.capa.inputtypes.optioninput] Missing options specification in " + etree.tostring(element)) + + # parse the set of possible options oset = shlex.shlex(options[1:-1]) oset.quotes = "'" oset.whitespace = "," oset = [x[1:-1] for x in list(oset)] - # make ordered list with (key,value) same + # make ordered list with (key, value) same osetdict = [(oset[x], oset[x]) for x in range(len(oset))] # TODO: allow ordering to be randomized @@ -162,16 +236,16 @@ def optioninput(element, value, status, render_template, msg=''): 'options': osetdict, 'inline': element.get('inline',''), } + return context - html = render_template("optioninput.html", context) - return etree.XML(html) +register_input_class(OptionInput) #----------------------------------------------------------------------------- # TODO: consolidate choicegroup, radiogroup, checkboxgroup after discussion of # desired semantics. 
-@register_render_function +# @register_render_function def choicegroup(element, value, status, render_template, msg=''): ''' Radio button inputs: multiple choice or true/false @@ -208,6 +282,7 @@ def choicegroup(element, value, status, render_template, msg=''): html = render_template("choicegroup.html", context) return etree.XML(html) +_reg(choicegroup) #----------------------------------------------------------------------------- def extract_choices(element): @@ -235,7 +310,6 @@ def extract_choices(element): # TODO: consolidate choicegroup, radiogroup, checkboxgroup after discussion of # desired semantics. -@register_render_function def radiogroup(element, value, status, render_template, msg=''): ''' Radio button inputs: (multiple choice) @@ -256,9 +330,10 @@ def radiogroup(element, value, status, render_template, msg=''): return etree.XML(html) +_reg(radiogroup) + # TODO: consolidate choicegroup, radiogroup, checkboxgroup after discussion of # desired semantics. -@register_render_function def checkboxgroup(element, value, status, render_template, msg=''): ''' Checkbox inputs: (select one or more choices) @@ -278,7 +353,8 @@ def checkboxgroup(element, value, status, render_template, msg=''): html = render_template("choicegroup.html", context) return etree.XML(html) -@register_render_function +_reg(checkboxgroup) + def javascriptinput(element, value, status, render_template, msg='null'): ''' Hidden field for javascript to communicate via; also loads the required @@ -309,16 +385,16 @@ def javascriptinput(element, value, status, render_template, msg='null'): html = render_template("javascriptinput.html", context) return etree.XML(html) +_reg(javascriptinput) + -@register_render_function def textline(element, value, status, render_template, msg=""): ''' Simple text line input, with optional size specification. 
''' # TODO: 'dojs' flag is temporary, for backwards compatibility with 8.02x if element.get('math') or element.get('dojs'): - return SimpleInput.xml_tags['textline_dynamath'](element, value, status, - render_template, msg) + return textline_dynamath(element, value, status, render_template, msg) eid = element.get('id') if eid is None: msg = 'textline has no id: it probably appears outside of a known response type' @@ -354,10 +430,11 @@ def textline(element, value, status, render_template, msg=""): raise return xhtml +_reg(textline) + #----------------------------------------------------------------------------- -@register_render_function def textline_dynamath(element, value, status, render_template, msg=''): ''' Text line input with dynamic math display (equation rendered on client in real time @@ -397,9 +474,10 @@ def textline_dynamath(element, value, status, render_template, msg=''): html = render_template("textinput_dynamath.html", context) return etree.XML(html) +_reg(textline_dynamath) + #----------------------------------------------------------------------------- -@register_render_function def filesubmission(element, value, status, render_template, msg=''): ''' Upload a single file (e.g. for programming assignments) @@ -429,10 +507,11 @@ def filesubmission(element, value, status, render_template, msg=''): html = render_template("filesubmission.html", context) return etree.XML(html) +_reg(filesubmission) + #----------------------------------------------------------------------------- ## TODO: Make a wrapper for -@register_render_function def textbox(element, value, status, render_template, msg=''): ''' The textbox is used for code input. 
The message is the return HTML string from @@ -491,8 +570,9 @@ def textbox(element, value, status, render_template, msg=''): return xhtml +_reg(textbox) + #----------------------------------------------------------------------------- -@register_render_function def schematic(element, value, status, render_template, msg=''): eid = element.get('id') height = element.get('height') @@ -515,10 +595,10 @@ def schematic(element, value, status, render_template, msg=''): html = render_template("schematicinput.html", context) return etree.XML(html) +_reg(schematic) #----------------------------------------------------------------------------- ### TODO: Move out of inputtypes -@register_render_function def math(element, value, status, render_template, msg=''): ''' This is not really an input type. It is a convention from Lon-CAPA, used for @@ -563,16 +643,17 @@ def math(element, value, status, render_template, msg=''): # xhtml.tail = element.tail # don't forget to include the tail! return xhtml +_reg(math) + #----------------------------------------------------------------------------- -@register_render_function def solution(element, value, status, render_template, msg=''): ''' This is not really an input type. It is just a ... which is given an ID, that is used for displaying an extended answer (a problem "solution") after "show answers" is pressed. Note that the solution content is NOT sent with the HTML. It is obtained - by a JSON call. + by an ajax call. ''' eid = element.get('id') size = element.get('size') @@ -585,10 +666,11 @@ def solution(element, value, status, render_template, msg=''): html = render_template("solutionspan.html", context) return etree.XML(html) +_reg(solution) + #----------------------------------------------------------------------------- -@register_render_function def imageinput(element, value, status, render_template, msg=''): ''' Clickable image as an input field. 
Element should specify the image source, height, @@ -624,3 +706,36 @@ def imageinput(element, value, status, render_template, msg=''): } html = render_template("imageinput.html", context) return etree.XML(html) + +_reg(imageinput) + + +#-------------------------------------------------------------------------------- + + +class ChemicalEquationInput(InputTypeBase): + """ + An input type for entering chemical equations. Supports live preview. + + Example: + + + + options: size -- width of the textbox. + """ + + template = "chemicalequationinput.html" + tags = ['chemicalequationinput'] + + def _get_render_context(self): + size = self.xml.get('size', '20') + context = { + 'id': self.id, + 'value': self.value, + 'status': self.status, + 'size': size, + 'previewer': '/static/js/capa/chemical_equation_preview.js', + } + return context + +register_input_class(ChemicalEquationInput) diff --git a/common/lib/capa/capa/responsetypes.py b/common/lib/capa/capa/responsetypes.py index df98f62fc5..eb739c9b72 100644 --- a/common/lib/capa/capa/responsetypes.py +++ b/common/lib/capa/capa/responsetypes.py @@ -756,15 +756,26 @@ class NumericalResponse(LoncapaResponse): def get_score(self, student_answers): '''Grade a numeric response ''' student_answer = student_answers[self.answer_id] + + try: + correct_ans = complex(self.correct_answer) + except ValueError: + log.debug("Content error--answer '{0}' is not a valid complex number".format(self.correct_answer)) + raise StudentInputError("There was a problem with the staff answer to this problem") + try: correct = compare_with_tolerance(evaluator(dict(), dict(), student_answer), - complex(self.correct_answer), self.tolerance) + correct_ans, self.tolerance) # We should catch this explicitly. 
# I think this is just pyparsing.ParseException, calc.UndefinedVariable: # But we'd need to confirm except: - raise StudentInputError("Invalid input: could not interpret '%s' as a number" % - cgi.escape(student_answer)) + # Use the traceback-preserving version of re-raising with a different type + import sys + type, value, traceback = sys.exc_info() + + raise StudentInputError, ("Invalid input: could not interpret '%s' as a number" % + cgi.escape(student_answer)), traceback if correct: return CorrectMap(self.answer_id, 'correct') @@ -856,7 +867,7 @@ def sympy_check2(): """}] response_tag = 'customresponse' - allowed_inputfields = ['textline', 'textbox'] + allowed_inputfields = ['textline', 'textbox', 'chemicalequationinput'] def setup_response(self): xml = self.xml diff --git a/common/lib/capa/capa/templates/chemicalequationinput.html b/common/lib/capa/capa/templates/chemicalequationinput.html new file mode 100644 index 0000000000..f705ec3d06 --- /dev/null +++ b/common/lib/capa/capa/templates/chemicalequationinput.html @@ -0,0 +1,42 @@ +
+
+ + % if status == 'unsubmitted': +
+ % elif status == 'correct': +
+ % elif status == 'incorrect': +
+ % elif status == 'incomplete': +
+ % endif + + + +

+ % if status == 'unsubmitted': + unanswered + % elif status == 'correct': + correct + % elif status == 'incorrect': + incorrect + % elif status == 'incomplete': + incomplete + % endif +

+ +
+ +
+ + +

+ +% if status in ['unsubmitted', 'correct', 'incorrect', 'incomplete']: +
+% endif +
diff --git a/common/lib/capa/capa/tests/__init__.py b/common/lib/capa/capa/tests/__init__.py new file mode 100644 index 0000000000..c72d2a1538 --- /dev/null +++ b/common/lib/capa/capa/tests/__init__.py @@ -0,0 +1,21 @@ +import fs +import fs.osfs +import os + +from mock import Mock + +TEST_DIR = os.path.dirname(os.path.realpath(__file__)) + +test_system = Mock( + ajax_url='courses/course_id/modx/a_location', + track_function=Mock(), + get_module=Mock(), + render_template=Mock(), + replace_urls=Mock(), + user=Mock(), + filestore=fs.osfs.OSFS(os.path.join(TEST_DIR, "test_files")), + debug=True, + xqueue={'interface':None, 'callback_url':'/', 'default_queuename': 'testqueue', 'waittime': 10}, + node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"), + anonymous_student_id = 'student' +) diff --git a/common/lib/xmodule/xmodule/tests/test_files/choiceresponse_checkbox.xml b/common/lib/capa/capa/tests/test_files/choiceresponse_checkbox.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/choiceresponse_checkbox.xml rename to common/lib/capa/capa/tests/test_files/choiceresponse_checkbox.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/choiceresponse_radio.xml b/common/lib/capa/capa/tests/test_files/choiceresponse_radio.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/choiceresponse_radio.xml rename to common/lib/capa/capa/tests/test_files/choiceresponse_radio.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/coderesponse.xml b/common/lib/capa/capa/tests/test_files/coderesponse.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/coderesponse.xml rename to common/lib/capa/capa/tests/test_files/coderesponse.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/coderesponse_externalresponseformat.xml b/common/lib/capa/capa/tests/test_files/coderesponse_externalresponseformat.xml similarity index 100% rename from 
common/lib/xmodule/xmodule/tests/test_files/coderesponse_externalresponseformat.xml rename to common/lib/capa/capa/tests/test_files/coderesponse_externalresponseformat.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/formularesponse_with_hint.xml b/common/lib/capa/capa/tests/test_files/formularesponse_with_hint.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/formularesponse_with_hint.xml rename to common/lib/capa/capa/tests/test_files/formularesponse_with_hint.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/imageresponse.xml b/common/lib/capa/capa/tests/test_files/imageresponse.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/imageresponse.xml rename to common/lib/capa/capa/tests/test_files/imageresponse.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/javascriptresponse.xml b/common/lib/capa/capa/tests/test_files/javascriptresponse.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/javascriptresponse.xml rename to common/lib/capa/capa/tests/test_files/javascriptresponse.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/compiled/c9a9cd4242d84c924fe5f8324e9ae79d.js b/common/lib/capa/capa/tests/test_files/js/compiled/c9a9cd4242d84c924fe5f8324e9ae79d.js similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/js/compiled/c9a9cd4242d84c924fe5f8324e9ae79d.js rename to common/lib/capa/capa/tests/test_files/js/compiled/c9a9cd4242d84c924fe5f8324e9ae79d.js diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/compiled/javascriptresponse.js b/common/lib/capa/capa/tests/test_files/js/compiled/javascriptresponse.js similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/js/compiled/javascriptresponse.js rename to common/lib/capa/capa/tests/test_files/js/compiled/javascriptresponse.js diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/mersenne-twister-min.js 
b/common/lib/capa/capa/tests/test_files/js/mersenne-twister-min.js similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/js/mersenne-twister-min.js rename to common/lib/capa/capa/tests/test_files/js/mersenne-twister-min.js diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/test_problem_display.coffee b/common/lib/capa/capa/tests/test_files/js/test_problem_display.coffee similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/js/test_problem_display.coffee rename to common/lib/capa/capa/tests/test_files/js/test_problem_display.coffee diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/test_problem_display.js b/common/lib/capa/capa/tests/test_files/js/test_problem_display.js similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/js/test_problem_display.js rename to common/lib/capa/capa/tests/test_files/js/test_problem_display.js diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/test_problem_generator.coffee b/common/lib/capa/capa/tests/test_files/js/test_problem_generator.coffee similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/js/test_problem_generator.coffee rename to common/lib/capa/capa/tests/test_files/js/test_problem_generator.coffee diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/test_problem_generator.js b/common/lib/capa/capa/tests/test_files/js/test_problem_generator.js similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/js/test_problem_generator.js rename to common/lib/capa/capa/tests/test_files/js/test_problem_generator.js diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/test_problem_grader.coffee b/common/lib/capa/capa/tests/test_files/js/test_problem_grader.coffee similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/js/test_problem_grader.coffee rename to common/lib/capa/capa/tests/test_files/js/test_problem_grader.coffee diff --git 
a/common/lib/xmodule/xmodule/tests/test_files/js/test_problem_grader.js b/common/lib/capa/capa/tests/test_files/js/test_problem_grader.js similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/js/test_problem_grader.js rename to common/lib/capa/capa/tests/test_files/js/test_problem_grader.js diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/xproblem.coffee b/common/lib/capa/capa/tests/test_files/js/xproblem.coffee similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/js/xproblem.coffee rename to common/lib/capa/capa/tests/test_files/js/xproblem.coffee diff --git a/common/lib/xmodule/xmodule/tests/test_files/js/xproblem.js b/common/lib/capa/capa/tests/test_files/js/xproblem.js similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/js/xproblem.js rename to common/lib/capa/capa/tests/test_files/js/xproblem.js diff --git a/common/lib/xmodule/xmodule/tests/test_files/multi_bare.xml b/common/lib/capa/capa/tests/test_files/multi_bare.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/multi_bare.xml rename to common/lib/capa/capa/tests/test_files/multi_bare.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/multichoice.xml b/common/lib/capa/capa/tests/test_files/multichoice.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/multichoice.xml rename to common/lib/capa/capa/tests/test_files/multichoice.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/optionresponse.xml b/common/lib/capa/capa/tests/test_files/optionresponse.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/optionresponse.xml rename to common/lib/capa/capa/tests/test_files/optionresponse.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/stringresponse_with_hint.xml b/common/lib/capa/capa/tests/test_files/stringresponse_with_hint.xml similarity index 100% rename from 
common/lib/xmodule/xmodule/tests/test_files/stringresponse_with_hint.xml rename to common/lib/capa/capa/tests/test_files/stringresponse_with_hint.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/symbolicresponse.xml b/common/lib/capa/capa/tests/test_files/symbolicresponse.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/symbolicresponse.xml rename to common/lib/capa/capa/tests/test_files/symbolicresponse.xml diff --git a/common/lib/xmodule/xmodule/tests/test_files/truefalse.xml b/common/lib/capa/capa/tests/test_files/truefalse.xml similarity index 100% rename from common/lib/xmodule/xmodule/tests/test_files/truefalse.xml rename to common/lib/capa/capa/tests/test_files/truefalse.xml diff --git a/common/lib/capa/capa/tests/test_inputtypes.py b/common/lib/capa/capa/tests/test_inputtypes.py new file mode 100644 index 0000000000..9ef642d468 --- /dev/null +++ b/common/lib/capa/capa/tests/test_inputtypes.py @@ -0,0 +1,68 @@ +""" +Tests of input types (and actually responsetypes too) +""" + +from datetime import datetime +import json +from mock import Mock +from nose.plugins.skip import SkipTest +import os +import unittest + +from . import test_system +from capa import inputtypes + +from lxml import etree + +def tst_render_template(template, context): + """ + A test version of render to template. Renders to the repr of the context, completely ignoring the template name. 
+ """ + return repr(context) + + +system = Mock(render_template=tst_render_template) + +class OptionInputTest(unittest.TestCase): + ''' + Make sure option inputs work + ''' + def test_rendering_new(self): + xml = """""" + element = etree.fromstring(xml) + + value = 'Down' + status = 'answered' + context = inputtypes._optioninput(element, value, status, test_system.render_template) + print 'context: ', context + + expected = {'value': 'Down', + 'options': [('Up', 'Up'), ('Down', 'Down')], + 'state': 'answered', + 'msg': '', + 'inline': '', + 'id': 'sky_input'} + + self.assertEqual(context, expected) + + + def test_rendering(self): + xml_str = """""" + element = etree.fromstring(xml_str) + + state = {'value': 'Down', + 'id': 'sky_input', + 'status': 'answered'} + option_input = inputtypes.OptionInput(system, element, state) + + context = option_input._get_render_context() + + expected = {'value': 'Down', + 'options': [('Up', 'Up'), ('Down', 'Down')], + 'state': 'answered', + 'msg': '', + 'inline': '', + 'id': 'sky_input'} + + self.assertEqual(context, expected) + diff --git a/common/lib/capa/capa/tests/test_responsetypes.py b/common/lib/capa/capa/tests/test_responsetypes.py new file mode 100644 index 0000000000..f2fa873080 --- /dev/null +++ b/common/lib/capa/capa/tests/test_responsetypes.py @@ -0,0 +1,384 @@ +""" +Tests of responsetypes +""" + + +from datetime import datetime +import json +from nose.plugins.skip import SkipTest +import os +import unittest + +from . 
import test_system + +import capa.capa_problem as lcp +from capa.correctmap import CorrectMap +from capa.util import convert_files_to_filenames +from capa.xqueue_interface import dateformat + +class MultiChoiceTest(unittest.TestCase): + def test_MC_grade(self): + multichoice_file = os.path.dirname(__file__) + "/test_files/multichoice.xml" + test_lcp = lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': 'choice_foil3'} + self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + false_answers = {'1_2_1': 'choice_foil2'} + self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') + + def test_MC_bare_grades(self): + multichoice_file = os.path.dirname(__file__) + "/test_files/multi_bare.xml" + test_lcp = lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': 'choice_2'} + self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + false_answers = {'1_2_1': 'choice_1'} + self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') + + def test_TF_grade(self): + truefalse_file = os.path.dirname(__file__) + "/test_files/truefalse.xml" + test_lcp = lcp.LoncapaProblem(open(truefalse_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': ['choice_foil2', 'choice_foil1']} + self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + false_answers = {'1_2_1': ['choice_foil1']} + self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') + false_answers = {'1_2_1': ['choice_foil1', 'choice_foil3']} + self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') + false_answers = {'1_2_1': ['choice_foil3']} + self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') + 
false_answers = {'1_2_1': ['choice_foil1', 'choice_foil2', 'choice_foil3']} + self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') + + +class ImageResponseTest(unittest.TestCase): + def test_ir_grade(self): + imageresponse_file = os.path.dirname(__file__) + "/test_files/imageresponse.xml" + test_lcp = lcp.LoncapaProblem(open(imageresponse_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': '(490,11)-(556,98)', + '1_2_2': '(242,202)-(296,276)'} + test_answers = {'1_2_1': '[500,20]', + '1_2_2': '[250,300]', + } + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect') + + +class SymbolicResponseTest(unittest.TestCase): + def test_sr_grade(self): + raise SkipTest() # This test fails due to dependencies on a local copy of snuggletex-webapp. Until we have figured that out, we'll just skip this test + symbolicresponse_file = os.path.dirname(__file__) + "/test_files/symbolicresponse.xml" + test_lcp = lcp.LoncapaProblem(open(symbolicresponse_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': 'cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]', + '1_2_1_dynamath': ''' + + + + cos + + ( + θ + ) + + + + + [ + + + + 1 + + + 0 + + + + + 0 + + + 1 + + + + ] + + + + i + + + sin + + ( + θ + ) + + + + + [ + + + + 0 + + + 1 + + + + + 1 + + + 0 + + + + ] + + + +''', + } + wrong_answers = {'1_2_1': '2', + '1_2_1_dynamath': ''' + + + 2 + +''', + } + self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + self.assertEquals(test_lcp.grade_answers(wrong_answers).get_correctness('1_2_1'), 'incorrect') + + +class OptionResponseTest(unittest.TestCase): + ''' + Run this with + + python manage.py test courseware.OptionResponseTest + ''' + def test_or_grade(self): + optionresponse_file = os.path.dirname(__file__) + 
"/test_files/optionresponse.xml" + test_lcp = lcp.LoncapaProblem(open(optionresponse_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': 'True', + '1_2_2': 'False'} + test_answers = {'1_2_1': 'True', + '1_2_2': 'True', + } + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect') + + +class FormulaResponseWithHintTest(unittest.TestCase): + ''' + Test Formula response problem with a hint + This problem also uses calc. + ''' + def test_or_grade(self): + problem_file = os.path.dirname(__file__) + "/test_files/formularesponse_with_hint.xml" + test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': '2.5*x-5.0'} + test_answers = {'1_2_1': '0.4*x-5.0'} + self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + cmap = test_lcp.grade_answers(test_answers) + self.assertEquals(cmap.get_correctness('1_2_1'), 'incorrect') + self.assertTrue('You have inverted' in cmap.get_hint('1_2_1')) + + +class StringResponseWithHintTest(unittest.TestCase): + ''' + Test String response problem with a hint + ''' + def test_or_grade(self): + problem_file = os.path.dirname(__file__) + "/test_files/stringresponse_with_hint.xml" + test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': 'Michigan'} + test_answers = {'1_2_1': 'Minnesota'} + self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + cmap = test_lcp.grade_answers(test_answers) + self.assertEquals(cmap.get_correctness('1_2_1'), 'incorrect') + self.assertTrue('St. 
Paul' in cmap.get_hint('1_2_1')) + + +class CodeResponseTest(unittest.TestCase): + ''' + Test CodeResponse + TODO: Add tests for external grader messages + ''' + @staticmethod + def make_queuestate(key, time): + timestr = datetime.strftime(time, dateformat) + return {'key': key, 'time': timestr} + + def test_is_queued(self): + """ + Simple test of whether LoncapaProblem knows when it's been queued + """ + problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") + with open(problem_file) as input_file: + test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=test_system) + + answer_ids = sorted(test_lcp.get_question_answers()) + + # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state + cmap = CorrectMap() + for answer_id in answer_ids: + cmap.update(CorrectMap(answer_id=answer_id, queuestate=None)) + test_lcp.correct_map.update(cmap) + + self.assertEquals(test_lcp.is_queued(), False) + + # Now we queue the LCP + cmap = CorrectMap() + for i, answer_id in enumerate(answer_ids): + queuestate = CodeResponseTest.make_queuestate(i, datetime.now()) + cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate)) + test_lcp.correct_map.update(cmap) + + self.assertEquals(test_lcp.is_queued(), True) + + + def test_update_score(self): + ''' + Test whether LoncapaProblem.update_score can deliver queued result to the right subproblem + ''' + problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") + with open(problem_file) as input_file: + test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=test_system) + + answer_ids = sorted(test_lcp.get_question_answers()) + + # CodeResponse requires internal CorrectMap state. 
Build it now in the queued state + old_cmap = CorrectMap() + for i, answer_id in enumerate(answer_ids): + queuekey = 1000 + i + queuestate = CodeResponseTest.make_queuestate(1000+i, datetime.now()) + old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate)) + + # Message format common to external graders + grader_msg = 'MESSAGE' # Must be valid XML + correct_score_msg = json.dumps({'correct':True, 'score':1, 'msg': grader_msg}) + incorrect_score_msg = json.dumps({'correct':False, 'score':0, 'msg': grader_msg}) + + xserver_msgs = {'correct': correct_score_msg, + 'incorrect': incorrect_score_msg,} + + # Incorrect queuekey, state should not be updated + for correctness in ['correct', 'incorrect']: + test_lcp.correct_map = CorrectMap() + test_lcp.correct_map.update(old_cmap) # Deep copy + + test_lcp.update_score(xserver_msgs[correctness], queuekey=0) + self.assertEquals(test_lcp.correct_map.get_dict(), old_cmap.get_dict()) # Deep comparison + + for answer_id in answer_ids: + self.assertTrue(test_lcp.correct_map.is_queued(answer_id)) # Should be still queued, since message undelivered + + # Correct queuekey, state should be updated + for correctness in ['correct', 'incorrect']: + for i, answer_id in enumerate(answer_ids): + test_lcp.correct_map = CorrectMap() + test_lcp.correct_map.update(old_cmap) + + new_cmap = CorrectMap() + new_cmap.update(old_cmap) + npoints = 1 if correctness=='correct' else 0 + new_cmap.set(answer_id=answer_id, npoints=npoints, correctness=correctness, msg=grader_msg, queuestate=None) + + test_lcp.update_score(xserver_msgs[correctness], queuekey=1000 + i) + self.assertEquals(test_lcp.correct_map.get_dict(), new_cmap.get_dict()) + + for j, test_id in enumerate(answer_ids): + if j == i: + self.assertFalse(test_lcp.correct_map.is_queued(test_id)) # Should be dequeued, message delivered + else: + self.assertTrue(test_lcp.correct_map.is_queued(test_id)) # Should be queued, message undelivered + + + def 
test_recentmost_queuetime(self): + ''' + Test whether the LoncapaProblem knows about the time of queue requests + ''' + problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") + with open(problem_file) as input_file: + test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=test_system) + + answer_ids = sorted(test_lcp.get_question_answers()) + + # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state + cmap = CorrectMap() + for answer_id in answer_ids: + cmap.update(CorrectMap(answer_id=answer_id, queuestate=None)) + test_lcp.correct_map.update(cmap) + + self.assertEquals(test_lcp.get_recentmost_queuetime(), None) + + # CodeResponse requires internal CorrectMap state. Build it now in the queued state + cmap = CorrectMap() + for i, answer_id in enumerate(answer_ids): + queuekey = 1000 + i + latest_timestamp = datetime.now() + queuestate = CodeResponseTest.make_queuestate(1000+i, latest_timestamp) + cmap.update(CorrectMap(answer_id=answer_id, queuestate=queuestate)) + test_lcp.correct_map.update(cmap) + + # Queue state only tracks up to second + latest_timestamp = datetime.strptime(datetime.strftime(latest_timestamp, dateformat), dateformat) + + self.assertEquals(test_lcp.get_recentmost_queuetime(), latest_timestamp) + + def test_convert_files_to_filenames(self): + ''' + Test whether file objects are converted to filenames without altering other structures + ''' + problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") + with open(problem_file) as fp: + answers_with_file = {'1_2_1': 'String-based answer', + '1_3_1': ['answer1', 'answer2', 'answer3'], + '1_4_1': [fp, fp]} + answers_converted = convert_files_to_filenames(answers_with_file) + self.assertEquals(answers_converted['1_2_1'], 'String-based answer') + self.assertEquals(answers_converted['1_3_1'], ['answer1', 'answer2', 'answer3']) + self.assertEquals(answers_converted['1_4_1'], [fp.name, fp.name]) + + +class 
ChoiceResponseTest(unittest.TestCase): + + def test_cr_rb_grade(self): + problem_file = os.path.dirname(__file__) + "/test_files/choiceresponse_radio.xml" + test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': 'choice_2', + '1_3_1': ['choice_2', 'choice_3']} + test_answers = {'1_2_1': 'choice_2', + '1_3_1': 'choice_2', + } + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect') + + def test_cr_cb_grade(self): + problem_file = os.path.dirname(__file__) + "/test_files/choiceresponse_checkbox.xml" + test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': 'choice_2', + '1_3_1': ['choice_2', 'choice_3'], + '1_4_1': ['choice_2', 'choice_3']} + test_answers = {'1_2_1': 'choice_2', + '1_3_1': 'choice_2', + '1_4_1': ['choice_2', 'choice_3'], + } + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect') + self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_4_1'), 'correct') + +class JavascriptResponseTest(unittest.TestCase): + + def test_jr_grade(self): + problem_file = os.path.dirname(__file__) + "/test_files/javascriptresponse.xml" + coffee_file_path = os.path.dirname(__file__) + "/test_files/js/*.coffee" + os.system("coffee -c %s" % (coffee_file_path)) + test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=test_system) + correct_answers = {'1_2_1': json.dumps({0: 4})} + incorrect_answers = {'1_2_1': json.dumps({0: 5})} + + self.assertEquals(test_lcp.grade_answers(incorrect_answers).get_correctness('1_2_1'), 'incorrect') + self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') + diff --git 
a/common/lib/xmodule/xmodule/capa_module.py b/common/lib/xmodule/xmodule/capa_module.py index 95eb2c996c..540a4edc9f 100644 --- a/common/lib/xmodule/xmodule/capa_module.py +++ b/common/lib/xmodule/xmodule/capa_module.py @@ -133,6 +133,11 @@ class CapaModule(XModule): if self.rerandomize == 'never': self.seed = 1 elif self.rerandomize == "per_student" and hasattr(self.system, 'id'): + # TODO: This line is badly broken: + # (1) We're passing student ID to xmodule. + # (2) There aren't bins of students. -- we only want 10 or 20 randomizations, and want to assign students + # to these bins, and may not want cohorts. So e.g. hash(your-id, problem_id) % num_bins. + # - analytics really needs small number of bins. self.seed = system.id else: self.seed = None diff --git a/common/lib/xmodule/xmodule/tests/__init__.py b/common/lib/xmodule/xmodule/tests/__init__.py index 654b6beb15..ed64c45118 100644 --- a/common/lib/xmodule/xmodule/tests/__init__.py +++ b/common/lib/xmodule/xmodule/tests/__init__.py @@ -1,31 +1,22 @@ -# -# unittests for xmodule (and capa) -# -# Note: run this using a like like this: -# -# django-admin.py test --settings=lms.envs.test_ike --pythonpath=. 
common/lib/xmodule +""" +unittests for xmodule + +Run like this: + + rake test_common/lib/xmodule + +""" import unittest import os import fs import fs.osfs -import json -import json import numpy -import xmodule import capa.calc as calc -import capa.capa_problem as lcp -from capa.correctmap import CorrectMap -from capa.util import convert_files_to_filenames -from capa.xqueue_interface import dateformat -from datetime import datetime -from xmodule import graders, x_module +import xmodule from xmodule.x_module import ModuleSystem -from xmodule.graders import Score, aggregate_scores -from xmodule.progress import Progress -from nose.plugins.skip import SkipTest from mock import Mock i4xs = ModuleSystem( @@ -35,7 +26,7 @@ i4xs = ModuleSystem( render_template=Mock(), replace_urls=Mock(), user=Mock(), - filestore=fs.osfs.OSFS(os.path.dirname(os.path.realpath(__file__))+"/test_files"), + filestore=Mock(), debug=True, xqueue={'interface':None, 'callback_url':'/', 'default_queuename': 'testqueue', 'waittime': 10}, node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"), @@ -94,719 +85,3 @@ class ModelsTest(unittest.TestCase): exception_happened = True self.assertTrue(exception_happened) -#----------------------------------------------------------------------------- -# tests of capa_problem inputtypes - - -class MultiChoiceTest(unittest.TestCase): - def test_MC_grade(self): - multichoice_file = os.path.dirname(__file__) + "/test_files/multichoice.xml" - test_lcp = lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=i4xs) - correct_answers = {'1_2_1': 'choice_foil3'} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - false_answers = {'1_2_1': 'choice_foil2'} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - - def test_MC_bare_grades(self): - multichoice_file = os.path.dirname(__file__) + "/test_files/multi_bare.xml" - test_lcp = 
lcp.LoncapaProblem(open(multichoice_file).read(), '1', system=i4xs) - correct_answers = {'1_2_1': 'choice_2'} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - false_answers = {'1_2_1': 'choice_1'} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - - def test_TF_grade(self): - truefalse_file = os.path.dirname(__file__) + "/test_files/truefalse.xml" - test_lcp = lcp.LoncapaProblem(open(truefalse_file).read(), '1', system=i4xs) - correct_answers = {'1_2_1': ['choice_foil2', 'choice_foil1']} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - false_answers = {'1_2_1': ['choice_foil1']} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - false_answers = {'1_2_1': ['choice_foil1', 'choice_foil3']} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - false_answers = {'1_2_1': ['choice_foil3']} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - false_answers = {'1_2_1': ['choice_foil1', 'choice_foil2', 'choice_foil3']} - self.assertEquals(test_lcp.grade_answers(false_answers).get_correctness('1_2_1'), 'incorrect') - - -class ImageResponseTest(unittest.TestCase): - def test_ir_grade(self): - imageresponse_file = os.path.dirname(__file__) + "/test_files/imageresponse.xml" - test_lcp = lcp.LoncapaProblem(open(imageresponse_file).read(), '1', system=i4xs) - correct_answers = {'1_2_1': '(490,11)-(556,98)', - '1_2_2': '(242,202)-(296,276)'} - test_answers = {'1_2_1': '[500,20]', - '1_2_2': '[250,300]', - } - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect') - - -class SymbolicResponseTest(unittest.TestCase): - def test_sr_grade(self): - raise SkipTest() # This 
test fails due to dependencies on a local copy of snuggletex-webapp. Until we have figured that out, we'll just skip this test - symbolicresponse_file = os.path.dirname(__file__) + "/test_files/symbolicresponse.xml" - test_lcp = lcp.LoncapaProblem(open(symbolicresponse_file).read(), '1', system=i4xs) - correct_answers = {'1_2_1': 'cos(theta)*[[1,0],[0,1]] + i*sin(theta)*[[0,1],[1,0]]', - '1_2_1_dynamath': ''' - - - - cos - - ( - θ - ) - - - - - [ - - - - 1 - - - 0 - - - - - 0 - - - 1 - - - - ] - - + - i - - - sin - - ( - θ - ) - - - - - [ - - - - 0 - - - 1 - - - - - 1 - - - 0 - - - - ] - - - -''', - } - wrong_answers = {'1_2_1': '2', - '1_2_1_dynamath': ''' - - - 2 - -''', - } - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(wrong_answers).get_correctness('1_2_1'), 'incorrect') - - -class OptionResponseTest(unittest.TestCase): - ''' - Run this with - - python manage.py test courseware.OptionResponseTest - ''' - def test_or_grade(self): - optionresponse_file = os.path.dirname(__file__) + "/test_files/optionresponse.xml" - test_lcp = lcp.LoncapaProblem(open(optionresponse_file).read(), '1', system=i4xs) - correct_answers = {'1_2_1': 'True', - '1_2_2': 'False'} - test_answers = {'1_2_1': 'True', - '1_2_2': 'True', - } - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_2'), 'incorrect') - - -class FormulaResponseWithHintTest(unittest.TestCase): - ''' - Test Formula response problem with a hint - This problem also uses calc. 
- ''' - def test_or_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/formularesponse_with_hint.xml" - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=i4xs) - correct_answers = {'1_2_1': '2.5*x-5.0'} - test_answers = {'1_2_1': '0.4*x-5.0'} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - cmap = test_lcp.grade_answers(test_answers) - self.assertEquals(cmap.get_correctness('1_2_1'), 'incorrect') - self.assertTrue('You have inverted' in cmap.get_hint('1_2_1')) - - -class StringResponseWithHintTest(unittest.TestCase): - ''' - Test String response problem with a hint - ''' - def test_or_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/stringresponse_with_hint.xml" - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=i4xs) - correct_answers = {'1_2_1': 'Michigan'} - test_answers = {'1_2_1': 'Minnesota'} - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - cmap = test_lcp.grade_answers(test_answers) - self.assertEquals(cmap.get_correctness('1_2_1'), 'incorrect') - self.assertTrue('St. Paul' in cmap.get_hint('1_2_1')) - - -class CodeResponseTest(unittest.TestCase): - ''' - Test CodeResponse - TODO: Add tests for external grader messages - ''' - @staticmethod - def make_queuestate(key, time): - timestr = datetime.strftime(time, dateformat) - return {'key': key, 'time': timestr} - - def test_is_queued(self): - ''' - Simple test of whether LoncapaProblem knows when it's been queued - ''' - problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") - with open(problem_file) as input_file: - test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=i4xs) - - answer_ids = sorted(test_lcp.get_question_answers()) - - # CodeResponse requires internal CorrectMap state. 
Build it now in the unqueued state - cmap = CorrectMap() - for answer_id in answer_ids: - cmap.update(CorrectMap(answer_id=answer_id, queuestate=None)) - test_lcp.correct_map.update(cmap) - - self.assertEquals(test_lcp.is_queued(), False) - - # Now we queue the LCP - cmap = CorrectMap() - for i, answer_id in enumerate(answer_ids): - queuestate = CodeResponseTest.make_queuestate(i, datetime.now()) - cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate)) - test_lcp.correct_map.update(cmap) - - self.assertEquals(test_lcp.is_queued(), True) - - - def test_update_score(self): - ''' - Test whether LoncapaProblem.update_score can deliver queued result to the right subproblem - ''' - problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") - with open(problem_file) as input_file: - test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=i4xs) - - answer_ids = sorted(test_lcp.get_question_answers()) - - # CodeResponse requires internal CorrectMap state. 
Build it now in the queued state - old_cmap = CorrectMap() - for i, answer_id in enumerate(answer_ids): - queuekey = 1000 + i - queuestate = CodeResponseTest.make_queuestate(1000+i, datetime.now()) - old_cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate)) - - # Message format common to external graders - grader_msg = 'MESSAGE' # Must be valid XML - correct_score_msg = json.dumps({'correct':True, 'score':1, 'msg': grader_msg}) - incorrect_score_msg = json.dumps({'correct':False, 'score':0, 'msg': grader_msg}) - - xserver_msgs = {'correct': correct_score_msg, - 'incorrect': incorrect_score_msg,} - - # Incorrect queuekey, state should not be updated - for correctness in ['correct', 'incorrect']: - test_lcp.correct_map = CorrectMap() - test_lcp.correct_map.update(old_cmap) # Deep copy - - test_lcp.update_score(xserver_msgs[correctness], queuekey=0) - self.assertEquals(test_lcp.correct_map.get_dict(), old_cmap.get_dict()) # Deep comparison - - for answer_id in answer_ids: - self.assertTrue(test_lcp.correct_map.is_queued(answer_id)) # Should be still queued, since message undelivered - - # Correct queuekey, state should be updated - for correctness in ['correct', 'incorrect']: - for i, answer_id in enumerate(answer_ids): - test_lcp.correct_map = CorrectMap() - test_lcp.correct_map.update(old_cmap) - - new_cmap = CorrectMap() - new_cmap.update(old_cmap) - npoints = 1 if correctness=='correct' else 0 - new_cmap.set(answer_id=answer_id, npoints=npoints, correctness=correctness, msg=grader_msg, queuestate=None) - - test_lcp.update_score(xserver_msgs[correctness], queuekey=1000 + i) - self.assertEquals(test_lcp.correct_map.get_dict(), new_cmap.get_dict()) - - for j, test_id in enumerate(answer_ids): - if j == i: - self.assertFalse(test_lcp.correct_map.is_queued(test_id)) # Should be dequeued, message delivered - else: - self.assertTrue(test_lcp.correct_map.is_queued(test_id)) # Should be queued, message undelivered - - - def 
test_recentmost_queuetime(self): - ''' - Test whether the LoncapaProblem knows about the time of queue requests - ''' - problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") - with open(problem_file) as input_file: - test_lcp = lcp.LoncapaProblem(input_file.read(), '1', system=i4xs) - - answer_ids = sorted(test_lcp.get_question_answers()) - - # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state - cmap = CorrectMap() - for answer_id in answer_ids: - cmap.update(CorrectMap(answer_id=answer_id, queuestate=None)) - test_lcp.correct_map.update(cmap) - - self.assertEquals(test_lcp.get_recentmost_queuetime(), None) - - # CodeResponse requires internal CorrectMap state. Build it now in the queued state - cmap = CorrectMap() - for i, answer_id in enumerate(answer_ids): - queuekey = 1000 + i - latest_timestamp = datetime.now() - queuestate = CodeResponseTest.make_queuestate(1000+i, latest_timestamp) - cmap.update(CorrectMap(answer_id=answer_id, queuestate=queuestate)) - test_lcp.correct_map.update(cmap) - - # Queue state only tracks up to second - latest_timestamp = datetime.strptime(datetime.strftime(latest_timestamp, dateformat), dateformat) - - self.assertEquals(test_lcp.get_recentmost_queuetime(), latest_timestamp) - - def test_convert_files_to_filenames(self): - ''' - Test whether file objects are converted to filenames without altering other structures - ''' - problem_file = os.path.join(os.path.dirname(__file__), "test_files/coderesponse.xml") - with open(problem_file) as fp: - answers_with_file = {'1_2_1': 'String-based answer', - '1_3_1': ['answer1', 'answer2', 'answer3'], - '1_4_1': [fp, fp]} - answers_converted = convert_files_to_filenames(answers_with_file) - self.assertEquals(answers_converted['1_2_1'], 'String-based answer') - self.assertEquals(answers_converted['1_3_1'], ['answer1', 'answer2', 'answer3']) - self.assertEquals(answers_converted['1_4_1'], [fp.name, fp.name]) - - -class 
ChoiceResponseTest(unittest.TestCase): - - def test_cr_rb_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/choiceresponse_radio.xml" - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=i4xs) - correct_answers = {'1_2_1': 'choice_2', - '1_3_1': ['choice_2', 'choice_3']} - test_answers = {'1_2_1': 'choice_2', - '1_3_1': 'choice_2', - } - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect') - - def test_cr_cb_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/choiceresponse_checkbox.xml" - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=i4xs) - correct_answers = {'1_2_1': 'choice_2', - '1_3_1': ['choice_2', 'choice_3'], - '1_4_1': ['choice_2', 'choice_3']} - test_answers = {'1_2_1': 'choice_2', - '1_3_1': 'choice_2', - '1_4_1': ['choice_2', 'choice_3'], - } - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_2_1'), 'correct') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_3_1'), 'incorrect') - self.assertEquals(test_lcp.grade_answers(test_answers).get_correctness('1_4_1'), 'correct') - -class JavascriptResponseTest(unittest.TestCase): - - def test_jr_grade(self): - problem_file = os.path.dirname(__file__) + "/test_files/javascriptresponse.xml" - coffee_file_path = os.path.dirname(__file__) + "/test_files/js/*.coffee" - os.system("coffee -c %s" % (coffee_file_path)) - test_lcp = lcp.LoncapaProblem(open(problem_file).read(), '1', system=i4xs) - correct_answers = {'1_2_1': json.dumps({0: 4})} - incorrect_answers = {'1_2_1': json.dumps({0: 5})} - - self.assertEquals(test_lcp.grade_answers(incorrect_answers).get_correctness('1_2_1'), 'incorrect') - self.assertEquals(test_lcp.grade_answers(correct_answers).get_correctness('1_2_1'), 'correct') - 
-#----------------------------------------------------------------------------- -# Grading tests - - -class GradesheetTest(unittest.TestCase): - - def test_weighted_grading(self): - scores = [] - Score.__sub__ = lambda me, other: (me.earned - other.earned) + (me.possible - other.possible) - - all, graded = aggregate_scores(scores) - self.assertEqual(all, Score(earned=0, possible=0, graded=False, section="summary")) - self.assertEqual(graded, Score(earned=0, possible=0, graded=True, section="summary")) - - scores.append(Score(earned=0, possible=5, graded=False, section="summary")) - all, graded = aggregate_scores(scores) - self.assertEqual(all, Score(earned=0, possible=5, graded=False, section="summary")) - self.assertEqual(graded, Score(earned=0, possible=0, graded=True, section="summary")) - - scores.append(Score(earned=3, possible=5, graded=True, section="summary")) - all, graded = aggregate_scores(scores) - self.assertAlmostEqual(all, Score(earned=3, possible=10, graded=False, section="summary")) - self.assertAlmostEqual(graded, Score(earned=3, possible=5, graded=True, section="summary")) - - scores.append(Score(earned=2, possible=5, graded=True, section="summary")) - all, graded = aggregate_scores(scores) - self.assertAlmostEqual(all, Score(earned=5, possible=15, graded=False, section="summary")) - self.assertAlmostEqual(graded, Score(earned=5, possible=10, graded=True, section="summary")) - - -class GraderTest(unittest.TestCase): - - empty_gradesheet = { - } - - incomplete_gradesheet = { - 'Homework': [], - 'Lab': [], - 'Midterm': [], - } - - test_gradesheet = { - 'Homework': [Score(earned=2, possible=20.0, graded=True, section='hw1'), - Score(earned=16, possible=16.0, graded=True, section='hw2')], - #The dropped scores should be from the assignments that don't exist yet - - 'Lab': [Score(earned=1, possible=2.0, graded=True, section='lab1'), # Dropped - Score(earned=1, possible=1.0, graded=True, section='lab2'), - Score(earned=1, possible=1.0, graded=True, 
section='lab3'), - Score(earned=5, possible=25.0, graded=True, section='lab4'), # Dropped - Score(earned=3, possible=4.0, graded=True, section='lab5'), # Dropped - Score(earned=6, possible=7.0, graded=True, section='lab6'), - Score(earned=5, possible=6.0, graded=True, section='lab7')], - - 'Midterm': [Score(earned=50.5, possible=100, graded=True, section="Midterm Exam"), ], - } - - def test_SingleSectionGrader(self): - midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam") - lab4Grader = graders.SingleSectionGrader("Lab", "lab4") - badLabGrader = graders.SingleSectionGrader("Lab", "lab42") - - for graded in [midtermGrader.grade(self.empty_gradesheet), - midtermGrader.grade(self.incomplete_gradesheet), - badLabGrader.grade(self.test_gradesheet)]: - self.assertEqual(len(graded['section_breakdown']), 1) - self.assertEqual(graded['percent'], 0.0) - - graded = midtermGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.505) - self.assertEqual(len(graded['section_breakdown']), 1) - - graded = lab4Grader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.2) - self.assertEqual(len(graded['section_breakdown']), 1) - - def test_AssignmentFormatGrader(self): - homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2) - noDropGrader = graders.AssignmentFormatGrader("Homework", 12, 0) - #Even though the minimum number is 3, this should grade correctly when 7 assignments are found - overflowGrader = graders.AssignmentFormatGrader("Lab", 3, 2) - labGrader = graders.AssignmentFormatGrader("Lab", 7, 3) - - #Test the grading of an empty gradesheet - for graded in [homeworkGrader.grade(self.empty_gradesheet), - noDropGrader.grade(self.empty_gradesheet), - homeworkGrader.grade(self.incomplete_gradesheet), - noDropGrader.grade(self.incomplete_gradesheet)]: - self.assertAlmostEqual(graded['percent'], 0.0) - #Make sure the breakdown includes 12 sections, plus one summary - 
self.assertEqual(len(graded['section_breakdown']), 12 + 1) - - graded = homeworkGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.11) # 100% + 10% / 10 assignments - self.assertEqual(len(graded['section_breakdown']), 12 + 1) - - graded = noDropGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.0916666666666666) # 100% + 10% / 12 assignments - self.assertEqual(len(graded['section_breakdown']), 12 + 1) - - graded = overflowGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.8880952380952382) # 100% + 10% / 5 assignments - self.assertEqual(len(graded['section_breakdown']), 7 + 1) - - graded = labGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.9226190476190477) - self.assertEqual(len(graded['section_breakdown']), 7 + 1) - - def test_WeightedSubsectionsGrader(self): - #First, a few sub graders - homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2) - labGrader = graders.AssignmentFormatGrader("Lab", 7, 3) - midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam") - - weightedGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.25), (labGrader, labGrader.category, 0.25), - (midtermGrader, midtermGrader.category, 0.5)]) - - overOneWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.5), (labGrader, labGrader.category, 0.5), - (midtermGrader, midtermGrader.category, 0.5)]) - - #The midterm should have all weight on this one - zeroWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.0), (labGrader, labGrader.category, 0.0), - (midtermGrader, midtermGrader.category, 0.5)]) - - #This should always have a final percent of zero - allZeroWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.0), (labGrader, labGrader.category, 0.0), - (midtermGrader, midtermGrader.category, 
0.0)]) - - emptyGrader = graders.WeightedSubsectionsGrader([]) - - graded = weightedGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.5106547619047619) - self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) - self.assertEqual(len(graded['grade_breakdown']), 3) - - graded = overOneWeightsGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.7688095238095238) - self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) - self.assertEqual(len(graded['grade_breakdown']), 3) - - graded = zeroWeightsGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.2525) - self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) - self.assertEqual(len(graded['grade_breakdown']), 3) - - graded = allZeroWeightsGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.0) - self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) - self.assertEqual(len(graded['grade_breakdown']), 3) - - for graded in [weightedGrader.grade(self.empty_gradesheet), - weightedGrader.grade(self.incomplete_gradesheet), - zeroWeightsGrader.grade(self.empty_gradesheet), - allZeroWeightsGrader.grade(self.empty_gradesheet)]: - self.assertAlmostEqual(graded['percent'], 0.0) - self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) - self.assertEqual(len(graded['grade_breakdown']), 3) - - graded = emptyGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.0) - self.assertEqual(len(graded['section_breakdown']), 0) - self.assertEqual(len(graded['grade_breakdown']), 0) - - def test_graderFromConf(self): - - #Confs always produce a graders.WeightedSubsectionsGrader, so we test this by repeating the test - #in test_graders.WeightedSubsectionsGrader, but generate the graders with confs. 
- - weightedGrader = graders.grader_from_conf([ - { - 'type': "Homework", - 'min_count': 12, - 'drop_count': 2, - 'short_label': "HW", - 'weight': 0.25, - }, - { - 'type': "Lab", - 'min_count': 7, - 'drop_count': 3, - 'category': "Labs", - 'weight': 0.25 - }, - { - 'type': "Midterm", - 'name': "Midterm Exam", - 'short_label': "Midterm", - 'weight': 0.5, - }, - ]) - - emptyGrader = graders.grader_from_conf([]) - - graded = weightedGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.5106547619047619) - self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) - self.assertEqual(len(graded['grade_breakdown']), 3) - - graded = emptyGrader.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.0) - self.assertEqual(len(graded['section_breakdown']), 0) - self.assertEqual(len(graded['grade_breakdown']), 0) - - #Test that graders can also be used instead of lists of dictionaries - homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2) - homeworkGrader2 = graders.grader_from_conf(homeworkGrader) - - graded = homeworkGrader2.grade(self.test_gradesheet) - self.assertAlmostEqual(graded['percent'], 0.11) - self.assertEqual(len(graded['section_breakdown']), 12 + 1) - - #TODO: How do we test failure cases? The parser only logs an error when it can't parse something. Maybe it should throw exceptions? - -# -------------------------------------------------------------------------- -# Module progress tests - - -class ProgressTest(unittest.TestCase): - ''' Test that basic Progress objects work. A Progress represents a - fraction between 0 and 1. 
- ''' - not_started = Progress(0, 17) - part_done = Progress(2, 6) - half_done = Progress(3, 6) - also_half_done = Progress(1, 2) - done = Progress(7, 7) - - def test_create_object(self): - # These should work: - p = Progress(0, 2) - p = Progress(1, 2) - p = Progress(2, 2) - - p = Progress(2.5, 5.0) - p = Progress(3.7, 12.3333) - - # These shouldn't - self.assertRaises(ValueError, Progress, 0, 0) - self.assertRaises(ValueError, Progress, 2, 0) - self.assertRaises(ValueError, Progress, 1, -2) - - self.assertRaises(TypeError, Progress, 0, "all") - # check complex numbers just for the heck of it :) - self.assertRaises(TypeError, Progress, 2j, 3) - - def test_clamp(self): - self.assertEqual((2, 2), Progress(3, 2).frac()) - self.assertEqual((0, 2), Progress(-2, 2).frac()) - - def test_frac(self): - p = Progress(1, 2) - (a, b) = p.frac() - self.assertEqual(a, 1) - self.assertEqual(b, 2) - - def test_percent(self): - self.assertEqual(self.not_started.percent(), 0) - self.assertAlmostEqual(self.part_done.percent(), 33.33333333333333) - self.assertEqual(self.half_done.percent(), 50) - self.assertEqual(self.done.percent(), 100) - - self.assertEqual(self.half_done.percent(), self.also_half_done.percent()) - - def test_started(self): - self.assertFalse(self.not_started.started()) - - self.assertTrue(self.part_done.started()) - self.assertTrue(self.half_done.started()) - self.assertTrue(self.done.started()) - - def test_inprogress(self): - # only true if working on it - self.assertFalse(self.done.inprogress()) - self.assertFalse(self.not_started.inprogress()) - - self.assertTrue(self.part_done.inprogress()) - self.assertTrue(self.half_done.inprogress()) - - def test_done(self): - self.assertTrue(self.done.done()) - self.assertFalse(self.half_done.done()) - self.assertFalse(self.not_started.done()) - - def test_str(self): - self.assertEqual(str(self.not_started), "0/17") - self.assertEqual(str(self.part_done), "2/6") - self.assertEqual(str(self.done), "7/7") - - def 
test_ternary_str(self): - self.assertEqual(self.not_started.ternary_str(), "none") - self.assertEqual(self.half_done.ternary_str(), "in_progress") - self.assertEqual(self.done.ternary_str(), "done") - - def test_to_js_status(self): - '''Test the Progress.to_js_status_str() method''' - - self.assertEqual(Progress.to_js_status_str(self.not_started), "none") - self.assertEqual(Progress.to_js_status_str(self.half_done), "in_progress") - self.assertEqual(Progress.to_js_status_str(self.done), "done") - self.assertEqual(Progress.to_js_status_str(None), "NA") - - def test_to_js_detail_str(self): - '''Test the Progress.to_js_detail_str() method''' - f = Progress.to_js_detail_str - for p in (self.not_started, self.half_done, self.done): - self.assertEqual(f(p), str(p)) - # But None should be encoded as NA - self.assertEqual(f(None), "NA") - - def test_add(self): - '''Test the Progress.add_counts() method''' - p = Progress(0, 2) - p2 = Progress(1, 3) - p3 = Progress(2, 5) - pNone = None - add = lambda a, b: Progress.add_counts(a, b).frac() - - self.assertEqual(add(p, p), (0, 4)) - self.assertEqual(add(p, p2), (1, 5)) - self.assertEqual(add(p2, p3), (3, 8)) - - self.assertEqual(add(p2, pNone), p2.frac()) - self.assertEqual(add(pNone, p2), p2.frac()) - - def test_equality(self): - '''Test that comparing Progress objects for equality - works correctly.''' - p = Progress(1, 2) - p2 = Progress(2, 4) - p3 = Progress(1, 2) - self.assertTrue(p == p3) - self.assertFalse(p == p2) - - # Check != while we're at it - self.assertTrue(p != p2) - self.assertFalse(p != p3) - - -class ModuleProgressTest(unittest.TestCase): - ''' Test that get_progress() does the right thing for the different modules - ''' - def test_xmodule_default(self): - '''Make sure default get_progress exists, returns None''' - xm = x_module.XModule(i4xs, 'a://b/c/d/e', None, {}) - p = xm.get_progress() - self.assertEqual(p, None) diff --git a/common/lib/xmodule/xmodule/tests/test_graders.py 
b/common/lib/xmodule/xmodule/tests/test_graders.py new file mode 100644 index 0000000000..fa0e94d2d5 --- /dev/null +++ b/common/lib/xmodule/xmodule/tests/test_graders.py @@ -0,0 +1,220 @@ +"""Grading tests""" +import unittest + +from xmodule import graders +from xmodule.graders import Score, aggregate_scores + +class GradesheetTest(unittest.TestCase): + + def test_weighted_grading(self): + scores = [] + Score.__sub__ = lambda me, other: (me.earned - other.earned) + (me.possible - other.possible) + + all, graded = aggregate_scores(scores) + self.assertEqual(all, Score(earned=0, possible=0, graded=False, section="summary")) + self.assertEqual(graded, Score(earned=0, possible=0, graded=True, section="summary")) + + scores.append(Score(earned=0, possible=5, graded=False, section="summary")) + all, graded = aggregate_scores(scores) + self.assertEqual(all, Score(earned=0, possible=5, graded=False, section="summary")) + self.assertEqual(graded, Score(earned=0, possible=0, graded=True, section="summary")) + + scores.append(Score(earned=3, possible=5, graded=True, section="summary")) + all, graded = aggregate_scores(scores) + self.assertAlmostEqual(all, Score(earned=3, possible=10, graded=False, section="summary")) + self.assertAlmostEqual(graded, Score(earned=3, possible=5, graded=True, section="summary")) + + scores.append(Score(earned=2, possible=5, graded=True, section="summary")) + all, graded = aggregate_scores(scores) + self.assertAlmostEqual(all, Score(earned=5, possible=15, graded=False, section="summary")) + self.assertAlmostEqual(graded, Score(earned=5, possible=10, graded=True, section="summary")) + + +class GraderTest(unittest.TestCase): + + empty_gradesheet = { + } + + incomplete_gradesheet = { + 'Homework': [], + 'Lab': [], + 'Midterm': [], + } + + test_gradesheet = { + 'Homework': [Score(earned=2, possible=20.0, graded=True, section='hw1'), + Score(earned=16, possible=16.0, graded=True, section='hw2')], + #The dropped scores should be from the assignments 
that don't exist yet + + 'Lab': [Score(earned=1, possible=2.0, graded=True, section='lab1'), # Dropped + Score(earned=1, possible=1.0, graded=True, section='lab2'), + Score(earned=1, possible=1.0, graded=True, section='lab3'), + Score(earned=5, possible=25.0, graded=True, section='lab4'), # Dropped + Score(earned=3, possible=4.0, graded=True, section='lab5'), # Dropped + Score(earned=6, possible=7.0, graded=True, section='lab6'), + Score(earned=5, possible=6.0, graded=True, section='lab7')], + + 'Midterm': [Score(earned=50.5, possible=100, graded=True, section="Midterm Exam"), ], + } + + def test_SingleSectionGrader(self): + midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam") + lab4Grader = graders.SingleSectionGrader("Lab", "lab4") + badLabGrader = graders.SingleSectionGrader("Lab", "lab42") + + for graded in [midtermGrader.grade(self.empty_gradesheet), + midtermGrader.grade(self.incomplete_gradesheet), + badLabGrader.grade(self.test_gradesheet)]: + self.assertEqual(len(graded['section_breakdown']), 1) + self.assertEqual(graded['percent'], 0.0) + + graded = midtermGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.505) + self.assertEqual(len(graded['section_breakdown']), 1) + + graded = lab4Grader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.2) + self.assertEqual(len(graded['section_breakdown']), 1) + + def test_AssignmentFormatGrader(self): + homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2) + noDropGrader = graders.AssignmentFormatGrader("Homework", 12, 0) + #Even though the minimum number is 3, this should grade correctly when 7 assignments are found + overflowGrader = graders.AssignmentFormatGrader("Lab", 3, 2) + labGrader = graders.AssignmentFormatGrader("Lab", 7, 3) + + #Test the grading of an empty gradesheet + for graded in [homeworkGrader.grade(self.empty_gradesheet), + noDropGrader.grade(self.empty_gradesheet), + 
homeworkGrader.grade(self.incomplete_gradesheet), + noDropGrader.grade(self.incomplete_gradesheet)]: + self.assertAlmostEqual(graded['percent'], 0.0) + #Make sure the breakdown includes 12 sections, plus one summary + self.assertEqual(len(graded['section_breakdown']), 12 + 1) + + graded = homeworkGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.11) # 100% + 10% / 10 assignments + self.assertEqual(len(graded['section_breakdown']), 12 + 1) + + graded = noDropGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.0916666666666666) # 100% + 10% / 12 assignments + self.assertEqual(len(graded['section_breakdown']), 12 + 1) + + graded = overflowGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.8880952380952382) # 100% + 10% / 5 assignments + self.assertEqual(len(graded['section_breakdown']), 7 + 1) + + graded = labGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.9226190476190477) + self.assertEqual(len(graded['section_breakdown']), 7 + 1) + + def test_WeightedSubsectionsGrader(self): + #First, a few sub graders + homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2) + labGrader = graders.AssignmentFormatGrader("Lab", 7, 3) + midtermGrader = graders.SingleSectionGrader("Midterm", "Midterm Exam") + + weightedGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.25), + (labGrader, labGrader.category, 0.25), + (midtermGrader, midtermGrader.category, 0.5)]) + + overOneWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.5), + (labGrader, labGrader.category, 0.5), + (midtermGrader, midtermGrader.category, 0.5)]) + + #The midterm should have all weight on this one + zeroWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.0), + (labGrader, labGrader.category, 0.0), + (midtermGrader, midtermGrader.category, 0.5)]) + + #This should 
always have a final percent of zero + allZeroWeightsGrader = graders.WeightedSubsectionsGrader([(homeworkGrader, homeworkGrader.category, 0.0), + (labGrader, labGrader.category, 0.0), + (midtermGrader, midtermGrader.category, 0.0)]) + + emptyGrader = graders.WeightedSubsectionsGrader([]) + + graded = weightedGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.5106547619047619) + self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) + self.assertEqual(len(graded['grade_breakdown']), 3) + + graded = overOneWeightsGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.7688095238095238) + self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) + self.assertEqual(len(graded['grade_breakdown']), 3) + + graded = zeroWeightsGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.2525) + self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) + self.assertEqual(len(graded['grade_breakdown']), 3) + + graded = allZeroWeightsGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.0) + self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) + self.assertEqual(len(graded['grade_breakdown']), 3) + + for graded in [weightedGrader.grade(self.empty_gradesheet), + weightedGrader.grade(self.incomplete_gradesheet), + zeroWeightsGrader.grade(self.empty_gradesheet), + allZeroWeightsGrader.grade(self.empty_gradesheet)]: + self.assertAlmostEqual(graded['percent'], 0.0) + self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) + self.assertEqual(len(graded['grade_breakdown']), 3) + + graded = emptyGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.0) + self.assertEqual(len(graded['section_breakdown']), 0) + self.assertEqual(len(graded['grade_breakdown']), 0) + + def test_graderFromConf(self): + + #Confs always produce a graders.WeightedSubsectionsGrader, so we 
test this by repeating the test + #in test_graders.WeightedSubsectionsGrader, but generate the graders with confs. + + weightedGrader = graders.grader_from_conf([ + { + 'type': "Homework", + 'min_count': 12, + 'drop_count': 2, + 'short_label': "HW", + 'weight': 0.25, + }, + { + 'type': "Lab", + 'min_count': 7, + 'drop_count': 3, + 'category': "Labs", + 'weight': 0.25 + }, + { + 'type': "Midterm", + 'name': "Midterm Exam", + 'short_label': "Midterm", + 'weight': 0.5, + }, + ]) + + emptyGrader = graders.grader_from_conf([]) + + graded = weightedGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.5106547619047619) + self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1) + self.assertEqual(len(graded['grade_breakdown']), 3) + + graded = emptyGrader.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.0) + self.assertEqual(len(graded['section_breakdown']), 0) + self.assertEqual(len(graded['grade_breakdown']), 0) + + #Test that graders can also be used instead of lists of dictionaries + homeworkGrader = graders.AssignmentFormatGrader("Homework", 12, 2) + homeworkGrader2 = graders.grader_from_conf(homeworkGrader) + + graded = homeworkGrader2.grade(self.test_gradesheet) + self.assertAlmostEqual(graded['percent'], 0.11) + self.assertEqual(len(graded['section_breakdown']), 12 + 1) + + #TODO: How do we test failure cases? The parser only logs an error when + #it can't parse something. Maybe it should throw exceptions? + diff --git a/common/lib/xmodule/xmodule/tests/test_progress.py b/common/lib/xmodule/xmodule/tests/test_progress.py new file mode 100644 index 0000000000..94a0a19d7c --- /dev/null +++ b/common/lib/xmodule/xmodule/tests/test_progress.py @@ -0,0 +1,138 @@ +"""Module progress tests""" + +import unittest + +from xmodule.progress import Progress +from xmodule import x_module + +from . import i4xs + +class ProgressTest(unittest.TestCase): + ''' Test that basic Progress objects work. 
A Progress represents a + fraction between 0 and 1. + ''' + not_started = Progress(0, 17) + part_done = Progress(2, 6) + half_done = Progress(3, 6) + also_half_done = Progress(1, 2) + done = Progress(7, 7) + + def test_create_object(self): + # These should work: + p = Progress(0, 2) + p = Progress(1, 2) + p = Progress(2, 2) + + p = Progress(2.5, 5.0) + p = Progress(3.7, 12.3333) + + # These shouldn't + self.assertRaises(ValueError, Progress, 0, 0) + self.assertRaises(ValueError, Progress, 2, 0) + self.assertRaises(ValueError, Progress, 1, -2) + + self.assertRaises(TypeError, Progress, 0, "all") + # check complex numbers just for the heck of it :) + self.assertRaises(TypeError, Progress, 2j, 3) + + def test_clamp(self): + self.assertEqual((2, 2), Progress(3, 2).frac()) + self.assertEqual((0, 2), Progress(-2, 2).frac()) + + def test_frac(self): + p = Progress(1, 2) + (a, b) = p.frac() + self.assertEqual(a, 1) + self.assertEqual(b, 2) + + def test_percent(self): + self.assertEqual(self.not_started.percent(), 0) + self.assertAlmostEqual(self.part_done.percent(), 33.33333333333333) + self.assertEqual(self.half_done.percent(), 50) + self.assertEqual(self.done.percent(), 100) + + self.assertEqual(self.half_done.percent(), self.also_half_done.percent()) + + def test_started(self): + self.assertFalse(self.not_started.started()) + + self.assertTrue(self.part_done.started()) + self.assertTrue(self.half_done.started()) + self.assertTrue(self.done.started()) + + def test_inprogress(self): + # only true if working on it + self.assertFalse(self.done.inprogress()) + self.assertFalse(self.not_started.inprogress()) + + self.assertTrue(self.part_done.inprogress()) + self.assertTrue(self.half_done.inprogress()) + + def test_done(self): + self.assertTrue(self.done.done()) + self.assertFalse(self.half_done.done()) + self.assertFalse(self.not_started.done()) + + def test_str(self): + self.assertEqual(str(self.not_started), "0/17") + self.assertEqual(str(self.part_done), "2/6") + 
self.assertEqual(str(self.done), "7/7") + + def test_ternary_str(self): + self.assertEqual(self.not_started.ternary_str(), "none") + self.assertEqual(self.half_done.ternary_str(), "in_progress") + self.assertEqual(self.done.ternary_str(), "done") + + def test_to_js_status(self): + '''Test the Progress.to_js_status_str() method''' + + self.assertEqual(Progress.to_js_status_str(self.not_started), "none") + self.assertEqual(Progress.to_js_status_str(self.half_done), "in_progress") + self.assertEqual(Progress.to_js_status_str(self.done), "done") + self.assertEqual(Progress.to_js_status_str(None), "NA") + + def test_to_js_detail_str(self): + '''Test the Progress.to_js_detail_str() method''' + f = Progress.to_js_detail_str + for p in (self.not_started, self.half_done, self.done): + self.assertEqual(f(p), str(p)) + # But None should be encoded as NA + self.assertEqual(f(None), "NA") + + def test_add(self): + '''Test the Progress.add_counts() method''' + p = Progress(0, 2) + p2 = Progress(1, 3) + p3 = Progress(2, 5) + pNone = None + add = lambda a, b: Progress.add_counts(a, b).frac() + + self.assertEqual(add(p, p), (0, 4)) + self.assertEqual(add(p, p2), (1, 5)) + self.assertEqual(add(p2, p3), (3, 8)) + + self.assertEqual(add(p2, pNone), p2.frac()) + self.assertEqual(add(pNone, p2), p2.frac()) + + def test_equality(self): + '''Test that comparing Progress objects for equality + works correctly.''' + p = Progress(1, 2) + p2 = Progress(2, 4) + p3 = Progress(1, 2) + self.assertTrue(p == p3) + self.assertFalse(p == p2) + + # Check != while we're at it + self.assertTrue(p != p2) + self.assertFalse(p != p3) + + +class ModuleProgressTest(unittest.TestCase): + ''' Test that get_progress() does the right thing for the different modules + ''' + def test_xmodule_default(self): + '''Make sure default get_progress exists, returns None''' + xm = x_module.XModule(i4xs, 'a://b/c/d/e', None, {}) + p = xm.get_progress() + self.assertEqual(p, None) diff --git a/common/static/js/capa/README 
b/common/static/js/capa/README new file mode 100644 index 0000000000..bb698ef00e --- /dev/null +++ b/common/static/js/capa/README @@ -0,0 +1 @@ +These files really should be in the capa module, but we don't have a way to load js from there at the moment. (TODO) diff --git a/common/static/js/capa/chemical_equation_preview.js b/common/static/js/capa/chemical_equation_preview.js new file mode 100644 index 0000000000..9c5c6cd6bc --- /dev/null +++ b/common/static/js/capa/chemical_equation_preview.js @@ -0,0 +1,12 @@ +(function () { + var preview_div = $('.chemicalequationinput .equation'); + $('.chemicalequationinput input').bind("input", function(eventObject) { + $.get("/preview/chemcalc/", {"formula" : this.value}, function(response) { + if (response.error) { + preview_div.html("" + response.error + ""); + } else { + preview_div.html(response.preview); + } + }); + }); +}).call(this); diff --git a/lms/djangoapps/courseware/module_render.py b/lms/djangoapps/courseware/module_render.py index 22ab6df67b..1e45822ebf 100644 --- a/lms/djangoapps/courseware/module_render.py +++ b/lms/djangoapps/courseware/module_render.py @@ -1,6 +1,7 @@ import hashlib import json import logging +import pyparsing import sys from django.conf import settings @@ -13,6 +14,7 @@ from django.views.decorators.csrf import csrf_exempt from requests.auth import HTTPBasicAuth from capa.xqueue_interface import XQueueInterface +from capa.chem import chemcalc from courseware.access import has_access from mitxmako.shortcuts import render_to_string from models import StudentModule, StudentModuleCache @@ -471,3 +473,42 @@ def modx_dispatch(request, dispatch, location, course_id): # Return whatever the module wanted to return to the client/caller return HttpResponse(ajax_return) + +def preview_chemcalc(request): + """ + Render an html preview of a chemical formula or equation. The fact that + this is here is a bit of a hack. See the note in lms/urls.py about why it's + here. (Victor is to blame.) 
+ + request should be a GET, with a key 'formula' and value 'some formula string'. + + Returns a json dictionary: + { + 'preview' : 'the-preview-html' or '' + 'error' : 'the-error' or '' + } + """ + if request.method != "GET": + raise Http404 + + result = {'preview': '', + 'error': '' } + formula = request.GET.get('formula') + if formula is None: + result['error'] = "No formula specified." + + return HttpResponse(json.dumps(result)) + + try: + result['preview'] = chemcalc.render_to_html(formula) + except pyparsing.ParseException as p: + result['error'] = "Couldn't parse formula: {0}".format(p) + except Exception: + # this is unexpected, so log + log.warning("Error while previewing chemical formula", exc_info=True) + result['error'] = "Error while rendering preview" + + return HttpResponse(json.dumps(result)) + + + diff --git a/lms/urls.py b/lms/urls.py index 862621b7e1..035db95596 100644 --- a/lms/urls.py +++ b/lms/urls.py @@ -141,6 +141,16 @@ if settings.COURSEWARE_ENABLED: url(r'^courses/(?P[^/]+/[^/]+/[^/]+)/modx/(?P.*?)/(?P[^/]*)$', 'courseware.module_render.modx_dispatch', name='modx_dispatch'), + + # TODO (vshnayder): This is a hack. It creates a direct connection from + # the LMS to capa functionality, and really wants to go through the + # input types system so that previews can be context-specific. + # Unfortunately, we don't have time to think through the right way to do + # that (and implement it), and it's not a terrible thing to provide a + # generic chemical-equation rendering service. + url(r'^preview/chemcalc', 'courseware.module_render.preview_chemcalc', + name='preview_chemcalc'), + url(r'^courses/(?P[^/]+/[^/]+/[^/]+)/xqueue/(?P[^/]*)/(?P.*?)/(?P[^/]*)$', 'courseware.module_render.xqueue_callback', name='xqueue_callback'), diff --git a/requirements.txt b/requirements.txt index c3322c5b7c..379d3c08d2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -49,3 +49,4 @@ networkx pygraphviz -r repo-requirements.txt pil +nltk