From 444f51d6de684d7f40c388d16e02b00a69e5391c Mon Sep 17 00:00:00 2001
From: Felix Sun
Date: Tue, 27 Aug 2013 09:41:29 -0400
Subject: [PATCH] Fixed some pep/pylint violations.

---
 common/lib/capa/capa/responsetypes.py                 | 12 +++++-------
 common/lib/xmodule/xmodule/crowdsource_hinter.py      |  4 ++--
 .../xmodule/xmodule/tests/test_crowdsource_hinter.py  |  8 +++++---
 3 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/common/lib/capa/capa/responsetypes.py b/common/lib/capa/capa/responsetypes.py
index 550042d1df..b53f38fd90 100644
--- a/common/lib/capa/capa/responsetypes.py
+++ b/common/lib/capa/capa/responsetypes.py
@@ -915,16 +915,14 @@ class NumericalResponse(LoncapaResponse):
         else:
             return CorrectMap(self.answer_id, 'incorrect')
 
-    # TODO: add check_hint_condition(self, hxml_set, student_answers)
-
-    def compare_answer(self, a, b):
+    def compare_answer(self, ans1, ans2):
         """
         Outside-facing function that lets us compare two numerical answers,
         with this problem's tolerance.
         """
         return compare_with_tolerance(
-            evaluator({}, {}, a),
-            evaluator({}, {}, b),
+            evaluator({}, {}, ans1),
+            evaluator({}, {}, ans2),
             self.tolerance
         )
 
@@ -1886,11 +1884,11 @@ class FormulaResponse(LoncapaResponse):
         else:
             return "incorrect"
 
-    def compare_answer(self, a, b):
+    def compare_answer(self, ans1, ans2):
         """
         An external interface for comparing whether a and b are equal.
         """
-        internal_result = self.check_formula(a, b, self.samples)
+        internal_result = self.check_formula(ans1, ans2, self.samples)
         return internal_result == "correct"
 
     def validate_answer(self, answer):
diff --git a/common/lib/xmodule/xmodule/crowdsource_hinter.py b/common/lib/xmodule/xmodule/crowdsource_hinter.py
index f2d21c459b..7e538efa24 100644
--- a/common/lib/xmodule/xmodule/crowdsource_hinter.py
+++ b/common/lib/xmodule/xmodule/crowdsource_hinter.py
@@ -229,12 +229,12 @@ class CrowdsourceHinterModule(CrowdsourceHinterFields, XModule):
         # The brackets surrounding the index are for backwards compatability purposes.
         # (It used to be that each answer was paired with multiple hints in a list.)
         self.previous_answers += [[best_hint_answer, [best_hint_index]]]
-        for i in xrange(min(2, n_hints - 1)):
+        for _ in xrange(min(2, n_hints - 1)):
             # Keep making random hints until we hit a target, or run out.
             while True:
                 # random.choice randomly chooses an element from its input list.
                 # (We then unpack the item, in this case data for a hint.)
-                (hint_index, (rand_hint, votes, hint_answer)) =\
+                (hint_index, (rand_hint, _, hint_answer)) =\
                     random.choice(matching_hints.items())
                 if rand_hint not in hints:
                     break
diff --git a/common/lib/xmodule/xmodule/tests/test_crowdsource_hinter.py b/common/lib/xmodule/xmodule/tests/test_crowdsource_hinter.py
index d28d2ee06b..8347b71076 100644
--- a/common/lib/xmodule/xmodule/tests/test_crowdsource_hinter.py
+++ b/common/lib/xmodule/xmodule/tests/test_crowdsource_hinter.py
@@ -120,9 +120,9 @@ class CHModuleFactory(object):
                 return False
             responder.validate_answer = validate_answer
 
-            def compare_answer(a, b):
+            def compare_answer(ans1, ans2):
                 """ A fake answer comparer """
-                return a == b
+                return ans1 == ans2
             responder.compare_answer = compare_answer
 
             capa_module.lcp.responders = {'responder0': responder}
@@ -189,11 +189,13 @@ class VerticalWithModulesFactory(object):
 
     @staticmethod
     def next_num():
+        """Increments a global counter for naming."""
         CHModuleFactory.num += 1
         return CHModuleFactory.num
 
     @staticmethod
     def create():
+        """Make a vertical."""
         model_data = {'data': VerticalWithModulesFactory.sample_problem_xml}
         system = get_test_system()
         descriptor = VerticalDescriptor.from_xml(VerticalWithModulesFactory.sample_problem_xml, system)
@@ -532,7 +534,7 @@ class CrowdsourceHinterTest(unittest.TestCase):
         """
         mock_module = CHModuleFactory.create()
 
-        def fake_get_hint(get):
+        def fake_get_hint(_):
             """
             Creates a rendering dictionary, with which we can test
             the templates.