Include caching definitions in split bulk operations
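The diff below threads a `course_key` through the definition read/write paths so that `SplitBulkWriteMixin` can cache definition documents in the per-course bulk-operation record, the same way it already caches structures. As a minimal sketch of the read-through/write-back pattern the new `get_definition` / `update_definition` / `_end_bulk_operation` logic follows (simplified standalone Python, not the actual mixin; `BulkRecord`, `db_get_definition`, and `db_upsert_definition` are illustrative stand-ins for the record class and the MongoConnection calls):

# Simplified sketch (not edx-platform code): how definitions are cached
# while a bulk operation is active.

class BulkRecord(object):
    """Per-course record of definitions touched during a bulk operation."""
    def __init__(self):
        self.active = False
        self.definitions = {}           # definition _id -> document (in-memory cache)
        self.definitions_in_db = set()  # ids known to already exist in the db


def get_definition(record, db_get_definition, definition_id):
    """Read-through: prefer the cached copy, otherwise load it and remember it."""
    if not record.active:
        return db_get_definition(definition_id)
    definition = record.definitions.get(definition_id)
    if definition is None:
        definition = db_get_definition(definition_id)
        record.definitions[definition_id] = definition
        if definition is not None:
            record.definitions_in_db.add(definition_id)
    return definition


def update_definition(record, db_upsert_definition, definition):
    """Write-back: buffer the document while the bulk operation is active."""
    if record.active:
        record.definitions[definition['_id']] = definition
    else:
        db_upsert_definition(definition)


def end_bulk_operation(record, db_upsert_definition):
    """On close, persist only the definitions that did not come from the db."""
    for _id in set(record.definitions) - record.definitions_in_db:
        db_upsert_definition(record.definitions[_id])
    record.active = False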
@@ -153,7 +153,10 @@ class CachingDescriptorSystem(MakoDescriptorSystem, EditInfoRuntimeMixin):
         if definition_id is not None and not json_data.get('definition_loaded', False):
             definition_loader = DefinitionLazyLoader(
-                self.modulestore, block_key.type, definition_id,
+                self.modulestore,
+                course_key,
+                block_key.type,
+                definition_id,
                 lambda fields: self.modulestore.convert_references_to_keys(
                     course_key, self.load_block_type(block_key.type),
                     fields, self.course_entry.structure['blocks'],
@@ -8,13 +8,14 @@ class DefinitionLazyLoader(object):
     object doesn't force access during init but waits until client wants the
     definition. Only works if the modulestore is a split mongo store.
     """
-    def __init__(self, modulestore, block_type, definition_id, field_converter):
+    def __init__(self, modulestore, course_key, block_type, definition_id, field_converter):
         """
         Simple placeholder for yet-to-be-fetched data
         :param modulestore: the pymongo db connection with the definitions
         :param definition_locator: the id of the record in the above to fetch
         """
         self.modulestore = modulestore
+        self.course_key = course_key
         self.definition_locator = DefinitionLocator(block_type, definition_id)
         self.field_converter = field_converter
@@ -23,4 +24,4 @@ class DefinitionLazyLoader(object):
         Fetch the definition. Note, the caller should replace this lazy
         loader pointer with the result so as not to fetch more than once
         """
-        return self.modulestore.db_connection.get_definition(self.definition_locator.definition_id)
+        return self.modulestore.get_definition(self.course_key, self.definition_locator.definition_id)
@@ -268,18 +268,17 @@ class MongoConnection(object):
         return self.definitions.find_one({'_id': key})
 
     @autoretry_read()
-    def find_matching_definitions(self, query):
+    def get_definitions(self, definitions):
         """
-        Find the definitions matching the query. Right now the query must be a legal mongo query
-        :param query: a mongo-style query of {key: [value|{$in ..}|..], ..}
+        Retrieve all definitions listed in `definitions`.
         """
-        return self.definitions.find(query)
+        return self.definitions.find({'$in': {'_id': definitions}})
 
-    def insert_definition(self, definition):
+    def upsert_definition(self, definition):
         """
         Create the definition in the db
         """
-        self.definitions.insert(definition)
+        self.definitions.update({'_id': definition['_id']}, definition, upsert=True)
 
     def ensure_indexes(self):
         """
@@ -117,6 +117,8 @@ class SplitBulkWriteRecord(BulkOpsRecord):
         self.index = None
         self.structures = {}
         self.structures_in_db = set()
+        self.definitions = {}
+        self.definitions_in_db = set()
 
         # TODO: This needs to track which branches have actually been modified/versioned,
         # so that copying one branch to another doesn't update the original branch.
@@ -226,6 +228,9 @@ class SplitBulkWriteMixin(BulkOperationsMixin):
         for _id in bulk_write_record.structures.viewkeys() - bulk_write_record.structures_in_db:
             self.db_connection.upsert_structure(bulk_write_record.structures[_id])
 
+        for _id in bulk_write_record.definitions.viewkeys() - bulk_write_record.definitions_in_db:
+            self.db_connection.upsert_definition(bulk_write_record.definitions[_id])
+
         if bulk_write_record.index is not None and bulk_write_record.index != bulk_write_record.initial_index:
             if bulk_write_record.initial_index is None:
                 self.db_connection.insert_course_index(bulk_write_record.index)
@@ -292,6 +297,67 @@ class SplitBulkWriteMixin(BulkOperationsMixin):
         else:
             self.db_connection.upsert_structure(structure)
 
+    def get_definition(self, course_key, definition_guid):
+        """
+        Retrieve a single definition by id, respecting the active bulk operation
+        on course_key.
+
+        Args:
+            course_key (:class:`.CourseKey`): The course being operated on
+            definition_guid (str or ObjectID): The id of the definition to load
+        """
+        bulk_write_record = self._get_bulk_ops_record(course_key)
+        if bulk_write_record.active:
+            definition = bulk_write_record.definitions.get(definition_guid)
+
+            # The definition hasn't been loaded from the db yet, so load it
+            if definition is None:
+                definition = self.db_connection.get_definition(definition_guid)
+                bulk_write_record.definitions[definition_guid] = definition
+                if definition is not None:
+                    bulk_write_record.definitions_in_db.add(definition_guid)
+
+            return definition
+        else:
+            # cast string to ObjectId if necessary
+            definition_guid = course_key.as_object_id(definition_guid)
+            return self.db_connection.get_definition(definition_guid)
+
+    def get_definitions(self, ids):
+        """
+        Return all definitions that are specified in ``ids``.
+
+        If a definition with the same id is in both the cache and the database,
+        the cached version will be preferred.
+
+        Arguments:
+            ids (list): A list of definition ids
+        """
+        definitions = []
+        ids = set(ids)
+
+        for _, record in self._active_records:
+            for definition in record.definitions.values():
+                definition_id = definition.get('_id')
+                if definition_id in ids:
+                    ids.remove(definition_id)
+                    definitions.append(definition)
+
+        definitions.extend(self.db_connection.get_definitions(list(ids)))
+        return definitions
+
+    def update_definition(self, course_key, definition):
+        """
+        Update a definition, respecting the current bulk operation status
+        (no data will be written to the database if a bulk operation is active.)
+        """
+        bulk_write_record = self._get_bulk_ops_record(course_key)
+        if bulk_write_record.active:
+            bulk_write_record.definitions[definition['_id']] = definition
+        else:
+            self.db_connection.upsert_definition(definition)
+
     def version_structure(self, course_key, structure, user_id):
         """
         Copy the structure and update the history info (edited_by, edited_on, previous_version)
@@ -530,9 +596,10 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
 
         if not lazy:
             # Load all descendants by id
-            descendent_definitions = self.db_connection.find_matching_definitions({
-                '_id': {'$in': [block['definition']
-                                for block in new_module_data.itervalues()]}})
+            descendent_definitions = self.get_definitions([
+                block['definition']
+                for block in new_module_data.itervalues()
+            ])
             # turn into a map
             definitions = {definition['_id']: definition
                            for definition in descendent_definitions}
@@ -803,7 +870,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
                 self._block_matches(block_json.get('fields', {}), settings)
             ):
                 if content:
-                    definition_block = self.db_connection.get_definition(block_json['definition'])
+                    definition_block = self.get_definition(course_locator, block_json['definition'])
                     return self._block_matches(definition_block.get('fields', {}), content)
                 else:
                     return True
@@ -1016,7 +1083,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
         # TODO implement
         raise NotImplementedError()
 
-    def create_definition_from_data(self, new_def_data, category, user_id):
+    def create_definition_from_data(self, course_key, new_def_data, category, user_id):
         """
         Pull the definition fields out of descriptor and save to the db as a new definition
         w/o a predecessor and return the new id.
@@ -1037,11 +1104,11 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
             },
             'schema_version': self.SCHEMA_VERSION,
         }
-        self.db_connection.insert_definition(document)
+        self.update_definition(course_key, document)
         definition_locator = DefinitionLocator(category, new_id)
         return definition_locator
 
-    def update_definition_from_data(self, definition_locator, new_def_data, user_id):
+    def update_definition_from_data(self, course_key, definition_locator, new_def_data, user_id):
         """
         See if new_def_data differs from the persisted version. If so, update
         the persisted version and return the new id.
@@ -1058,7 +1125,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
 
         # if this looks in cache rather than fresh fetches, then it will probably not detect
         # actual change b/c the descriptor and cache probably point to the same objects
-        old_definition = self.db_connection.get_definition(definition_locator.definition_id)
+        old_definition = self.get_definition(course_key, definition_locator.definition_id)
         if old_definition is None:
             raise ItemNotFoundError(definition_locator)
 
@@ -1072,7 +1139,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
             # previous version id
             old_definition['edit_info']['previous_version'] = definition_locator.definition_id
             old_definition['schema_version'] = self.SCHEMA_VERSION
-            self.db_connection.insert_definition(old_definition)
+            self.update_definition(course_key, old_definition)
             return DefinitionLocator(old_definition['block_type'], old_definition['_id']), True
         else:
             return definition_locator, False
@@ -1160,9 +1227,9 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
             new_def_data = partitioned_fields.get(Scope.content, {})
             # persist the definition if persisted != passed
             if (definition_locator is None or isinstance(definition_locator.definition_id, LocalId)):
-                definition_locator = self.create_definition_from_data(new_def_data, block_type, user_id)
+                definition_locator = self.create_definition_from_data(course_key, new_def_data, block_type, user_id)
             elif new_def_data is not None:
-                definition_locator, _ = self.update_definition_from_data(definition_locator, new_def_data, user_id)
+                definition_locator, _ = self.update_definition_from_data(course_key, definition_locator, new_def_data, user_id)
 
             # copy the structure and modify the new one
             new_structure = self.version_structure(course_key, structure, user_id)
@@ -1355,7 +1422,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
                 },
                 'schema_version': self.SCHEMA_VERSION,
             }
-            self.db_connection.insert_definition(definition_entry)
+            self.update_definition(locator, definition_entry)
 
             draft_structure = self._new_structure(
                 user_id,
@@ -1383,14 +1450,14 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
             if block_fields is not None:
                 root_block['fields'].update(self._serialize_fields(root_category, block_fields))
             if definition_fields is not None:
-                definition = self.db_connection.get_definition(root_block['definition'])
+                definition = self.get_definition(locator, root_block['definition'])
                 definition['fields'].update(definition_fields)
                 definition['edit_info']['previous_version'] = definition['_id']
                 definition['edit_info']['edited_by'] = user_id
                 definition['edit_info']['edited_on'] = datetime.datetime.now(UTC)
                 definition['_id'] = ObjectId()
                 definition['schema_version'] = self.SCHEMA_VERSION
-                self.db_connection.insert_definition(definition)
+                self.update_definition(locator, definition)
                 root_block['definition'] = definition['_id']
                 root_block['edit_info']['edited_on'] = datetime.datetime.now(UTC)
                 root_block['edit_info']['edited_by'] = user_id
@@ -1483,7 +1550,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
         definition_locator = DefinitionLocator(original_entry['block_type'], original_entry['definition'])
         if definition_fields:
             definition_locator, is_updated = self.update_definition_from_data(
-                definition_locator, definition_fields, user_id
+                course_key, definition_locator, definition_fields, user_id
             )
 
         # check metadata
@@ -1607,7 +1674,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
             structure = self._lookup_course(course_key).structure
             new_structure = self.version_structure(course_key, structure, user_id)
             new_id = new_structure['_id']
-            is_updated = self._persist_subdag(xblock, user_id, new_structure['blocks'], new_id)
+            is_updated = self._persist_subdag(course_key, xblock, user_id, new_structure['blocks'], new_id)
 
             if is_updated:
                 self.update_structure(course_key, new_structure)
@@ -1621,18 +1688,20 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
         else:
             return xblock
 
-    def _persist_subdag(self, xblock, user_id, structure_blocks, new_id):
+    def _persist_subdag(self, course_key, xblock, user_id, structure_blocks, new_id):
         # persist the definition if persisted != passed
         partitioned_fields = self.partition_xblock_fields_by_scope(xblock)
         new_def_data = self._serialize_fields(xblock.category, partitioned_fields[Scope.content])
         is_updated = False
         if xblock.definition_locator is None or isinstance(xblock.definition_locator.definition_id, LocalId):
             xblock.definition_locator = self.create_definition_from_data(
-                new_def_data, xblock.category, user_id)
+                course_key, new_def_data, xblock.category, user_id
+            )
             is_updated = True
         elif new_def_data:
             xblock.definition_locator, is_updated = self.update_definition_from_data(
-                xblock.definition_locator, new_def_data, user_id)
+                course_key, xblock.definition_locator, new_def_data, user_id
+            )
 
         if isinstance(xblock.scope_ids.usage_id.block_id, LocalId):
             # generate an id
@@ -1654,7 +1723,7 @@ class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
             for child in xblock.children:
                 if isinstance(child.block_id, LocalId):
                     child_block = xblock.system.get_block(child)
-                    is_updated = self._persist_subdag(child_block, user_id, structure_blocks, new_id) or is_updated
+                    is_updated = self._persist_subdag(course_key, child_block, user_id, structure_blocks, new_id) or is_updated
                     children.append(BlockKey.from_usage_key(child_block.location))
                 else:
                     children.append(BlockKey.from_usage_key(child))
@@ -21,6 +21,7 @@ class TestBulkWriteMixin(unittest.TestCase):
         self.course_key = CourseLocator('org', 'course', 'run-a', branch='test')
         self.course_key_b = CourseLocator('org', 'course', 'run-b', branch='test')
         self.structure = {'this': 'is', 'a': 'structure', '_id': ObjectId()}
+        self.definition = {'this': 'is', 'a': 'definition', '_id': ObjectId()}
         self.index_entry = {'this': 'is', 'an': 'index'}
 
     def assertConnCalls(self, *calls):
@@ -66,6 +67,20 @@ class TestBulkWriteMixinClosed(TestBulkWriteMixin):
         self.assertConnCalls(call.upsert_structure(self.structure))
         self.clear_cache.assert_called_once_with(self.structure['_id'])
 
+    @ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
+    def test_no_bulk_read_definition(self, version_guid):
+        # Reading a definition when no bulk operation is active should just call
+        # through to the db_connection
+        result = self.bulk.get_definition(self.course_key, version_guid)
+        self.assertConnCalls(call.get_definition(self.course_key.as_object_id(version_guid)))
+        self.assertEqual(result, self.conn.get_definition.return_value)
+
+    def test_no_bulk_write_definition(self):
+        # Writing a definition when no bulk operation is active should just
+        # call through to the db_connection.
+        self.bulk.update_definition(self.course_key, self.definition)
+        self.assertConnCalls(call.upsert_definition(self.definition))
+
     @ddt.data(True, False)
     def test_no_bulk_read_index(self, ignore_case):
         # Reading a course index when no bulk operation is active should just call
@@ -129,6 +144,68 @@ class TestBulkWriteMixinClosed(TestBulkWriteMixin):
             self.conn.mock_calls
         )
 
+    def test_write_index_and_definition_on_close(self):
+        original_index = {'versions': {}}
+        self.conn.get_course_index.return_value = copy.deepcopy(original_index)
+        self.bulk._begin_bulk_operation(self.course_key)
+        self.conn.reset_mock()
+        self.bulk.update_definition(self.course_key, self.definition)
+        self.bulk.insert_course_index(self.course_key, {'versions': {self.course_key.branch: self.definition['_id']}})
+        self.assertConnCalls()
+        self.bulk._end_bulk_operation(self.course_key)
+        self.assertConnCalls(
+            call.upsert_definition(self.definition),
+            call.update_course_index(
+                {'versions': {self.course_key.branch: self.definition['_id']}},
+                from_index=original_index
+            )
+        )
+
+    def test_write_index_and_multiple_definitions_on_close(self):
+        original_index = {'versions': {'a': ObjectId(), 'b': ObjectId()}}
+        self.conn.get_course_index.return_value = copy.deepcopy(original_index)
+        self.bulk._begin_bulk_operation(self.course_key)
+        self.conn.reset_mock()
+        self.bulk.update_definition(self.course_key.replace(branch='a'), self.definition)
+        other_definition = {'another': 'definition', '_id': ObjectId()}
+        self.bulk.update_definition(self.course_key.replace(branch='b'), other_definition)
+        self.bulk.insert_course_index(self.course_key, {'versions': {'a': self.definition['_id'], 'b': other_definition['_id']}})
+        self.bulk._end_bulk_operation(self.course_key)
+        self.assertItemsEqual(
+            [
+                call.upsert_definition(self.definition),
+                call.upsert_definition(other_definition),
+                call.update_course_index(
+                    {'versions': {'a': self.definition['_id'], 'b': other_definition['_id']}},
+                    from_index=original_index
+                )
+            ],
+            self.conn.mock_calls
+        )
+
+    def test_write_definition_on_close(self):
+        self.conn.get_course_index.return_value = None
+        self.bulk._begin_bulk_operation(self.course_key)
+        self.conn.reset_mock()
+        self.bulk.update_definition(self.course_key, self.definition)
+        self.assertConnCalls()
+        self.bulk._end_bulk_operation(self.course_key)
+        self.assertConnCalls(call.upsert_definition(self.definition))
+
+    def test_write_multiple_definitions_on_close(self):
+        self.conn.get_course_index.return_value = None
+        self.bulk._begin_bulk_operation(self.course_key)
+        self.conn.reset_mock()
+        self.bulk.update_definition(self.course_key.replace(branch='a'), self.definition)
+        other_definition = {'another': 'definition', '_id': ObjectId()}
+        self.bulk.update_definition(self.course_key.replace(branch='b'), other_definition)
+        self.assertConnCalls()
+        self.bulk._end_bulk_operation(self.course_key)
+        self.assertItemsEqual(
+            [call.upsert_definition(self.definition), call.upsert_definition(other_definition)],
+            self.conn.mock_calls
+        )
+
     def test_write_index_and_structure_on_close(self):
         original_index = {'versions': {}}
         self.conn.get_course_index.return_value = copy.deepcopy(original_index)
@@ -181,6 +258,7 @@ class TestBulkWriteMixinClosed(TestBulkWriteMixin):
         get_result = self.bulk.get_structure(self.course_key, version_result['_id'])
         self.assertEquals(version_result, get_result)
 
+
 class TestBulkWriteMixinClosedAfterPrevTransaction(TestBulkWriteMixinClosed, TestBulkWriteMixinPreviousTransaction):
     """
     Test that operations on with a closed transaction aren't affected by a previously executed transaction
@@ -307,6 +385,37 @@ class TestBulkWriteMixinFindMethods(TestBulkWriteMixin):
             else:
                 self.assertNotIn(db_structure(_id), results)
 
+    @ddt.data(
+        ([], [], []),
+        ([1, 2, 3], [1, 2], [1, 2]),
+        ([1, 2, 3], [1], [1, 2]),
+        ([1, 2, 3], [], [1, 2]),
+    )
+    @ddt.unpack
+    def test_get_definitions(self, search_ids, active_ids, db_ids):
+        db_definition = lambda _id: {'db': 'definition', '_id': _id}
+        active_definition = lambda _id: {'active': 'definition', '_id': _id}
+
+        db_definitions = [db_definition(_id) for _id in db_ids if _id not in active_ids]
+        for n, _id in enumerate(active_ids):
+            course_key = CourseLocator('org', 'course', 'run{}'.format(n))
+            self.bulk._begin_bulk_operation(course_key)
+            self.bulk.update_definition(course_key, active_definition(_id))
+
+        self.conn.get_definitions.return_value = db_definitions
+        results = self.bulk.get_definitions(search_ids)
+        self.conn.get_definitions.assert_called_once_with(list(set(search_ids) - set(active_ids)))
+        for _id in active_ids:
+            if _id in search_ids:
+                self.assertIn(active_definition(_id), results)
+            else:
+                self.assertNotIn(active_definition(_id), results)
+        for _id in db_ids:
+            if _id in search_ids and _id not in active_ids:
+                self.assertIn(db_definition(_id), results)
+            else:
+                self.assertNotIn(db_definition(_id), results)
+
     def test_no_bulk_find_structures_derived_from(self):
         ids = [Mock(name='id')]
         self.conn.find_structures_derived_from.return_value = [MagicMock(name='result')]
@@ -456,6 +565,45 @@ class TestBulkWriteMixinOpen(TestBulkWriteMixin):
         self.assertEquals(self.conn.get_structure.call_count, 1)
         self.assertEqual(result, self.structure)
 
+    @ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
+    def test_read_definition_without_write_from_db(self, version_guid):
+        # Reading a definition before it's been written (while in bulk operation mode)
+        # returns the definition from the database
+        result = self.bulk.get_definition(self.course_key, version_guid)
+        self.assertEquals(self.conn.get_definition.call_count, 1)
+        self.assertEqual(result, self.conn.get_definition.return_value)
+        self.assertCacheNotCleared()
+
+    @ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
+    def test_read_definition_without_write_only_reads_once(self, version_guid):
+        # Reading the same definition multiple times shouldn't hit the database
+        # more than once
+        for _ in xrange(2):
+            result = self.bulk.get_definition(self.course_key, version_guid)
+            self.assertEquals(self.conn.get_definition.call_count, 1)
+            self.assertEqual(result, self.conn.get_definition.return_value)
+            self.assertCacheNotCleared()
+
+    @ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
+    def test_read_definition_after_write_no_db(self, version_guid):
+        # Reading a definition that's already been written shouldn't hit the db at all
+        self.definition['_id'] = version_guid
+        self.bulk.update_definition(self.course_key, self.definition)
+        result = self.bulk.get_definition(self.course_key, version_guid)
+        self.assertEquals(self.conn.get_definition.call_count, 0)
+        self.assertEqual(result, self.definition)
+
+    @ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
+    def test_read_definition_after_write_after_read(self, version_guid):
+        # Reading a definition that's been updated after being pulled from the db should
+        # still get the updated value
+        self.definition['_id'] = version_guid
+        self.bulk.get_definition(self.course_key, version_guid)
+        self.bulk.update_definition(self.course_key, self.definition)
+        result = self.bulk.get_definition(self.course_key, version_guid)
+        self.assertEquals(self.conn.get_definition.call_count, 1)
+        self.assertEqual(result, self.definition)
+
     @ddt.data(True, False)
     def test_read_index_without_write_from_db(self, ignore_case):
         # Reading the index without writing to it should pull from the database
@@ -115,35 +115,36 @@ def toc_for_course(user, request, course, active_chapter, active_section, field_
     field_data_cache must include data from the course module and 2 levels of its descendents
     '''
 
-    course_module = get_module_for_descriptor(user, request, course, field_data_cache, course.id)
-    if course_module is None:
-        return None
+    with modulestore().bulk_operations(course.id):
+        course_module = get_module_for_descriptor(user, request, course, field_data_cache, course.id)
+        if course_module is None:
+            return None
 
-    chapters = list()
-    for chapter in course_module.get_display_items():
-        if chapter.hide_from_toc:
-            continue
+        chapters = list()
+        for chapter in course_module.get_display_items():
+            if chapter.hide_from_toc:
+                continue
 
-        sections = list()
-        for section in chapter.get_display_items():
+            sections = list()
+            for section in chapter.get_display_items():
 
-            active = (chapter.url_name == active_chapter and
-                      section.url_name == active_section)
+                active = (chapter.url_name == active_chapter and
+                          section.url_name == active_section)
 
-            if not section.hide_from_toc:
-                sections.append({'display_name': section.display_name_with_default,
-                                 'url_name': section.url_name,
-                                 'format': section.format if section.format is not None else '',
-                                 'due': get_extended_due_date(section),
-                                 'active': active,
-                                 'graded': section.graded,
-                                 })
+                if not section.hide_from_toc:
+                    sections.append({'display_name': section.display_name_with_default,
+                                     'url_name': section.url_name,
+                                     'format': section.format if section.format is not None else '',
+                                     'due': get_extended_due_date(section),
+                                     'active': active,
+                                     'graded': section.graded,
+                                     })
 
-        chapters.append({'display_name': chapter.display_name_with_default,
-                         'url_name': chapter.url_name,
-                         'sections': sections,
-                         'active': chapter.url_name == active_chapter})
-    return chapters
+            chapters.append({'display_name': chapter.display_name_with_default,
+                             'url_name': chapter.url_name,
+                             'sections': sections,
+                             'active': chapter.url_name == active_chapter})
+        return chapters
 
 
 def get_module(user, request, usage_key, field_data_cache,
@@ -326,19 +326,29 @@ class TestTOC(ModuleStoreTestCase):
         self.request = factory.get(chapter_url)
         self.request.user = UserFactory()
         self.modulestore = self.store._get_modulestore_for_courseid(self.course_key)
-        with check_mongo_calls(num_finds, num_sends):
-            self.toy_course = self.store.get_course(self.toy_loc, depth=2)
-            self.field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
-                self.toy_loc, self.request.user, self.toy_course, depth=2
-            )
+        with self.modulestore.bulk_operations(self.course_key):
+            with check_mongo_calls(num_finds, num_sends):
+                self.toy_course = self.store.get_course(self.toy_loc, depth=2)
+                self.field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
+                    self.toy_loc, self.request.user, self.toy_course, depth=2
+                )
 
-    # TODO: LMS-11220: Document why split find count is 9
-    # TODO: LMS-11220: Document why mongo find count is 4
-    @ddt.data((ModuleStoreEnum.Type.mongo, 3, 0), (ModuleStoreEnum.Type.split, 9, 0))
+    # Mongo makes 3 queries to load the course to depth 2:
+    #     - 1 for the course
+    #     - 1 for its children
+    #     - 1 for its grandchildren
+    # Split makes 6 queries to load the course to depth 2:
+    #     - load the structure
+    #     - load 5 definitions
+    # Split makes 2 queries to render the toc:
+    #     - it loads the active version at the start of the bulk operation
+    #     - it loads the course definition for inheritance, because it's outside
+    #       the bulk-operation marker that loaded the course descriptor
+    @ddt.data((ModuleStoreEnum.Type.mongo, 3, 0, 0), (ModuleStoreEnum.Type.split, 6, 0, 2))
     @ddt.unpack
-    def test_toc_toy_from_chapter(self, default_ms, num_finds, num_sends):
+    def test_toc_toy_from_chapter(self, default_ms, setup_finds, setup_sends, toc_finds):
         with self.store.default_store(default_ms):
-            self.setup_modulestore(default_ms, num_finds, num_sends)
+            self.setup_modulestore(default_ms, setup_finds, setup_sends)
             expected = ([{'active': True, 'sections':
                           [{'url_name': 'Toy_Videos', 'display_name': u'Toy Videos', 'graded': True,
                             'format': u'Lecture Sequence', 'due': None, 'active': False},
@@ -354,20 +364,29 @@ class TestTOC(ModuleStoreTestCase):
                             'format': '', 'due': None, 'active': False}],
              'url_name': 'secret:magic', 'display_name': 'secret:magic'}])
 
-            with check_mongo_calls(0, 0):
+            with check_mongo_calls(toc_finds, 0):
                 actual = render.toc_for_course(
                     self.request.user, self.request, self.toy_course, self.chapter, None, self.field_data_cache
                 )
             for toc_section in expected:
                 self.assertIn(toc_section, actual)
 
-    # TODO: LMS-11220: Document why split find count is 9
-    # TODO: LMS-11220: Document why mongo find count is 4
-    @ddt.data((ModuleStoreEnum.Type.mongo, 3, 0), (ModuleStoreEnum.Type.split, 9, 0))
+    # Mongo makes 3 queries to load the course to depth 2:
+    #     - 1 for the course
+    #     - 1 for its children
+    #     - 1 for its grandchildren
+    # Split makes 6 queries to load the course to depth 2:
+    #     - load the structure
+    #     - load 5 definitions
+    # Split makes 2 queries to render the toc:
+    #     - it loads the active version at the start of the bulk operation
+    #     - it loads the course definition for inheritance, because it's outside
+    #       the bulk-operation marker that loaded the course descriptor
+    @ddt.data((ModuleStoreEnum.Type.mongo, 3, 0, 0), (ModuleStoreEnum.Type.split, 6, 0, 2))
     @ddt.unpack
-    def test_toc_toy_from_section(self, default_ms, num_finds, num_sends):
+    def test_toc_toy_from_section(self, default_ms, setup_finds, setup_sends, toc_finds):
        with self.store.default_store(default_ms):
-            self.setup_modulestore(default_ms, num_finds, num_sends)
+            self.setup_modulestore(default_ms, setup_finds, setup_sends)
             section = 'Welcome'
             expected = ([{'active': True, 'sections':
                           [{'url_name': 'Toy_Videos', 'display_name': u'Toy Videos', 'graded': True,
@@ -384,7 +403,8 @@ class TestTOC(ModuleStoreTestCase):
                             'format': '', 'due': None, 'active': False}],
              'url_name': 'secret:magic', 'display_name': 'secret:magic'}])
 
-            actual = render.toc_for_course(self.request.user, self.request, self.toy_course, self.chapter, section, self.field_data_cache)
+            with check_mongo_calls(toc_finds, 0):
+                actual = render.toc_for_course(self.request.user, self.request, self.toy_course, self.chapter, section, self.field_data_cache)
             for toc_section in expected:
                 self.assertIn(toc_section, actual)
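The LMS changes above follow the same recipe any read-heavy caller can use: open one bulk operation for the course before touching many blocks, so repeated definition reads are served from the per-course cache instead of separate Mongo round-trips. A rough usage sketch follows; the `count_graded_sections` helper is illustrative only and not part of this commit, and it assumes the standard `modulestore()` accessor and depth-2 course loading shown in the diff.

from xmodule.modulestore.django import modulestore


def count_graded_sections(course_key):
    # Illustrative helper (not part of this commit): walk the course once
    # inside a single bulk operation so repeated definition reads hit the cache.
    store = modulestore()
    with store.bulk_operations(course_key):
        course = store.get_course(course_key, depth=2)
        if course is None:
            return 0
        return sum(
            1
            for chapter in course.get_children()
            for section in chapter.get_children()
            if section.graded
        )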