feat: Support sections/subsections dicts for container objects in index [FC-0090] (#36806)

- Adds the subsections dict in the search index to be used by units
- Adds the sections dict in the search index to be used by subsections.
- Rename `get_containers_contains_component` to `get_containers_contains_item` and add support for subsections/sections
- Call `CONTENT_OBJECT_ASSOCIATIONS_CHANGED` for sections and subsections in `update_container_children`
This commit is contained in:
Chris Chávez
2025-06-02 12:41:07 -05:00
committed by GitHub
parent 3433152d90
commit 2759d15cca
10 changed files with 560 additions and 290 deletions

View File

@@ -51,8 +51,7 @@ from .documents import (
searchable_doc_for_library_block,
searchable_doc_for_key,
searchable_doc_tags,
searchable_doc_tags_for_collection,
searchable_doc_units,
searchable_doc_containers,
)
log = logging.getLogger(__name__)
@@ -452,7 +451,7 @@ def rebuild_index(status_cb: Callable[[str], None] | None = None, incremental=Fa
doc.update(searchable_doc_for_library_block(metadata))
doc.update(searchable_doc_tags(metadata.usage_key))
doc.update(searchable_doc_collections(metadata.usage_key))
doc.update(searchable_doc_units(metadata.usage_key))
doc.update(searchable_doc_containers(metadata.usage_key, "units"))
docs.append(doc)
except Exception as err: # pylint: disable=broad-except
status_cb(f"Error indexing library component {component}: {err}")
@@ -471,7 +470,7 @@ def rebuild_index(status_cb: Callable[[str], None] | None = None, incremental=Fa
try:
collection_key = lib_api.library_collection_locator(library_key, collection.key)
doc = searchable_doc_for_collection(collection_key, collection=collection)
doc.update(searchable_doc_tags_for_collection(collection_key))
doc.update(searchable_doc_tags(collection_key))
docs.append(doc)
except Exception as err: # pylint: disable=broad-except
status_cb(f"Error indexing collection {collection}: {err}")
@@ -497,6 +496,12 @@ def rebuild_index(status_cb: Callable[[str], None] | None = None, incremental=Fa
doc = searchable_doc_for_container(container_key)
doc.update(searchable_doc_tags(container_key))
doc.update(searchable_doc_collections(container_key))
container_type = lib_api.ContainerType(container_key.container_type)
match container_type:
case lib_api.ContainerType.Unit:
doc.update(searchable_doc_containers(container_key, "subsections"))
case lib_api.ContainerType.Subsection:
doc.update(searchable_doc_containers(container_key, "sections"))
docs.append(doc)
except Exception as err: # pylint: disable=broad-except
status_cb(f"Error indexing container {container.key}: {err}")
@@ -696,7 +701,7 @@ def upsert_library_collection_index_doc(collection_key: LibraryCollectionLocator
If the Collection is not found or disabled (i.e. soft-deleted), then delete it from the search index.
"""
doc = searchable_doc_for_collection(collection_key)
update_components = False
update_items = False
# Soft-deleted/disabled collections are removed from the index
# and their components updated.
@@ -755,7 +760,8 @@ def update_library_components_collections(
library_key,
component,
)
doc = searchable_doc_collections(usage_key)
doc = searchable_doc_for_key(usage_key)
doc.update(searchable_doc_collections(usage_key))
docs.append(doc)
log.info(
@@ -790,7 +796,8 @@ def update_library_containers_collections(
library_key,
container,
)
doc = searchable_doc_collections(container_key)
doc = searchable_doc_for_key(container_key)
doc.update(searchable_doc_collections(container_key))
docs.append(doc)
log.info(
@@ -855,21 +862,12 @@ def upsert_item_collections_index_docs(opaque_key: OpaqueKey):
_update_index_docs([doc])
def upsert_item_units_index_docs(opaque_key: OpaqueKey):
def upsert_item_containers_index_docs(opaque_key: OpaqueKey, container_type: str):
"""
Updates the units data in documents for the given Course/Library block
Updates the containers (units/subsections/sections) data in documents for the given Course/Library block
"""
doc = {Fields.id: meili_id_from_opaque_key(opaque_key)}
doc.update(searchable_doc_units(opaque_key))
_update_index_docs([doc])
def upsert_collection_tags_index_docs(collection_key: LibraryCollectionLocator):
"""
Updates the tags data in documents for the given library collection
"""
doc = searchable_doc_tags_for_collection(collection_key)
doc.update(searchable_doc_containers(opaque_key, container_type))
_update_index_docs([doc])

View File

@@ -67,10 +67,15 @@ class Fields:
collections = "collections"
collections_display_name = "display_name"
collections_key = "key"
# Units (dictionary) that this object belongs to.
# Containers (dictionaries) that this object belongs to.
units = "units"
units_display_name = "display_name"
units_key = "key"
subsections = "subsections"
sections = "sections"
containers_display_name = "display_name"
containers_key = "key"
sections_display_name = "display_name"
sections_key = "key"
# The "content" field is a dictionary of arbitrary data, depending on the block_type.
# It comes from each XBlock's index_dictionary() method (if present) plus some processing.
@@ -97,6 +102,8 @@ class Fields:
# List of children keys
child_usage_keys = "child_usage_keys"
# List of children display names
child_display_names = "child_display_names"
# Note: new fields or values can be added at any time, but if they need to be indexed for filtering or keyword
# search, the index configuration will need to be changed, which is only done as part of the 'reindex_studio'
@@ -255,175 +262,6 @@ def _fields_from_block(block) -> dict:
return block_data
def _tags_for_content_object(object_id: OpaqueKey) -> dict:
"""
Given an XBlock, course, library, etc., get the tag data for its index doc.
See the comments above on "Field.tags" for an explanation of the format.
e.g. for something tagged "Difficulty: Hard" and "Location: Vancouver" this
would return:
{
"tags": {
"taxonomy": ["Location", "Difficulty"],
"level0": ["Location > North America", "Difficulty > Hard"],
"level1": ["Location > North America > Canada"],
"level2": ["Location > North America > Canada > Vancouver"],
}
}
Note: despite what you might expect, because this is only used for the
filtering/refinement UI, it's fine if this is a one-way transformation.
It's not necessary to be able to re-construct the exact tag IDs nor taxonomy
IDs from this data that's stored in the search index. It's just a bunch of
strings in a particular format that the frontend knows how to render to
support hierarchical refinement by tag.
"""
# Note that we could improve performance for indexing many components from the same library/course,
# if we used get_all_object_tags() to load all the tags for the library in a single query rather than loading the
# tags for each component separately.
all_tags = tagging_api.get_object_tags(str(object_id)).all()
if not all_tags:
# Clear out tags in the index when unselecting all tags for the block, otherwise
# it would remain the last value if a cleared Fields.tags field is not included
return {Fields.tags: {}}
result = {
Fields.tags_taxonomy: [],
Fields.tags_level0: [],
# ... other levels added as needed
}
for obj_tag in all_tags:
# Add the taxonomy name:
if obj_tag.taxonomy.name not in result[Fields.tags_taxonomy]:
result[Fields.tags_taxonomy].append(obj_tag.taxonomy.name)
# Taxonomy name plus each level of tags, in a list: # e.g. ["Location", "North America", "Canada", "Vancouver"]
parts = [obj_tag.taxonomy.name] + obj_tag.get_lineage()
parts = [part.replace(" > ", " _ ") for part in parts] # Escape our separator.
# Now we build each level (tags.level0, tags.level1, etc.) as applicable.
# We have a hard-coded limit of 4 levels of tags for now (see Fields.tags above).
# A tag like "Difficulty: Hard" will only result in one level (tags.level0)
# But a tag like "Location: North America > Canada > Vancouver" would result in three levels (tags.level0:
# "North America", tags.level1: "North America > Canada", tags.level2: "North America > Canada > Vancouver")
# See the comments above on "Field.tags" for an explanation of why we use this format (basically it's the format
# required by the Instantsearch frontend).
for level in range(4):
# We use '>' as a separator because it's the default for the Instantsearch frontend library, and our
# preferred separator (\t) used in the database is ignored by Meilisearch since it's whitespace.
new_value = " > ".join(parts[0:level + 2])
if f"level{level}" not in result:
result[f"level{level}"] = [new_value]
elif new_value not in result[f"level{level}"]:
result[f"level{level}"].append(new_value)
if len(parts) == level + 2:
break # We have all the levels for this tag now (e.g. parts=["Difficulty", "Hard"] -> need level0 only)
return {Fields.tags: result}
def _collections_for_content_object(object_id: OpaqueKey) -> dict:
"""
Given an XBlock, course, library, etc., get the collections for its index doc.
e.g. for something in Collections "COL_A" and "COL_B", this would return:
{
"collections": {
"display_name": ["Collection A", "Collection B"],
"key": ["COL_A", "COL_B"],
}
}
If the object is in no collections, returns:
{
"collections": {
"display_name": [],
"key": [],
},
}
"""
result = {
Fields.collections: {
Fields.collections_display_name: [],
Fields.collections_key: [],
}
}
# Gather the collections associated with this object
collections = None
try:
if isinstance(object_id, UsageKey):
component = lib_api.get_component_from_usage_key(object_id)
collections = authoring_api.get_entity_collections(
component.learning_package_id,
component.key,
).values('key', 'title')
elif isinstance(object_id, LibraryContainerLocator):
container = lib_api.get_container(object_id, include_collections=True)
collections = container.collections
else:
log.warning(f"Unexpected key type for {object_id}")
except ObjectDoesNotExist:
log.warning(f"No library item found for {object_id}")
if not collections:
return result
for collection in collections:
result[Fields.collections][Fields.collections_display_name].append(collection["title"])
result[Fields.collections][Fields.collections_key].append(collection["key"])
return result
def _units_for_content_object(object_id: OpaqueKey) -> dict:
"""
Given an XBlock, course, library, etc., get the units for its index doc.
e.g. for something in Units "UNIT_A" and "UNIT_B", this would return:
{
"units": {
"display_name": ["Unit A", "Unit B"],
"key": ["UNIT_A", "UNIT_B"],
}
}
If the object is in no collections, returns:
{
"collections": {
"display_name": [],
"key": [],
},
}
"""
result = {
Fields.units: {
Fields.units_display_name: [],
Fields.units_key: [],
}
}
# Gather the units associated with this object
units = None
try:
if isinstance(object_id, UsageKey):
units = lib_api.get_containers_contains_component(object_id)
else:
log.warning(f"Unexpected key type for {object_id}")
except ObjectDoesNotExist:
log.warning(f"No library item found for {object_id}")
if not units:
return result
for unit in units:
result[Fields.units][Fields.units_display_name].append(unit.display_name)
result[Fields.units][Fields.units_key].append(str(unit.container_key))
return result
def _published_data_from_block(block_published) -> dict:
"""
Given an library block get the published data.
@@ -493,52 +331,6 @@ def searchable_doc_for_library_block(xblock_metadata: lib_api.LibraryXBlockMetad
return doc
def searchable_doc_tags(key: OpaqueKey) -> dict:
"""
Generate a dictionary document suitable for ingestion into a search engine
like Meilisearch or Elasticsearch, with the tags data for the given content object.
"""
doc = searchable_doc_for_key(key)
doc.update(_tags_for_content_object(key))
return doc
def searchable_doc_collections(opaque_key: OpaqueKey) -> dict:
"""
Generate a dictionary document suitable for ingestion into a search engine
like Meilisearch or Elasticsearch, with the collections data for the given content object.
"""
doc = searchable_doc_for_key(opaque_key)
doc.update(_collections_for_content_object(opaque_key))
return doc
def searchable_doc_units(opaque_key: OpaqueKey) -> dict:
"""
Generate a dictionary document suitable for ingestion into a search engine
like Meilisearch or Elasticsearch, with the units data for the given content object.
"""
doc = searchable_doc_for_key(opaque_key)
doc.update(_units_for_content_object(opaque_key))
return doc
def searchable_doc_tags_for_collection(
collection_key: LibraryCollectionLocator
) -> dict:
"""
Generate a dictionary document suitable for ingestion into a search engine
like Meilisearch or Elasticsearch, with the tags data for the given library collection.
"""
doc = searchable_doc_for_key(collection_key)
doc.update(_tags_for_content_object(collection_key))
return doc
def searchable_doc_for_course_block(block) -> dict:
"""
Generate a dictionary document suitable for ingestion into a search engine
@@ -555,6 +347,176 @@ def searchable_doc_for_course_block(block) -> dict:
return doc
def searchable_doc_tags(object_id: OpaqueKey) -> dict:
"""
Given an XBlock, course, library, etc., get the tag data for its index doc.
See the comments above on "Field.tags" for an explanation of the format.
e.g. for something tagged "Difficulty: Hard" and "Location: Vancouver" this
would return:
{
"tags": {
"taxonomy": ["Location", "Difficulty"],
"level0": ["Location > North America", "Difficulty > Hard"],
"level1": ["Location > North America > Canada"],
"level2": ["Location > North America > Canada > Vancouver"],
}
}
Note: despite what you might expect, because this is only used for the
filtering/refinement UI, it's fine if this is a one-way transformation.
It's not necessary to be able to re-construct the exact tag IDs nor taxonomy
IDs from this data that's stored in the search index. It's just a bunch of
strings in a particular format that the frontend knows how to render to
support hierarchical refinement by tag.
"""
# Note that we could improve performance for indexing many components from the same library/course,
# if we used get_all_object_tags() to load all the tags for the library in a single query rather than loading the
# tags for each component separately.
all_tags = tagging_api.get_object_tags(str(object_id)).all()
if not all_tags:
# Clear out tags in the index when unselecting all tags for the block, otherwise
# it would remain the last value if a cleared Fields.tags field is not included
return {Fields.tags: {}}
result = {
Fields.tags_taxonomy: [],
Fields.tags_level0: [],
# ... other levels added as needed
}
for obj_tag in all_tags:
# Add the taxonomy name:
if obj_tag.taxonomy.name not in result[Fields.tags_taxonomy]:
result[Fields.tags_taxonomy].append(obj_tag.taxonomy.name)
# Taxonomy name plus each level of tags, in a list: # e.g. ["Location", "North America", "Canada", "Vancouver"]
parts = [obj_tag.taxonomy.name] + obj_tag.get_lineage()
parts = [part.replace(" > ", " _ ") for part in parts] # Escape our separator.
# Now we build each level (tags.level0, tags.level1, etc.) as applicable.
# We have a hard-coded limit of 4 levels of tags for now (see Fields.tags above).
# A tag like "Difficulty: Hard" will only result in one level (tags.level0)
# But a tag like "Location: North America > Canada > Vancouver" would result in three levels (tags.level0:
# "North America", tags.level1: "North America > Canada", tags.level2: "North America > Canada > Vancouver")
# See the comments above on "Field.tags" for an explanation of why we use this format (basically it's the format
# required by the Instantsearch frontend).
for level in range(4):
# We use '>' as a separator because it's the default for the Instantsearch frontend library, and our
# preferred separator (\t) used in the database is ignored by Meilisearch since it's whitespace.
new_value = " > ".join(parts[0:level + 2])
if f"level{level}" not in result:
result[f"level{level}"] = [new_value]
elif new_value not in result[f"level{level}"]:
result[f"level{level}"].append(new_value)
if len(parts) == level + 2:
break # We have all the levels for this tag now (e.g. parts=["Difficulty", "Hard"] -> need level0 only)
return {Fields.tags: result}
def searchable_doc_collections(object_id: OpaqueKey) -> dict:
"""
Given an XBlock, course, library, etc., get the collections for its index doc.
e.g. for something in Collections "COL_A" and "COL_B", this would return:
{
"collections": {
"display_name": ["Collection A", "Collection B"],
"key": ["COL_A", "COL_B"],
}
}
If the object is in no collections, returns:
{
"collections": {
"display_name": [],
"key": [],
},
}
"""
result = {
Fields.collections: {
Fields.collections_display_name: [],
Fields.collections_key: [],
}
}
# Gather the collections associated with this object
collections = None
try:
if isinstance(object_id, UsageKey):
component = lib_api.get_component_from_usage_key(object_id)
collections = authoring_api.get_entity_collections(
component.learning_package_id,
component.key,
).values('key', 'title')
elif isinstance(object_id, LibraryContainerLocator):
container = lib_api.get_container(object_id, include_collections=True)
collections = container.collections
else:
log.warning(f"Unexpected key type for {object_id}")
except ObjectDoesNotExist:
log.warning(f"No library item found for {object_id}")
if not collections:
return result
for collection in collections:
result[Fields.collections][Fields.collections_display_name].append(collection["title"])
result[Fields.collections][Fields.collections_key].append(collection["key"])
return result
def searchable_doc_containers(object_id: OpaqueKey, container_type: str) -> dict:
"""
Given an XBlock, course, library, etc., get the containers that it is part of for its index doc.
e.g. for something in Units "UNIT_A" and "UNIT_B", this would return:
{
"units": {
"display_name": ["Unit A", "Unit B"],
"key": ["UNIT_A", "UNIT_B"],
}
}
If the object is in no containers, returns:
{
"units": {
"display_name": [],
"key": [],
},
}
"""
container_field = getattr(Fields, container_type)
result = {
container_field: {
Fields.containers_display_name: [],
Fields.containers_key: [],
}
}
# Gather the containers associated with this object
containers = None
try:
if isinstance(object_id, OpaqueKey):
containers = lib_api.get_containers_contains_item(object_id)
else:
log.warning(f"Unexpected key type for {object_id}")
except ObjectDoesNotExist:
log.warning(f"No library item found for {object_id}")
if not containers:
return result
for container in containers:
result[container_field][Fields.containers_display_name].append(container.display_name)
result[container_field][Fields.containers_key].append(str(container.container_key))
return result
def searchable_doc_for_collection(
collection_key: LibraryCollectionLocator,
*,
@@ -674,13 +636,17 @@ def searchable_doc_for_container(
for child in children
]
def get_child_names(children) -> list[str]:
return [child.display_name for child in children]
doc.update({
Fields.display_name: container.display_name,
Fields.created: container.created.timestamp(),
Fields.modified: container.modified.timestamp(),
Fields.num_children: len(draft_children),
Fields.content: {
Fields.child_usage_keys: get_child_keys(draft_children)
Fields.child_usage_keys: get_child_keys(draft_children),
Fields.child_display_names: get_child_names(draft_children),
},
Fields.publish_status: publish_status,
Fields.last_published: container.last_published.timestamp() if container.last_published else None,
@@ -699,6 +665,7 @@ def searchable_doc_for_container(
Fields.published_num_children: len(published_children),
Fields.published_content: {
Fields.child_usage_keys: get_child_keys(published_children),
Fields.child_display_names: get_child_names(published_children),
},
}

View File

@@ -44,9 +44,8 @@ from openedx.core.djangoapps.content_libraries import api as lib_api
from .api import (
only_if_meilisearch_enabled,
upsert_content_object_tags_index_doc,
upsert_collection_tags_index_docs,
upsert_item_collections_index_docs,
upsert_item_units_index_docs,
upsert_item_containers_index_docs,
)
from .tasks import (
delete_library_block_index_doc,
@@ -258,14 +257,15 @@ def content_object_associations_changed_handler(**kwargs) -> None:
# This event's changes may contain both "tags" and "collections", but this will happen rarely, if ever.
# So we allow a potential double "upsert" here.
if not content_object.changes or "tags" in content_object.changes:
if isinstance(opaque_key, LibraryCollectionLocator):
upsert_collection_tags_index_docs(opaque_key)
else:
upsert_content_object_tags_index_doc(opaque_key)
upsert_content_object_tags_index_doc(opaque_key)
if not content_object.changes or "collections" in content_object.changes:
upsert_item_collections_index_docs(opaque_key)
if not content_object.changes or "units" in content_object.changes:
upsert_item_units_index_docs(opaque_key)
upsert_item_containers_index_docs(opaque_key, "units")
if not content_object.changes or "sections" in content_object.changes:
upsert_item_containers_index_docs(opaque_key, "sections")
if not content_object.changes or "subsections" in content_object.changes:
upsert_item_containers_index_docs(opaque_key, "subsections")
@receiver(LIBRARY_CONTAINER_CREATED)

View File

@@ -256,7 +256,10 @@ class TestSearchApi(ModuleStoreTestCase):
"display_name": "Unit 1",
# description is not set for containers
"num_children": 0,
"content": {"child_usage_keys": []},
"content": {
"child_usage_keys": [],
"child_display_names": [],
},
"publish_status": "never",
"context_key": "lib:org1:lib",
"org": "org1",
@@ -276,7 +279,10 @@ class TestSearchApi(ModuleStoreTestCase):
"display_name": "Subsection 1",
# description is not set for containers
"num_children": 0,
"content": {"child_usage_keys": []},
"content": {
"child_usage_keys": [],
"child_display_names": [],
},
"publish_status": "never",
"context_key": "lib:org1:lib",
"org": "org1",
@@ -296,7 +302,10 @@ class TestSearchApi(ModuleStoreTestCase):
"display_name": "Section 1",
# description is not set for containers
"num_children": 0,
"content": {"child_usage_keys": []},
"content": {
"child_usage_keys": [],
"child_display_names": [],
},
"publish_status": "never",
"context_key": "lib:org1:lib",
"org": "org1",
@@ -336,9 +345,11 @@ class TestSearchApi(ModuleStoreTestCase):
doc_unit = copy.deepcopy(self.unit_dict)
doc_unit["tags"] = {}
doc_unit["collections"] = {'display_name': [], 'key': []}
doc_unit["subsections"] = {"display_name": [], "key": []}
doc_subsection = copy.deepcopy(self.subsection_dict)
doc_subsection["tags"] = {}
doc_subsection["collections"] = {'display_name': [], 'key': []}
doc_subsection["sections"] = {'display_name': [], 'key': []}
doc_section = copy.deepcopy(self.section_dict)
doc_section["tags"] = {}
doc_section["collections"] = {'display_name': [], 'key': []}
@@ -376,9 +387,11 @@ class TestSearchApi(ModuleStoreTestCase):
doc_unit = copy.deepcopy(self.unit_dict)
doc_unit["tags"] = {}
doc_unit["collections"] = {"display_name": [], "key": []}
doc_unit["subsections"] = {"display_name": [], "key": []}
doc_subsection = copy.deepcopy(self.subsection_dict)
doc_subsection["tags"] = {}
doc_subsection["collections"] = {'display_name': [], 'key': []}
doc_subsection["sections"] = {'display_name': [], 'key': []}
doc_section = copy.deepcopy(self.section_dict)
doc_section["tags"] = {}
doc_section["collections"] = {'display_name': [], 'key': []}
@@ -983,14 +996,21 @@ class TestSearchApi(ModuleStoreTestCase):
container_dict["id"],
)
@ddt.data(
"unit",
"subsection",
"section",
)
@override_settings(MEILISEARCH_ENABLED=True)
def test_index_library_container_metadata(self, mock_meilisearch) -> None:
def test_index_library_container_metadata(self, container_type, mock_meilisearch) -> None:
"""
Test indexing a Library Container.
"""
api.upsert_library_container_index_doc(self.unit.container_key)
container = getattr(self, container_type)
container_dict = getattr(self, f"{container_type}_dict")
api.upsert_library_container_index_doc(container.container_key)
mock_meilisearch.return_value.index.return_value.update_documents.assert_called_once_with([self.unit_dict])
mock_meilisearch.return_value.index.return_value.update_documents.assert_called_once_with([container_dict])
@ddt.data(
("unit", "lctorg1libunitunit-1-e4527f7c"),
@@ -1050,7 +1070,10 @@ class TestSearchApi(ModuleStoreTestCase):
new_unit_dict = {
**self.unit_dict,
"num_children": 1,
'content': {'child_usage_keys': [self.doc_problem1["usage_key"]]}
'content': {
'child_usage_keys': [self.doc_problem1["usage_key"]],
'child_display_names': [self.doc_problem1["display_name"]],
}
}
assert mock_meilisearch.return_value.index.return_value.update_documents.call_count == 2
@@ -1071,16 +1094,25 @@ class TestSearchApi(ModuleStoreTestCase):
None,
)
# TODO verify subsections in units
doc_block_with_subsections = {
"id": self.unit_dict["id"],
"subsections": {
"display_name": [self.subsection.display_name],
"key": [self.subsection_key],
},
}
new_subsection_dict = {
**self.subsection_dict,
"num_children": 1,
'content': {'child_usage_keys': [self.unit_key]}
'content': {
'child_usage_keys': [self.unit_key],
'child_display_names': [self.unit.display_name]
}
}
assert mock_meilisearch.return_value.index.return_value.update_documents.call_count == 1
assert mock_meilisearch.return_value.index.return_value.update_documents.call_count == 2
mock_meilisearch.return_value.index.return_value.update_documents.assert_has_calls(
[
call([doc_block_with_subsections]),
call([new_subsection_dict]),
],
any_order=True,
@@ -1095,16 +1127,25 @@ class TestSearchApi(ModuleStoreTestCase):
None,
)
# TODO verify section in subsections
doc_block_with_sections = {
"id": self.subsection_dict["id"],
"sections": {
"display_name": [self.section.display_name],
"key": [self.section_key],
},
}
new_section_dict = {
**self.section_dict,
"num_children": 1,
'content': {'child_usage_keys': [self.subsection_key]}
'content': {
'child_usage_keys': [self.subsection_key],
'child_display_names': [self.subsection.display_name],
}
}
assert mock_meilisearch.return_value.index.return_value.update_documents.call_count == 1
assert mock_meilisearch.return_value.index.return_value.update_documents.call_count == 2
mock_meilisearch.return_value.index.return_value.update_documents.assert_has_calls(
[
call([doc_block_with_sections]),
call([new_section_dict]),
],
any_order=True,

View File

@@ -26,13 +26,11 @@ try:
searchable_doc_for_course_block,
searchable_doc_for_library_block,
searchable_doc_tags,
searchable_doc_tags_for_collection,
)
from ..models import SearchAccess
except RuntimeError:
searchable_doc_for_course_block = lambda x: x
searchable_doc_tags = lambda x: x
searchable_doc_tags_for_collection = lambda x: x
searchable_doc_for_collection = lambda x: x
searchable_doc_for_container = lambda x: x
searchable_doc_for_library_block = lambda x: x
@@ -484,7 +482,7 @@ class StudioDocumentsTest(SharedModuleStoreTestCase):
def test_collection_with_library(self):
doc = searchable_doc_for_collection(self.collection_key)
doc.update(searchable_doc_tags_for_collection(self.collection_key))
doc.update(searchable_doc_tags(self.collection_key))
assert doc == {
"id": "lib-collectionedx2012_falltoy_collection-d1d907a4",
@@ -513,7 +511,7 @@ class StudioDocumentsTest(SharedModuleStoreTestCase):
library_api.publish_changes(self.library.key)
doc = searchable_doc_for_collection(self.collection_key)
doc.update(searchable_doc_tags_for_collection(self.collection_key))
doc.update(searchable_doc_tags(self.collection_key))
assert doc == {
"id": "lib-collectionedx2012_falltoy_collection-d1d907a4",
@@ -564,6 +562,7 @@ class StudioDocumentsTest(SharedModuleStoreTestCase):
"num_children": 0,
"content": {
"child_usage_keys": [],
"child_display_names": [],
},
"publish_status": "never",
"context_key": "lib:edX:2012_Fall",
@@ -609,6 +608,9 @@ class StudioDocumentsTest(SharedModuleStoreTestCase):
"child_usage_keys": [
"lb:edX:2012_Fall:html:text2",
],
"child_display_names": [
"Text",
],
},
"publish_status": "published",
"context_key": "lib:edX:2012_Fall",
@@ -628,6 +630,9 @@ class StudioDocumentsTest(SharedModuleStoreTestCase):
"child_usage_keys": [
"lb:edX:2012_Fall:html:text2",
],
"child_display_names": [
"Text",
],
},
},
}
@@ -676,6 +681,10 @@ class StudioDocumentsTest(SharedModuleStoreTestCase):
"lb:edX:2012_Fall:html:text2",
"lb:edX:2012_Fall:html:text3",
],
"child_display_names": [
"Text",
"Text",
],
},
"publish_status": "modified",
"context_key": "lib:edX:2012_Fall",
@@ -695,6 +704,9 @@ class StudioDocumentsTest(SharedModuleStoreTestCase):
"child_usage_keys": [
"lb:edX:2012_Fall:html:text2",
],
"child_display_names": [
"Text",
],
},
},
}

View File

@@ -58,7 +58,7 @@ from .block_metadata import LibraryXBlockMetadata, LibraryXBlockStaticFile
from .containers import (
create_container,
get_container,
get_containers_contains_component,
get_containers_contains_item,
update_container_children,
ContainerMetadata,
ContainerType,
@@ -229,7 +229,7 @@ def set_library_block_olx(usage_key: LibraryUsageLocatorV2, new_olx_str: str) ->
# For each container, trigger LIBRARY_CONTAINER_UPDATED signal and set background=True to trigger
# container indexing asynchronously.
affected_containers = get_containers_contains_component(usage_key)
affected_containers = get_containers_contains_item(usage_key)
for container in affected_containers:
LIBRARY_CONTAINER_UPDATED.send_event(
library_container=LibraryContainerData(
@@ -585,7 +585,7 @@ def delete_library_block(
component = get_component_from_usage_key(usage_key)
library_key = usage_key.context_key
affected_collections = authoring_api.get_entity_collections(component.learning_package_id, component.key)
affected_containers = get_containers_contains_component(usage_key)
affected_containers = get_containers_contains_item(usage_key)
authoring_api.soft_delete_draft(component.pk, deleted_by=user_id)
@@ -673,7 +673,7 @@ def restore_library_block(usage_key: LibraryUsageLocatorV2, user_id: int | None
# container indexing asynchronously.
#
# To update the components count in containers
affected_containers = get_containers_contains_component(usage_key)
affected_containers = get_containers_contains_item(usage_key)
for container in affected_containers:
LIBRARY_CONTAINER_UPDATED.send_event(
library_container=LibraryContainerData(

View File

@@ -24,7 +24,7 @@ from openedx_events.content_authoring.signals import (
LIBRARY_CONTAINER_UPDATED,
)
from openedx_learning.api import authoring as authoring_api
from openedx_learning.api.authoring_models import Container, ContainerVersion
from openedx_learning.api.authoring_models import Container, ContainerVersion, Component
from openedx.core.djangoapps.content_libraries.api.collections import library_collection_locator
from openedx.core.djangoapps.xblock.api import get_component_from_usage_key
@@ -50,7 +50,7 @@ __all__ = [
"delete_container",
"restore_container",
"update_container_children",
"get_containers_contains_component",
"get_containers_contains_item",
"publish_container_changes",
]
@@ -513,7 +513,13 @@ def update_container_children(
entities_action=entities_action,
)
# TODO add CONTENT_OBJECT_ASSOCIATIONS_CHANGED for subsections
for key in children_ids:
CONTENT_OBJECT_ASSOCIATIONS_CHANGED.send_event(
content_object=ContentObjectChangedData(
object_id=str(key),
changes=["subsections"],
),
)
case ContainerType.Section:
subsections = [_get_container_from_key(key).subsection for key in children_ids] # type: ignore[arg-type]
new_version = authoring_api.create_next_section_version(
@@ -524,7 +530,13 @@ def update_container_children(
entities_action=entities_action,
)
# TODO add CONTENT_OBJECT_ASSOCIATIONS_CHANGED for sections
for key in children_ids:
CONTENT_OBJECT_ASSOCIATIONS_CHANGED.send_event(
content_object=ContentObjectChangedData(
object_id=str(key),
changes=["sections"],
),
)
case _:
raise ValueError(f"Invalid container type: {container_type}")
@@ -537,19 +549,26 @@ def update_container_children(
return ContainerMetadata.from_container(library_key, new_version.container)
def get_containers_contains_component(
usage_key: LibraryUsageLocatorV2
def get_containers_contains_item(
key: LibraryUsageLocatorV2 | LibraryContainerLocator
) -> list[ContainerMetadata]:
"""
Get containers that contains the component.
Get containers that contains the item,
that can be a component or another container.
"""
assert isinstance(usage_key, LibraryUsageLocatorV2)
component = get_component_from_usage_key(usage_key)
item: Component | Container
if isinstance(key, LibraryUsageLocatorV2):
item = get_component_from_usage_key(key)
elif isinstance(key, LibraryContainerLocator):
item = _get_container_from_key(key)
containers = authoring_api.get_containers_with_entity(
component.publishable_entity.pk,
item.publishable_entity.pk,
)
return [
ContainerMetadata.from_container(usage_key.context_key, container)
ContainerMetadata.from_container(key.lib_key, container)
for container in containers
]

View File

@@ -121,7 +121,7 @@ class LibraryContextImpl(LearningContext):
with the given usage_key.
"""
assert isinstance(usage_key, LibraryUsageLocatorV2)
affected_containers = api.get_containers_contains_component(usage_key)
affected_containers = api.get_containers_contains_item(usage_key)
for container in affected_containers:
LIBRARY_CONTAINER_UPDATED.send_event(
library_container=LibraryContainerData(

View File

@@ -99,7 +99,7 @@ def send_events_after_publish(publish_log_pk: int, library_key_str: str) -> None
# Publishing a container will auto-publish its children, but publishing a single component or all changes
# in the library will NOT usually include any parent containers. But we do need to notify listeners that the
# parent container(s) have changed, e.g. so the search index can update the "has_unpublished_changes"
for parent_container in api.get_containers_contains_component(usage_key):
for parent_container in api.get_containers_contains_item(usage_key):
affected_containers.add(parent_container.container_key)
# TODO: should this be a CONTAINER_CHILD_PUBLISHED event instead of CONTAINER_PUBLISHED ?
elif hasattr(record.entity, "container"):
@@ -181,7 +181,7 @@ def send_events_after_revert(draft_change_log_id: int, library_key_str: str) ->
# If any containers contain this component, their child list / component count may need to be updated
# e.g. if this was a newly created component in the container and is now deleted, or this was deleted and
# is now restored.
for parent_container in api.get_containers_contains_component(usage_key):
for parent_container in api.get_containers_contains_item(usage_key):
updated_container_keys.add(parent_container.container_key)
# TODO: do we also need to send CONTENT_OBJECT_ASSOCIATIONS_CHANGED for this component, or is

View File

@@ -746,6 +746,39 @@ class ContentLibraryContainersTest(ContentLibrariesRestApiTest):
# Create Units
self.unit1 = api.create_container(self.lib1.library_key, api.ContainerType.Unit, 'unit-1', 'Unit 1', None)
self.unit2 = api.create_container(self.lib1.library_key, api.ContainerType.Unit, 'unit-2', 'Unit 2', None)
self.unit3 = api.create_container(self.lib1.library_key, api.ContainerType.Unit, 'unit-3', 'Unit 3', None)
# Create Subsections
self.subsection1 = api.create_container(
self.lib1.library_key,
api.ContainerType.Subsection,
'subsection-1',
'Subsection 1',
None,
)
self.subsection2 = api.create_container(
self.lib1.library_key,
api.ContainerType.Subsection,
'subsection-2',
'Subsection 2',
None,
)
# Create Sections
self.section1 = api.create_container(
self.lib1.library_key,
api.ContainerType.Section,
'section-1',
'Section 1',
None,
)
self.section2 = api.create_container(
self.lib1.library_key,
api.ContainerType.Section,
'section-2',
'Section 2',
None,
)
# Create XBlocks
# Create some library blocks in lib1
@@ -753,6 +786,9 @@ class ContentLibraryContainersTest(ContentLibrariesRestApiTest):
self.lib1.library_key, "problem", "problem1",
)
self.problem_block_usage_key = LibraryUsageLocatorV2.from_string(self.problem_block["id"])
self.problem_block_2 = self._add_block_to_library(
self.lib1.library_key, "problem", "problem2",
)
self.html_block = self._add_block_to_library(
self.lib1.library_key, "html", "html1",
)
@@ -770,9 +806,37 @@ class ContentLibraryContainersTest(ContentLibrariesRestApiTest):
None,
)
def test_get_containers_contains_component(self) -> None:
problem_block_containers = api.get_containers_contains_component(self.problem_block_usage_key)
html_block_containers = api.get_containers_contains_component(self.html_block_usage_key)
# Add units to subsections
api.update_container_children(
self.subsection1.container_key,
[self.unit1.container_key, self.unit2.container_key],
None,
)
api.update_container_children(
self.subsection2.container_key,
[self.unit1.container_key],
None,
)
# Add subsections to sections
api.update_container_children(
self.section1.container_key,
[self.subsection1.container_key, self.subsection2.container_key],
None,
)
api.update_container_children(
self.section2.container_key,
[self.subsection1.container_key],
None,
)
def test_get_containers_contains_item(self):
problem_block_containers = api.get_containers_contains_item(self.problem_block_usage_key)
html_block_containers = api.get_containers_contains_item(self.html_block_usage_key)
unit_1_containers = api.get_containers_contains_item(self.unit1.container_key)
unit_2_containers = api.get_containers_contains_item(self.unit2.container_key)
subsection_1_containers = api.get_containers_contains_item(self.subsection1.container_key)
subsection_2_containers = api.get_containers_contains_item(self.subsection2.container_key)
assert len(problem_block_containers) == 1
assert problem_block_containers[0].container_key == self.unit1.container_key
@@ -781,7 +845,21 @@ class ContentLibraryContainersTest(ContentLibrariesRestApiTest):
assert html_block_containers[0].container_key == self.unit1.container_key
assert html_block_containers[1].container_key == self.unit2.container_key
def _validate_calls_of_html_block(self, event_mock) -> None:
assert len(unit_1_containers) == 2
assert unit_1_containers[0].container_key == self.subsection1.container_key
assert unit_1_containers[1].container_key == self.subsection2.container_key
assert len(unit_2_containers) == 1
assert unit_2_containers[0].container_key == self.subsection1.container_key
assert len(subsection_1_containers) == 2
assert subsection_1_containers[0].container_key == self.section1.container_key
assert subsection_1_containers[1].container_key == self.section2.container_key
assert len(subsection_2_containers) == 1
assert subsection_2_containers[0].container_key == self.section1.container_key
def _validate_calls_of_html_block(self, event_mock):
"""
Validate that the `event_mock` has been called twice
using the `LIBRARY_CONTAINER_UPDATED` signal.
@@ -875,6 +953,76 @@ class ContentLibraryContainersTest(ContentLibrariesRestApiTest):
event_reciver.call_args_list[0].kwargs,
)
def test_call_object_changed_signal_when_remove_unit(self) -> None:
unit4 = api.create_container(self.lib1.library_key, api.ContainerType.Unit, 'unit-4', 'Unit 4', None)
api.update_container_children(
self.subsection2.container_key,
[unit4.container_key],
None,
entities_action=authoring_api.ChildrenEntitiesAction.APPEND,
)
event_reciver = mock.Mock()
CONTENT_OBJECT_ASSOCIATIONS_CHANGED.connect(event_reciver)
api.update_container_children(
self.subsection2.container_key,
[unit4.container_key],
None,
entities_action=authoring_api.ChildrenEntitiesAction.REMOVE,
)
assert event_reciver.call_count == 1
self.assertDictContainsSubset(
{
"signal": CONTENT_OBJECT_ASSOCIATIONS_CHANGED,
"sender": None,
"content_object": ContentObjectChangedData(
object_id=str(unit4.container_key),
changes=["subsections"],
),
},
event_reciver.call_args_list[0].kwargs,
)
def test_call_object_changed_signal_when_remove_subsection(self) -> None:
subsection3 = api.create_container(
self.lib1.library_key,
api.ContainerType.Subsection,
'subsection-3',
'Subsection 3',
None,
)
api.update_container_children(
self.section2.container_key,
[subsection3.container_key],
None,
entities_action=authoring_api.ChildrenEntitiesAction.APPEND,
)
event_reciver = mock.Mock()
CONTENT_OBJECT_ASSOCIATIONS_CHANGED.connect(event_reciver)
api.update_container_children(
self.section2.container_key,
[subsection3.container_key],
None,
entities_action=authoring_api.ChildrenEntitiesAction.REMOVE,
)
assert event_reciver.call_count == 1
self.assertDictContainsSubset(
{
"signal": CONTENT_OBJECT_ASSOCIATIONS_CHANGED,
"sender": None,
"content_object": ContentObjectChangedData(
object_id=str(subsection3.container_key),
changes=["sections"],
),
},
event_reciver.call_args_list[0].kwargs,
)
def test_call_object_changed_signal_when_add_component(self) -> None:
event_reciver = mock.Mock()
CONTENT_OBJECT_ASSOCIATIONS_CHANGED.connect(event_reciver)
@@ -919,19 +1067,104 @@ class ContentLibraryContainersTest(ContentLibrariesRestApiTest):
event_reciver.call_args_list[1].kwargs,
)
def test_call_object_changed_signal_when_add_unit(self) -> None:
event_reciver = mock.Mock()
CONTENT_OBJECT_ASSOCIATIONS_CHANGED.connect(event_reciver)
unit4 = api.create_container(self.lib1.library_key, api.ContainerType.Unit, 'unit-4', 'Unit 4', None)
unit5 = api.create_container(self.lib1.library_key, api.ContainerType.Unit, 'unit-5', 'Unit 5', None)
api.update_container_children(
self.subsection2.container_key,
[unit4.container_key, unit5.container_key],
None,
entities_action=authoring_api.ChildrenEntitiesAction.APPEND,
)
assert event_reciver.call_count == 2
self.assertDictContainsSubset(
{
"signal": CONTENT_OBJECT_ASSOCIATIONS_CHANGED,
"sender": None,
"content_object": ContentObjectChangedData(
object_id=str(unit4.container_key),
changes=["subsections"],
),
},
event_reciver.call_args_list[0].kwargs,
)
self.assertDictContainsSubset(
{
"signal": CONTENT_OBJECT_ASSOCIATIONS_CHANGED,
"sender": None,
"content_object": ContentObjectChangedData(
object_id=str(unit5.container_key),
changes=["subsections"],
),
},
event_reciver.call_args_list[1].kwargs,
)
def test_call_object_changed_signal_when_add_subsection(self) -> None:
event_reciver = mock.Mock()
CONTENT_OBJECT_ASSOCIATIONS_CHANGED.connect(event_reciver)
subsection3 = api.create_container(
self.lib1.library_key,
api.ContainerType.Subsection,
'subsection-3',
'Subsection 3',
None,
)
subsection4 = api.create_container(
self.lib1.library_key,
api.ContainerType.Subsection,
'subsection-4',
'Subsection 4',
None,
)
api.update_container_children(
self.section2.container_key,
[subsection3.container_key, subsection4.container_key],
None,
entities_action=authoring_api.ChildrenEntitiesAction.APPEND,
)
assert event_reciver.call_count == 2
self.assertDictContainsSubset(
{
"signal": CONTENT_OBJECT_ASSOCIATIONS_CHANGED,
"sender": None,
"content_object": ContentObjectChangedData(
object_id=str(subsection3.container_key),
changes=["sections"],
),
},
event_reciver.call_args_list[0].kwargs,
)
self.assertDictContainsSubset(
{
"signal": CONTENT_OBJECT_ASSOCIATIONS_CHANGED,
"sender": None,
"content_object": ContentObjectChangedData(
object_id=str(subsection4.container_key),
changes=["sections"],
),
},
event_reciver.call_args_list[1].kwargs,
)
def test_delete_component_and_revert(self) -> None:
"""
When a component is deleted and then the delete is reverted, signals
will be emitted to update any containing containers.
"""
# Add components and publish
api.update_container_children(self.unit1.container_key, [
LibraryUsageLocatorV2.from_string(self.problem_block["id"]),
api.update_container_children(self.unit3.container_key, [
LibraryUsageLocatorV2.from_string(self.problem_block_2["id"]),
], user_id=None)
api.publish_changes(self.lib1.library_key)
# Delete component and revert
api.delete_library_block(LibraryUsageLocatorV2.from_string(self.problem_block["id"]))
api.delete_library_block(LibraryUsageLocatorV2.from_string(self.problem_block_2["id"]))
container_event_receiver = mock.Mock()
LIBRARY_CONTAINER_UPDATED.connect(container_event_receiver)
@@ -942,5 +1175,5 @@ class ContentLibraryContainersTest(ContentLibrariesRestApiTest):
assert {
"signal": LIBRARY_CONTAINER_UPDATED,
"sender": None,
"library_container": LibraryContainerData(container_key=self.unit1.container_key),
"library_container": LibraryContainerData(container_key=self.unit3.container_key),
}.items() <= container_event_receiver.call_args_list[0].kwargs.items()