Handle other places where we might be unpickling things.

We use pickling in conjunction with caches in these cases, so we should
be able to fall back to the uncached behavior if the pickled data is
corrupt in some way.

e.g., it's a Python 2 pickle we don't know how to read.
This commit is contained in:
Feanil Patel
2019-12-05 17:03:26 -05:00
parent 684f254a77
commit 48e9724808
2 changed files with 18 additions and 5 deletions

View File

@@ -207,7 +207,15 @@ class BlockStructureStore(object):
"""
Deserializes the given data and returns the parsed block_structure.
"""
block_relations, transformer_data, block_data_map = zunpickle(serialized_data)
try:
block_relations, transformer_data, block_data_map = zunpickle(serialized_data)
except Exception:
# Somehow failed to de-serialize the data; assume it's corrupt.
bs_model = self._get_model(root_block_usage_key)
logger.warning("BlockStructure: Failed to load data from cache for %s", bs_model)
raise BlockStructureNotFound(bs_model.data_usage_key)
return BlockStructureFactory.create_new(
root_block_usage_key,
block_relations,

View File

@@ -56,11 +56,16 @@ def get_edx_api_data(api_config, resource, api, resource_id=None, querystring=No
cached = cache.get(cache_key)
if cached:
cached_response = zunpickle(cached)
if fields:
cached_response = get_fields(fields, cached_response)
try:
cached_response = zunpickle(cached)
if fields:
cached_response = get_fields(fields, cached_response)
return cached_response
return cached_response
except Exception:
# Data is corrupt in some way.
log.warning("Data for cache is corrupt for cache key %s", cache_key)
cache.delete(cache_key)
try:
endpoint = getattr(api, resource)