Merge branch 'master' into fix-FB-share

This commit is contained in:
Edward Zarecor
2023-09-07 16:09:14 -04:00
committed by GitHub
1388 changed files with 37610 additions and 22256 deletions

View File

@@ -103,6 +103,7 @@ common/test/data/badges/*.png
### Static assets pipeline artifacts
**/*.scssc
lms/static/css/
!lms/static/css/vendor
lms/static/certificates/css/
cms/static/css/
common/static/common/js/vendor/

View File

@@ -27,6 +27,7 @@
"func-names": "off",
"indent": ["error", 4],
"react/jsx-indent": ["error", 4],
"react/jsx-indent-props": ["error", 4],
"new-cap": "off",
"no-else-return": "off",
"no-shadow": "error",
@@ -70,7 +71,6 @@
"prefer-rest-params": "off",
"prefer-template": "off",
"radix": "off",
"react/jsx-indent-props": ["error", 4],
"react/prop-types": "off",
"vars-on-top": "off"
}

4
.github/CODEOWNERS vendored
View File

@@ -43,9 +43,9 @@ openedx/features/content_type_gating/
openedx/features/course_duration_limits/
openedx/features/discounts/
# Ping tCRIL On-call if someone uses the QuickStart
# Ping Axim On-call if someone uses the QuickStart
# https://docs.openedx.org/en/latest/developers/quickstarts/first_openedx_pr.html
lms/templates/dashboard.html @openedx/tcril-oncall
lms/templates/dashboard.html @openedx/axim-oncall
# Ensure minimal.yml stays minimal, this could be a team in the future
# but it's just me for now, others can sign up if they care as well.

View File

@@ -0,0 +1,69 @@
# Rejects PR if requirements files are inconsistent.
#
# This will produce a failing check for any PR that does not produce a
# clean run of `make compile-requirements` on Linux.
name: Consistent Python dependencies
on:
pull_request:
paths:
- 'requirements/**'
push:
branches:
- master
paths:
- 'requirements/**'
defaults:
run:
shell: bash # strict bash
jobs:
check-requirements:
name: Compile requirements
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.8'
- run: |
make compile-requirements
- name: Fail if compiling requirements caused changes
run: |
SUMMARY_HELP=$(cat <<'EOMARKDOWN'
# Inconsistent Python dependencies
It appears that the Python dependencies in this PR are inconsistent: A re-run of
`make compile-requirements` produced changes. This might mean that your PR would
fail to deploy properly in production, or could have inconsistent behavior for
developers.
Please see the requirements README for information on how to resolve this:
https://github.com/openedx/edx-platform/blob/master/requirements/README.rst#inconsistent-dependencies
EOMARKDOWN
)
make_summary () {
echo "$SUMMARY_HELP"
echo
echo "----"
echo
echo "Diff follows:"
echo
echo '```'
git diff || true
echo '```'
}
git diff --quiet --exit-code || {
# Job Summaries are cool, but echo to the job log as well, because
# that's where the PR checks will actually link to.
make_summary | tee -a $GITHUB_STEP_SUMMARY
exit 1
}

View File

@@ -16,7 +16,7 @@ services:
retries: 10
edxapp:
image: edxops/edxapp:latest
command: bash -c 'source /edx/app/edxapp/edxapp_env && cd /edx/app/edxapp/edx-platform/ && paver update_db'
command: bash -c 'source /edx/app/edxapp/edxapp_env && cd /edx/app/edxapp/edx-platform/ && make migrate'
volumes:
- ../../:/edx/app/edxapp/edx-platform
depends_on:

View File

@@ -9,14 +9,35 @@ jobs:
# See also https://docs.docker.com/docker-hub/builds/
push:
runs-on: ubuntu-latest
if: github.event_name == 'push'
if: github.event_name == 'push'
strategy:
matrix:
variant:
- "lms_dev"
- "cms_dev"
- "cms"
- "lms"
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Build and Push docker image
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Build and push lms base docker image
env:
DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }}
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
run : make docker_push
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
run : make docker_tag_build_push_${{matrix.variant}}

View File

@@ -1,50 +0,0 @@
name: Docs build
on:
pull_request:
push:
branches:
- master
jobs:
tests:
name: Docs build
runs-on: ${{ matrix.os }}
strategy:
matrix:
python-version: ['3.8']
os: ['ubuntu-20.04']
steps:
- uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Install system requirements
run: sudo apt update && sudo apt install -y libxmlsec1-dev
- name: Install pip
run: python -m pip install -r requirements/pip.txt
- name: Get pip cache dir
id: pip-cache-dir
run: echo "::set-output name=dir::$(pip cache dir)"
- name: Cache pip dependencies
id: cache-dependencies
uses: actions/cache@v3
with:
path: ${{ steps.pip-cache-dir.outputs.dir }}
key: ${{ runner.os }}-pip-${{ hashFiles('requirements/edx/development.txt') }}
restore-keys: ${{ runner.os }}-pip-
- name: Install python dependencies
run: make dev-requirements
- name: Install docs requirements
run: pip install -r requirements/edx/doc.txt
- name: Docs build
run: make docs

View File

@@ -2,8 +2,6 @@ name: Javascript tests
on:
pull_request:
branches:
- master
push:
branches:
- master

View File

@@ -14,7 +14,7 @@ jobs:
uses: actions/checkout@v2
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.TOOLS_EDX_ECR_USER_AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.TOOLS_EDX_ECR_USER_AWS_SECRET_ACCESS_KEY }}

View File

@@ -14,7 +14,7 @@ jobs:
matrix:
include:
- module-name: lms-1
path: "--django-settings-module=lms.envs.test lms/djangoapps/badges/ lms/djangoapps/branding/ lms/djangoapps/bulk_email/ lms/djangoapps/bulk_enroll/ lms/djangoapps/bulk_user_retirement/ lms/djangoapps/ccx/ lms/djangoapps/certificates/ lms/djangoapps/commerce/ lms/djangoapps/course_api/ lms/djangoapps/course_blocks/ lms/djangoapps/course_home_api/ lms/djangoapps/course_wiki/ lms/djangoapps/coursewarehistoryextended/ lms/djangoapps/debug/ lms/djangoapps/courseware/ lms/djangoapps/course_goals/ lms/djangoapps/rss_proxy/ lms/djangoapps/save_for_later/"
path: "--django-settings-module=lms.envs.test lms/djangoapps/badges/ lms/djangoapps/branding/ lms/djangoapps/bulk_email/ lms/djangoapps/bulk_enroll/ lms/djangoapps/bulk_user_retirement/ lms/djangoapps/ccx/ lms/djangoapps/certificates/ lms/djangoapps/commerce/ lms/djangoapps/course_api/ lms/djangoapps/course_blocks/ lms/djangoapps/course_home_api/ lms/djangoapps/course_wiki/ lms/djangoapps/coursewarehistoryextended/ lms/djangoapps/debug/ lms/djangoapps/courseware/ lms/djangoapps/course_goals/ lms/djangoapps/rss_proxy/"
- module-name: lms-2
path: "--django-settings-module=lms.envs.test lms/djangoapps/gating/ lms/djangoapps/grades/ lms/djangoapps/instructor/ lms/djangoapps/instructor_analytics/ lms/djangoapps/discussion/ lms/djangoapps/edxnotes/ lms/djangoapps/email_marketing/ lms/djangoapps/experiments/ lms/djangoapps/instructor_task/ lms/djangoapps/learner_dashboard/ lms/djangoapps/learner_recommendations/ lms/djangoapps/learner_home/ lms/djangoapps/lms_initialization/ lms/djangoapps/lms_xblock/ lms/djangoapps/lti_provider/ lms/djangoapps/mailing/ lms/djangoapps/mobile_api/ lms/djangoapps/monitoring/ lms/djangoapps/ora_staff_grader/ lms/djangoapps/program_enrollments/ lms/djangoapps/rss_proxy lms/djangoapps/static_template_view/ lms/djangoapps/staticbook/ lms/djangoapps/support/ lms/djangoapps/survey/ lms/djangoapps/teams/ lms/djangoapps/tests/ lms/djangoapps/user_tours/ lms/djangoapps/verify_student/ lms/djangoapps/mfe_config_api/ lms/envs/ lms/lib/ lms/tests.py"
- module-name: openedx-1
@@ -71,7 +71,7 @@ jobs:
# https://github.com/marketplace/actions/alls-green#why
# https://github.com/orgs/community/discussions/33579
success:
name: Tests successful
name: Pylint checks successful
if: always()
needs:
- run-pylint

View File

@@ -64,7 +64,6 @@
"lms/djangoapps/ora_staff_grader/",
"lms/djangoapps/program_enrollments/",
"lms/djangoapps/rss_proxy/",
"lms/djangoapps/save_for_later/",
"lms/djangoapps/static_template_view/",
"lms/djangoapps/staticbook/",
"lms/djangoapps/support/",

View File

@@ -81,7 +81,7 @@ jobs:
# https://github.com/marketplace/actions/alls-green#why
# https://github.com/orgs/community/discussions/33579
success:
name: Tests successful
name: Unit tests successful
if: (github.repository == 'edx/edx-platform-private') || (github.repository == 'openedx/edx-platform' && (startsWith(github.base_ref, 'open-release') == false))
needs:
- run-tests

View File

@@ -12,6 +12,10 @@ on:
description: 'Name of package to upgrade'
required: true
type: string
version:
description: 'Version number to upgrade to in constraints.txt (only needed if pinned)'
default: ''
type: string
change_desc:
description: |
Description of change, for commit message and PR. (What does the new version add or fix?)
@@ -37,6 +41,13 @@ jobs:
with:
python-version: "3.8"
- name: Update any pinned dependencies
env:
NEW_VERSION: "${{ inputs.version }}"
PACKAGE: "${{ inputs.package }}"
run: |
sed 's/^\('$PACKAGE'[^#]*\)==[^ #]\+/\1=='$NEW_VERSION'/' -i requirements/constraints.txt
- name: Run make upgrade-package
env:
PACKAGE: "${{ inputs.package }}"
@@ -84,6 +95,7 @@ jobs:
body: |
${{ env.body_prefix }}PR generated by workflow `${{ github.workflow_ref }}` on behalf of @${{ github.triggering_actor }}.
assignees: "${{ github.triggering_actor }}"
reviewers: "${{ github.triggering_actor }}"
- name: Job summary
env:

View File

@@ -5,6 +5,11 @@ build:
tools:
python: "3.8"
sphinx:
configuration: docs/conf.py
python:
install:
- requirements: "requirements/edx/doc.txt"
- method: pip
path: .

View File

@@ -54,9 +54,14 @@ RUN apt-get update && \
python3-venv \
python3.8 \
python3.8-minimal \
# python3-dev: required for building mysqlclient python package version 2.2.0
python3-dev \
libpython3.8 \
libpython3.8-stdlib \
libmysqlclient21 \
# libmysqlclient-dev: required for building mysqlclient python package version 2.2.0
libmysqlclient-dev \
pkg-config \
libssl1.1 \
libxmlsec1-openssl \
# lynx: Required by https://github.com/openedx/edx-platform/blob/b489a4ecb122/openedx/core/lib/html_to_text.py#L16
@@ -85,13 +90,10 @@ FROM minimal-system as builder-production
RUN apt-get update && \
apt-get -y install --no-install-recommends \
curl \
pkg-config \
libmysqlclient-dev \
libssl-dev \
libxml2-dev \
libxmlsec1-dev \
libxslt1-dev \
python3-dev \
libffi-dev \
libfreetype6-dev \
libgeos-dev \
@@ -114,12 +116,19 @@ COPY requirements requirements
RUN pip install -r requirements/pip.txt
RUN pip install -r requirements/edx/base.txt
# Install node and node modules
# Install node and npm
RUN nodeenv /edx/app/edxapp/nodeenv --node=16.14.0 --prebuilt
RUN npm install -g npm@8.5.x
# This script is used by an npm post-install hook.
# We copy it into the image now so that it will be available when we run `npm install` in the next step.
# The script itself will copy certain modules into some uber-legacy parts of edx-platform which still use RequireJS.
COPY scripts/copy-node-modules.sh scripts/copy-node-modules.sh
# Install node modules
COPY package.json package.json
COPY package-lock.json package-lock.json
RUN npm set progress=false && npm install
RUN npm set progress=false && npm ci
# The builder-development stage is a temporary stage that installs python modules required for development purposes
# The built artifacts from this stage are then copied to the development stage.

View File

@@ -1,9 +1,10 @@
# Do things in edx-platform
.PHONY: api-docs-sphinx api-docs base-requirements check-types clean \
.PHONY: base-requirements check-types clean \
compile-requirements detect_changed_source_translations dev-requirements \
docker_auth docker_build docker_push docker_tag docs extract_translations \
guides help lint-imports local-requirements pre-requirements pull \
pull_translations push_translations requirements shell swagger \
docker_auth docker_build docker_tag_build_push_lms docker_tag_build_push_lms_dev \
docker_tag_build_push_cms docker_tag_build_push_cms_dev docs extract_translations \
guides help lint-imports local-requirements migrate migrate-lms migrate-cms \
pre-requirements pull pull_translations push_translations requirements shell swagger \
technical-docs test-requirements ubuntu-requirements upgrade-package upgrade
# Careful with mktemp syntax: it has to work on Mac and Ubuntu, which have differences.
@@ -23,24 +24,17 @@ clean: ## archive and delete most git-ignored files
tar xf $(PRIVATE_FILES)
rm $(PRIVATE_FILES)
SWAGGER = docs/swagger.yaml
SWAGGER = docs/lms-openapi.yaml
docs: api-docs guides technical-docs ## build all the developer documentation for this repository
docs: guides technical-docs ## build all the developer documentation for this repository
swagger: ## generate the swagger.yaml file
DJANGO_SETTINGS_MODULE=docs.docs_settings python manage.py lms generate_swagger --generator-class=edx_api_doc_tools.ApiSchemaGenerator -o $(SWAGGER)
api-docs-sphinx: swagger ## generate the sphinx source files for api-docs
rm -f docs/api/gen/*
python docs/sw2sphinxopenapi.py $(SWAGGER) docs/api/gen
api-docs: api-docs-sphinx ## build the REST api docs
cd docs/api; make html
technical-docs: ## build the technical docs
$(MAKE) -C docs/technical html
guides: ## build the developer guide docs
guides: swagger ## build the developer guide docs
cd docs/guides; make clean html
extract_translations: ## extract localizable strings from sources
@@ -101,12 +95,13 @@ shell: ## launch a bash shell in a Docker container with all edx-platform depend
# Order is very important in this list: files must appear after everything they include!
REQ_FILES = \
requirements/edx/coverage \
requirements/edx/doc \
requirements/edx/paver \
requirements/edx-sandbox/py38 \
requirements/edx/base \
requirements/edx/doc \
requirements/edx/testing \
requirements/edx/development \
requirements/edx/assets \
scripts/xblock/requirements
define COMMON_CONSTRAINTS_TEMP_COMMENT
@@ -124,6 +119,8 @@ compile-requirements: pre-requirements $(COMMON_CONSTRAINTS_TXT) ## Re-compile *
@# Bootstrapping: Rebuild pip and pip-tools first, and then install them
@# so that if there are any failures we'll know now, rather than the next
@# time someone tries to use the outputs.
sed '/^django-simple-history==/d' requirements/common_constraints.txt > requirements/common_constraints.tmp
mv requirements/common_constraints.tmp requirements/common_constraints.txt
pip-compile -v --allow-unsafe ${COMPILE_OPTS} -o requirements/pip.txt requirements/pip.in
pip install -r requirements/pip.txt
@@ -149,34 +146,40 @@ upgrade-package: ## update just one package to the latest usable release
check-types: ## run static type-checking tests
mypy
docker_build:
docker_auth:
echo "$$DOCKERHUB_PASSWORD" | docker login -u "$$DOCKERHUB_USERNAME" --password-stdin
docker_build: docker_auth
DOCKER_BUILDKIT=1 docker build . --build-arg SERVICE_VARIANT=lms --build-arg SERVICE_PORT=8000 --target development -t openedx/lms-dev
DOCKER_BUILDKIT=1 docker build . --build-arg SERVICE_VARIANT=lms --build-arg SERVICE_PORT=8000 --target production -t openedx/lms
DOCKER_BUILDKIT=1 docker build . --build-arg SERVICE_VARIANT=cms --build-arg SERVICE_PORT=8010 --target development -t openedx/cms-dev
DOCKER_BUILDKIT=1 docker build . --build-arg SERVICE_VARIANT=cms --build-arg SERVICE_PORT=8010 --target production -t openedx/cms
docker_tag: docker_build
docker tag openedx/lms openedx/lms:${GITHUB_SHA}
docker tag openedx/lms-dev openedx/lms-dev:${GITHUB_SHA}
docker tag openedx/cms openedx/cms:${GITHUB_SHA}
docker tag openedx/cms-dev openedx/cms-dev:${GITHUB_SHA}
docker_tag_build_push_lms: docker_auth
docker buildx build -t openedx/lms:latest -t openedx/lms:${GITHUB_SHA} --platform linux/amd64,linux/arm64 --build-arg SERVICE_VARIANT=lms --build-arg SERVICE_PORT=8000 --target production --push .
docker_auth:
echo "$$DOCKERHUB_PASSWORD" | docker login -u "$$DOCKERHUB_USERNAME" --password-stdin
docker_tag_build_push_lms_dev: docker_auth
docker buildx build -t openedx/lms-dev:latest -t openedx/lms-dev:${GITHUB_SHA} --platform linux/amd64,linux/arm64 --build-arg SERVICE_VARIANT=lms --build-arg SERVICE_PORT=8000 --target development --push .
docker_push: docker_tag docker_auth ## push to docker hub
docker push "openedx/lms:latest"
docker push "openedx/lms:${GITHUB_SHA}"
docker push "openedx/lms-dev:latest"
docker push "openedx/lms-dev:${GITHUB_SHA}"
docker push "openedx/cms:latest"
docker push "openedx/cms:${GITHUB_SHA}"
docker push "openedx/cms-dev:latest"
docker push "openedx/cms-dev:${GITHUB_SHA}"
docker_tag_build_push_cms: docker_auth
docker buildx build -t openedx/cms:latest -t openedx/cms:${GITHUB_SHA} --platform linux/amd64,linux/arm64 --build-arg SERVICE_VARIANT=cms --build-arg SERVICE_PORT=8010 --target production --push .
docker_tag_build_push_cms_dev: docker_auth
docker buildx build -t openedx/cms-dev:latest -t openedx/cms-dev:${GITHUB_SHA} --platform linux/amd64,linux/arm64 --build-arg SERVICE_VARIANT=cms --build-arg SERVICE_PORT=8010 --target development --push .
lint-imports:
lint-imports
migrate-lms:
python manage.py lms showmigrations --database default --traceback --pythonpath=.
python manage.py lms migrate --database default --traceback --pythonpath=.
migrate-cms:
python manage.py cms showmigrations --database default --traceback --pythonpath=.
python manage.py cms migrate --database default --noinput --traceback --pythonpath=.
migrate: migrate-lms migrate-cms
# WARNING (EXPERIMENTAL):
# This installs the Ubuntu requirements necessary to make `pip install` and some other basic
# dev commands to pass. This is not necessarily everything needed to get a working edx-platform.

View File

@@ -25,8 +25,8 @@ platform. Functionally, the edx-platform repository provides two services:
* CMS (Content Management Service), which powers Open edX Studio, the platform's learning content authoring environment; and
* LMS (Learning Management Service), which delivers learning content.
Installation
************
Getting Started
***************
Installing and running an Open edX instance is not simple. We strongly
recommend that you use a service provider to run the software for you. They
@@ -122,6 +122,13 @@ Contributions are welcome! The first step is to submit a signed
information it also contains guidelines for how to maintain high code
quality, which will make your contribution more likely to be accepted.
New features are accepted. Discussing your new ideas with the maintainers
before you write code will also increase the chances that your work is accepted.
Code of Conduct
***************
Please read the `Community Code of Conduct`_ for interacting with this repository.
Reporting Security Issues
*************************
@@ -131,3 +138,12 @@ security@edx.org.
.. _individual contributor agreement: https://openedx.org/cla
.. _CONTRIBUTING: https://github.com/openedx/.github/blob/master/CONTRIBUTING.md
.. _Community Code of Conduct: https://openedx.org/code-of-conduct/
People
******
The current maintainers of this repository can be found on `Backstage`_.
.. _Backstage: https://backstage.openedx.org/catalog/default/component/edx-platform

16
catalog-info.yaml Normal file
View File

@@ -0,0 +1,16 @@
# This file records information about this repo. Its use is described in OEP-55:
# https://open-edx-proposals.readthedocs.io/en/latest/processes/oep-0055-proc-project-maintainers.html
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: 'edx-platform'
description: "The monolith at the center of the Open edX platform"
links:
- url: "https://docs.openedx.org"
title: "Documentation"
icon: "Web"
spec:
owner: group:arch-bom
type: 'service'
lifecycle: 'production'

View File

@@ -1,2 +1 @@
# lint-amnesty, pylint: disable=missing-module-docstring
default_app_config = 'cms.djangoapps.api.apps.ApiConfig'

View File

@@ -3,7 +3,7 @@ URLs for the Studio API app
"""
from django.conf.urls import include
from django.urls import include
from django.urls import path
app_name = 'cms.djangoapps.api'

View File

@@ -8,7 +8,6 @@ from cms.djangoapps.contentstore.api.views import course_import, course_quality,
app_name = 'contentstore'
helper = "{0,1}"
urlpatterns = [
re_path(fr'^v0/import/{settings.COURSE_ID_PATTERN}/$',

View File

@@ -0,0 +1,672 @@
"""Views for assets"""
import json
import logging
import math
import re
from functools import partial
from urllib.parse import urljoin
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseBadRequest, HttpResponseNotFound
from django.shortcuts import redirect
from django.utils.translation import gettext as _
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_http_methods, require_POST
from opaque_keys.edx.keys import AssetKey, CourseKey
from pymongo import ASCENDING, DESCENDING
from common.djangoapps.edxmako.shortcuts import render_to_response
from common.djangoapps.student.auth import has_course_author_access
from common.djangoapps.util.date_utils import get_default_time_display
from common.djangoapps.util.json_request import JsonResponse
from openedx.core.djangoapps.contentserver.caching import del_cached_content
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from xmodule.contentstore.content import StaticContent # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.contentstore.django import contentstore # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.exceptions import NotFoundError # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.exceptions import ItemNotFoundError # lint-amnesty, pylint: disable=wrong-import-order
from .exceptions import AssetNotFoundException, AssetSizeTooLargeException
from .utils import reverse_course_url, get_files_uploads_url
from .toggles import use_new_files_uploads_page
# Default values for the asset-listing query parameters accepted by the
# assets handlers; a parameter absent from the request falls back to the
# value listed here (see `_get_requested_attribute`).
REQUEST_DEFAULTS = {
    'page': 0,
    'page_size': 50,
    'sort': 'date_added',
    'direction': '',
    'asset_type': '',
    'text_search': '',
}
def handle_assets(request, course_key_string=None, asset_key_string=None):
    '''
    The restful handler for assets.

    It allows retrieval of all the assets (as an HTML page), as well as uploading new assets,
    deleting assets, and changing the 'locked' state of an asset.

    GET
        html: return an html page which will show all course assets. Note that only the asset container
            is returned and that the actual assets are filled in with a client-side request.
        json: returns a page of assets. The following parameters are supported:
            page: the desired page of results (defaults to 0)
            page_size: the number of items per page (defaults to 50)
            sort: the asset field to sort by (defaults to 'date_added')
            direction: the sort direction (defaults to 'descending')
            asset_type: the file type to filter items to (defaults to All)
            text_search: string to filter results by file name (defaults to '')
    POST
        json: create or update an asset. The only updating that can be done is changing the lock state.
    PUT
        json: create or update an asset. The only updating that can be done is changing the lock state.
    DELETE
        json: delete an asset
    '''
    course_key = CourseKey.from_string(course_key_string)
    if not has_course_author_access(request.user, course_key):
        raise PermissionDenied()

    response_format = _get_response_format(request)
    if not _request_response_format_is_json(request, response_format):
        # HTML is only served for GET; anything else is unsupported.
        if request.method == 'GET':
            return _asset_index(request, course_key)
        return HttpResponseNotFound()

    if request.method == 'GET':
        return _assets_json(request, course_key)
    # POST, PUT, and DELETE all flow through the single update endpoint.
    asset_key = AssetKey.from_string(asset_key_string) if asset_key_string else None
    return update_asset(request, course_key, asset_key)
def get_asset_usage_path(request, course_key, asset_key_string):
    """
    Get a list of units with ancestors that use given asset.
    """
    course_key = CourseKey.from_string(course_key)
    if not has_course_author_access(request.user, course_key):
        raise PermissionDenied()

    asset_location = AssetKey.from_string(asset_key_string) if asset_key_string else None
    static_path = StaticContent.get_static_path_from_location(asset_location)

    def _usage_label(block):
        # Build a "Subsection - Unit / Block" breadcrumb for a matching block.
        unit = block.get_parent()
        subsection = unit.get_parent()
        return '{} - {} / {}'.format(
            getattr(subsection, 'display_name', ''),
            getattr(unit, 'display_name', ''),
            getattr(block, 'display_name', ''),
        )

    usage_locations = []
    verticals = modulestore().get_items(
        course_key,
        qualifiers={
            'category': 'vertical'
        },
    )
    for vertical in verticals:
        for block in vertical.get_children():
            if getattr(block, 'category', '') == 'video':
                # Video blocks reference assets through their handout field.
                handout = getattr(block, 'handout', '')
                if handout and str(asset_location) in handout:
                    usage_locations.append(_usage_label(block))
            else:
                # Other blocks embed the asset path directly in their data.
                data = getattr(block, 'data', '')
                if static_path in data or str(asset_location) in data:
                    usage_locations.append(_usage_label(block))
    return JsonResponse({'usage_locations': usage_locations})
def _get_response_format(request):
return request.GET.get('format') or request.POST.get('format') or 'html'
def _request_response_format_is_json(request, response_format):
return response_format == 'json' or 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json')
def _asset_index(request, course_key):
    '''
    Display an editable asset library.

    Redirects to the new files/uploads MFE page when that toggle is enabled;
    otherwise renders the legacy asset_index template (the asset list itself
    is fetched client-side via `asset_callback_url`).
    '''
    course_block = modulestore().get_course(course_key)
    if use_new_files_uploads_page(course_key):
        return redirect(get_files_uploads_url(course_key))

    context = {
        'language_code': request.LANGUAGE_CODE,
        'context_course': course_block,
        'max_file_size_in_mbs': settings.MAX_ASSET_UPLOAD_FILE_SIZE_IN_MB,
        'chunk_size_in_mbs': settings.UPLOAD_CHUNK_SIZE_IN_MB,
        'max_file_size_redirect_url': settings.MAX_ASSET_UPLOAD_FILE_SIZE_URL,
        'asset_callback_url': reverse_course_url('assets_handler', course_key),
    }
    return render_to_response('asset_index.html', context)
def _assets_json(request, course_key):
    '''
    Return a JSON page of course assets.

    Honors the page/page_size/sort/direction/asset_type/text_search query
    parameters (defaults in REQUEST_DEFAULTS). When the requested page lies
    past the end of the result set, the final page is re-queried instead.
    '''
    options = _parse_request_to_dictionary(request)

    filter_parameters = {}
    if options['requested_asset_type']:
        invalid_filter_error = _get_error_if_invalid_parameters(options['requested_asset_type'])
        if invalid_filter_error is not None:
            return invalid_filter_error
        filter_parameters.update(_get_content_type_filter_for_mongo(options['requested_asset_type']))
    if options['requested_text_search']:
        filter_parameters.update(_get_displayname_search_filter_for_mongo(options['requested_text_search']))

    page_size = options['requested_page_size']
    current_page = _get_current_page(options['requested_page'])
    first_index = _get_first_asset_index(current_page, page_size)
    query_options = {
        'current_page': current_page,
        'page_size': page_size,
        'sort': _get_sort_type_and_direction(options),
        'filter_params': filter_parameters,
    }
    assets, total_count = _get_assets_for_page(course_key, query_options)

    # Requested page is past the end of the (non-empty) result set: clamp
    # to the final page and query again.
    if options['requested_page'] > 0 and first_index >= total_count and total_count > 0:  # lint-amnesty, pylint: disable=chained-comparison
        _update_options_to_requery_final_page(query_options, total_count)
        current_page = query_options['current_page']
        first_index = _get_first_asset_index(current_page, page_size)
        assets, total_count = _get_assets_for_page(course_key, query_options)

    last_index = first_index + len(assets)
    return JsonResponse({
        'start': first_index,
        'end': last_index,
        'page': current_page,
        'pageSize': page_size,
        'totalCount': total_count,
        'assets': _get_assets_in_json_format(assets, course_key),
        'sort': options['requested_sort'],
        'direction': options['requested_sort_direction'],
        'assetTypes': _get_requested_file_types_from_requested_filter(options['requested_asset_type']),
        'textSearch': options['requested_text_search'],
    })
def _parse_request_to_dictionary(request):
    """Collect the supported query parameters into a normalized options dict."""
    raw = {
        name: _get_requested_attribute(request, name)
        for name in ('page', 'page_size', 'sort', 'direction', 'asset_type', 'text_search')
    }
    return {
        'requested_page': int(raw['page']),
        'requested_page_size': int(raw['page_size']),
        'requested_sort': raw['sort'],
        'requested_sort_direction': raw['direction'],
        'requested_asset_type': raw['asset_type'],
        'requested_text_search': raw['text_search'],
    }
def _get_requested_attribute(request, attribute):
    """Read *attribute* from the query string, falling back to REQUEST_DEFAULTS."""
    default = REQUEST_DEFAULTS.get(attribute)
    return request.GET.get(attribute, default)
def _get_error_if_invalid_parameters(requested_filter):
    """Return a 400 JsonResponse if *requested_filter* names unknown asset types, else None."""
    requested_file_types = _get_requested_file_types_from_requested_filter(requested_filter)
    # 'OTHER' is a valid filter even though the settings file does not list it.
    all_valid_file_types = set(_get_files_and_upload_type_filters()) | {'OTHER'}
    invalid_filters = [
        file_type for file_type in requested_file_types
        if file_type not in all_valid_file_types
    ]
    if not invalid_filters:
        return None
    error_message = {
        'error_code': 'invalid_asset_type_filter',
        'developer_message': 'The asset_type parameter to the request is invalid. '
                             'The {} filters are not described in the settings.FILES_AND_UPLOAD_TYPE_FILTERS '
                             'dictionary.'.format(invalid_filters)
    }
    return JsonResponse({'error': error_message}, status=400)
def _get_content_type_filter_for_mongo(requested_filter):
    """
    Construct and return pymongo query dict for the given content type categories.
    """
    requested_file_types = _get_requested_file_types_from_requested_filter(requested_filter)
    clauses = []
    if 'OTHER' in requested_file_types:
        clauses.append(_get_mongo_expression_for_type_other())
        # The remaining names are looked up as explicit categories.
        requested_file_types.remove('OTHER')
    clauses.append(_get_mongo_expression_for_type_filter(requested_file_types))
    return {'$or': clauses}
def _get_mongo_expression_for_type_other():
    """
    Construct and return pymongo expression dict for the 'OTHER' content type category.

    'OTHER' matches any contentType not claimed by a named filter category.
    """
    known_types = []
    for extensions in _get_files_and_upload_type_filters().values():
        known_types.extend(extensions)
    return {'contentType': {'$nin': known_types}}
def _get_mongo_expression_for_type_filter(requested_file_types):
    """
    Construct and return pymongo expression dict for the named content type categories.

    The named content categories are the keys of the FILES_AND_UPLOAD_TYPE_FILTERS setting that are not 'OTHER':
    'Images', 'Documents', 'Audio', and 'Code'.
    """
    filters_by_name = _get_files_and_upload_type_filters()
    matching_types = [
        content_type
        for name in requested_file_types
        for content_type in filters_by_name[name]
    ]
    return {'contentType': {'$in': matching_types}}
def _get_displayname_search_filter_for_mongo(text_search):
"""
Return a pymongo query dict for the given search string, using case insensitivity.
"""
filters = []
text_search_tokens = text_search.split()
for token in text_search_tokens:
escaped_token = re.escape(token)
filters.append({
'displayname': {
'$regex': escaped_token,
'$options': 'i',
},
})
return {
'$and': filters,
}
def _get_files_and_upload_type_filters():
    """Return the FILES_AND_UPLOAD_TYPE_FILTERS setting (category name -> list of content types)."""
    return settings.FILES_AND_UPLOAD_TYPE_FILTERS
def _get_requested_file_types_from_requested_filter(requested_filter):
return requested_filter.split(',') if requested_filter else []
def _get_sort_type_and_direction(request_options):
    """Build the single-entry pymongo sort spec ``[(field, direction)]`` from request options."""
    field = _get_mongo_sort_from_requested_sort(request_options['requested_sort'])
    direction = _get_sort_direction_from_requested_sort(request_options['requested_sort_direction'])
    return [(field, direction)]
def _get_mongo_sort_from_requested_sort(requested_sort):
"""Function returns sorts dataset based on the key provided"""
if requested_sort == 'date_added':
sort = 'uploadDate'
elif requested_sort == 'display_name':
sort = 'displayname'
else:
sort = requested_sort
return sort
def _get_sort_direction_from_requested_sort(requested_sort_direction):
    """Return pymongo ASCENDING for 'asc' (any case); DESCENDING for everything else."""
    return ASCENDING if requested_sort_direction.lower() == 'asc' else DESCENDING
def _get_current_page(requested_page):
return max(requested_page, 0)
def _get_first_asset_index(current_page, page_size):
return current_page * page_size
def _get_assets_for_page(course_key, options):
    """Fetch one page of course assets from the contentstore using the given query options."""
    page = options['current_page']
    page_size = options['page_size']
    # Normalize an empty/falsy filter dict to None for the contentstore API.
    filter_params = options['filter_params'] or None
    return contentstore().get_all_content_for_course(
        course_key,
        start=page * page_size,
        maxresults=page_size,
        sort=options['sort'],
        filter_params=filter_params,
    )
def _update_options_to_requery_final_page(query_options, total_asset_count):
"""sets current_page value based on asset count and page_size"""
query_options['current_page'] = int(math.floor((total_asset_count - 1) / query_options['page_size']))
def _get_assets_in_json_format(assets, course_key):
    """Convert raw asset documents from the contentstore into the client-facing JSON structure."""
    json_assets = []
    for asset in assets:
        json_assets.append(get_asset_json(
            asset['displayname'],
            asset['contentType'],
            asset['uploadDate'],
            asset['asset_key'],
            _get_thumbnail_asset_key(asset, course_key),
            asset.get('locked', False),   # documents without the field count as unlocked
            course_key,
            asset.get('length', None),    # size may be absent on older documents
        ))
    return json_assets
def update_course_run_asset(course_key, upload_file):
    """
    Validate an uploaded file and save it (plus a generated thumbnail) as a course asset.

    Returns the saved StaticContent on success, or an error response when the course
    does not exist. Raises AssetSizeTooLargeException when the file exceeds the
    configured maximum upload size.
    """
    course_exists_response = _get_error_if_course_does_not_exist(course_key)
    if course_exists_response is not None:
        return course_exists_response
    file_metadata = _get_file_metadata_as_dictionary(upload_file)
    is_file_too_large = _check_file_size_is_too_large(file_metadata)
    if is_file_too_large:
        error_message = _get_file_too_large_error_message(file_metadata['filename'])
        raise AssetSizeTooLargeException(error_message)
    content, temporary_file_path = _get_file_content_and_path(file_metadata, course_key)
    (thumbnail_content, thumbnail_location) = contentstore().generate_thumbnail(content,
                                                                                tempfile_path=temporary_file_path)
    # delete cached thumbnail even if one couldn't be created this time (else the old thumbnail will continue to show)
    del_cached_content(thumbnail_location)
    # Only point the asset at the thumbnail when one was actually generated.
    if _check_thumbnail_uploaded(thumbnail_content):
        content.thumbnail_location = thumbnail_location
    contentstore().save(content)
    # Evict any stale cached copy of the asset itself.
    del_cached_content(content.location)
    return content
@require_POST
@ensure_csrf_cookie
@login_required
def _upload_asset(request, course_key):
    """
    Handle a POSTed file upload for the given course and return a JSON response.

    Returns 400 when the course is missing or uses a deprecated course key,
    413 when the file exceeds the configured size limit, and the serialized
    asset JSON plus a success message otherwise.
    """
    course_exists_error = _get_error_if_course_does_not_exist(course_key)
    if course_exists_error is not None:
        return course_exists_error
    if course_key.deprecated:
        return JsonResponse({'error': 'Uploading assets for the legacy course is not available.'}, status=400)
    # compute a 'filename' which is similar to the location formatting, we're
    # using the 'filename' nomenclature since we're using a FileSystem paradigm
    # here. We're just imposing the Location string formatting expectations to
    # keep things a bit more consistent
    upload_file = request.FILES['file']
    try:
        content = update_course_run_asset(course_key, upload_file)
    except AssetSizeTooLargeException as exception:
        # 413 Payload Too Large, with the translated size-limit message.
        return JsonResponse({'error': str(exception)}, status=413)
    # readback the saved content - we need the database timestamp
    readback = contentstore().find(content.location)
    locked = getattr(content, 'locked', False)
    length = getattr(content, 'length', None)
    return JsonResponse({
        'asset': get_asset_json(
            content.name,
            content.content_type,
            readback.last_modified_at,
            content.location,
            content.thumbnail_location,
            locked,
            course_key,
            length,
        ),
        'msg': _('Upload completed')
    })
def _get_error_if_course_does_not_exist(course_key):
    """Return an HttpResponseBadRequest when the course cannot be loaded; otherwise None (implicitly)."""
    try:
        modulestore().get_course(course_key)
    except ItemNotFoundError:
        logging.error('Could not find course: %s', course_key)
        return HttpResponseBadRequest()
def _get_file_metadata_as_dictionary(upload_file):
    """
    Bundle the uploaded file with its name, MIME type, and size.

    The 'filename' nomenclature mirrors the Location string formatting used by the
    FileSystem-style asset paths, to keep naming a bit more consistent.
    """
    metadata = {
        'upload_file': upload_file,
        'filename': upload_file.name,
        'mime_type': upload_file.content_type,
        'upload_file_size': get_file_size(upload_file),
    }
    return metadata
def get_file_size(upload_file):
    """Return the size of the uploaded file in bytes."""
    # Kept as a separate helper so tests can mock file sizes.
    return upload_file.size
def _check_file_size_is_too_large(file_metadata):
    """Return True when the uploaded file exceeds settings.MAX_ASSET_UPLOAD_FILE_SIZE_IN_MB."""
    # The limit is configured in megabytes; compare in bytes (decimal MB, 10**6).
    maximum_size_in_bytes = settings.MAX_ASSET_UPLOAD_FILE_SIZE_IN_MB * 1000 ** 2
    return file_metadata['upload_file_size'] > maximum_size_in_bytes
def _get_file_too_large_error_message(filename):
    """Return a translated, formatted error message for an over-size upload."""
    # Fix: the translatable string previously lacked the {filename} placeholder, so
    # the message never named the rejected file even though filename= was passed
    # to .format() (the keyword argument was silently ignored).
    return _(
        'File {filename} exceeds maximum size of '
        '{maximum_size_in_megabytes} MB.'
    ).format(
        filename=filename,
        maximum_size_in_megabytes=settings.MAX_ASSET_UPLOAD_FILE_SIZE_IN_MB,
    )
def _get_file_content_and_path(file_metadata, course_key):
    """
    Build the StaticContent for an upload and return ``(content, temporary_file_path)``.

    ``temporary_file_path`` is only set when the upload was chunked to disk;
    in-memory uploads return None for the path.
    """
    content_location = StaticContent.compute_location(course_key, file_metadata['filename'])
    # NOTE(review): the size is stringified before being handed to StaticContent —
    # presumably downstream consumers accept a string length; confirm before changing.
    upload_file_size = str(file_metadata['upload_file_size'])
    upload_file = file_metadata['upload_file']
    file_can_be_chunked = upload_file.multiple_chunks()
    # Partial pins everything but the payload, so both branches differ only in the data source.
    static_content_partial = partial(StaticContent, content_location, file_metadata['filename'],
                                     file_metadata['mime_type'], length=upload_file_size)
    if file_can_be_chunked:
        content = static_content_partial(upload_file.chunks())
        temporary_file_path = upload_file.temporary_file_path()
    else:
        content = static_content_partial(upload_file.read())
        temporary_file_path = None
    return content, temporary_file_path
def _check_thumbnail_uploaded(thumbnail_content):
"""returns whether thumbnail is None"""
return thumbnail_content is not None
def _get_thumbnail_asset_key(asset, course_key):
"""returns thumbnail asset key"""
# note, due to the schema change we may not have a 'thumbnail_location' in the result set
thumbnail_location = asset.get('thumbnail_location', None)
thumbnail_asset_key = None
if thumbnail_location:
thumbnail_path = thumbnail_location[4]
thumbnail_asset_key = course_key.make_asset_key('thumbnail', thumbnail_path)
return thumbnail_asset_key
# TODO: this method needs improvement. These view decorators should be at the top in an actual view method,
# but this is just a method called by the asset_handler. The asset_handler used by the public studio content API
# just ignores all of this stuff.
@require_http_methods(('DELETE', 'POST', 'PUT'))
@login_required
@ensure_csrf_cookie
def update_asset(request, course_key, asset_key):
    """
    restful CRUD operations for a course asset.
    Currently only DELETE, POST, and PUT methods are implemented.

    asset_path_encoding: the odd /c4x/org/course/category/name repr of the asset (used by Backbone as the id)

    DELETE removes the asset (404 if missing). PUT/POST with a 'file' upload
    delegates to _upload_asset; otherwise the JSON body's 'locked' flag is
    applied to the existing asset.
    """
    if request.method == 'DELETE':
        try:
            delete_asset(course_key, asset_key)
            return JsonResponse()
        except AssetNotFoundException:
            return JsonResponse(status=404)
    elif request.method in ('PUT', 'POST'):
        if 'file' in request.FILES:
            return _upload_asset(request, course_key)
        # update existing asset metadata (currently only the 'locked' flag)
        try:
            modified_asset = json.loads(request.body.decode('utf8'))
        except ValueError:
            return HttpResponseBadRequest()
        contentstore().set_attr(asset_key, 'locked', modified_asset['locked'])
        # delete the asset from the cache so we check the lock status the next time it is requested.
        del_cached_content(asset_key)
        return JsonResponse(modified_asset, status=201)
def _save_content_to_trash(content):
    """Copy the content into the 'trashcan' contentstore so deletions are recoverable."""
    contentstore('trashcan').save(content)
def delete_asset(course_key, asset_key):
    """
    Delete an asset: move it to the trashcan store, remove its thumbnail, delete it
    from the contentstore, and evict it from the cache.

    Raises AssetNotFoundException when no asset exists for ``asset_key``.
    """
    content = _check_existence_and_get_asset_content(asset_key)
    # Save to the trashcan first so the delete below is recoverable.
    _save_content_to_trash(content)
    _delete_thumbnail(content.thumbnail_location, course_key, asset_key)
    contentstore().delete(content.get_id())
    del_cached_content(content.location)
def _check_existence_and_get_asset_content(asset_key):
    """Return the asset's content from the contentstore, or raise AssetNotFoundException."""
    try:
        return contentstore().find(asset_key)
    except NotFoundError as err:
        # Chain the original error so the contentstore failure is preserved in tracebacks
        # (resolves the previously amnestied raise-missing-from lint).
        raise AssetNotFoundException from err
def _delete_thumbnail(thumbnail_location, course_key, asset_key):
    """Best-effort removal of the asset's thumbnail (trashed, deleted, and evicted from cache)."""
    if thumbnail_location is not None:
        # We are ignoring the value of the thumbnail_location-- we only care whether
        # or not a thumbnail has been stored, and we can now easily create the correct path.
        thumbnail_location = course_key.make_asset_key('thumbnail', asset_key.block_id)
        try:
            thumbnail_content = contentstore().find(thumbnail_location)
            _save_content_to_trash(thumbnail_content)
            contentstore().delete(thumbnail_content.get_id())
            del_cached_content(thumbnail_location)
        except Exception:  # pylint: disable=broad-except
            # Deliberately best-effort: a missing/corrupt thumbnail must not block asset deletion.
            logging.warning('Could not delete thumbnail: %s', thumbnail_location)
def get_asset_json(display_name, content_type, date, location, thumbnail_location, locked, course_key, file_size=None):
    """
    Helper method for formatting the asset information to send to client.

    Returns a dict with display metadata, several URL forms for the asset
    (course-relative, LMS-absolute, portable /static/ path, canonicalized full URL),
    its lock state, and the Backbone-compatible 'id' string.
    """
    asset_url = StaticContent.serialize_asset_key_with_slash(location)
    external_url = urljoin(configuration_helpers.get_value('LMS_ROOT_URL', settings.LMS_ROOT_URL), asset_url)
    portable_url = StaticContent.get_static_path_from_location(location)
    return {
        'display_name': display_name,
        'content_type': content_type,
        'date_added': get_default_time_display(date),
        'url': asset_url,
        'external_url': external_url,
        'portable_url': portable_url,
        'thumbnail': StaticContent.serialize_asset_key_with_slash(thumbnail_location) if thumbnail_location else None,
        'locked': locked,
        'static_full_url': StaticContent.get_canonicalized_asset_path(course_key, portable_url, '', []),
        # needed for Backbone delete/update.
        'id': str(location),
        'file_size': file_size,
    }

View File

@@ -18,10 +18,9 @@ from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from xmodule.exceptions import NotFoundError
from xmodule.modulestore.django import modulestore
from xmodule.xml_block import XmlMixin
from cms.djangoapps.models.settings.course_grading import CourseGradingModel
from common.djangoapps.student import auth
from common.djangoapps.student.roles import CourseCreatorRole, OrgContentCreatorRole
import openedx.core.djangoapps.content_staging.api as content_staging_api
from .utils import reverse_course_url, reverse_library_url, reverse_usage_url
@@ -240,7 +239,6 @@ def import_staged_content_from_user_clipboard(parent_key: UsageKey, request) ->
if not user_clipboard:
# Clipboard is empty or expired/error/loading
return None, StaticFileNotices()
block_type = user_clipboard.content.block_type
olx_str = content_staging_api.get_staged_content_olx(user_clipboard.content.id)
static_files = content_staging_api.get_staged_content_static_files(user_clipboard.content.id)
node = etree.fromstring(olx_str)
@@ -248,35 +246,15 @@ def import_staged_content_from_user_clipboard(parent_key: UsageKey, request) ->
with store.bulk_operations(parent_key.course_key):
parent_descriptor = store.get_item(parent_key)
# Some blocks like drag-and-drop only work here with the full XBlock runtime loaded:
parent_xblock = _load_preview_block(request, parent_descriptor)
runtime = parent_xblock.runtime
# Generate the new ID:
id_generator = ImportIdGenerator(parent_key.context_key)
def_id = id_generator.create_definition(block_type, user_clipboard.source_usage_key.block_id)
usage_id = id_generator.create_usage(def_id)
keys = ScopeIds(None, block_type, def_id, usage_id)
# parse_xml is a really messy API. We pass both 'keys' and 'id_generator' and, depending on the XBlock, either
# one may be used to determine the new XBlock's usage key, and the other will be ignored. e.g. video ignores
# 'keys' and uses 'id_generator', but the default XBlock parse_xml ignores 'id_generator' and uses 'keys'.
# For children of this block, obviously only id_generator is used.
xblock_class = runtime.load_block_type(block_type)
# Note: if we find a case where any XBlock needs access to the block-specific static files that were saved to
# export_fs during copying, we could make them available here via runtime.resources_fs before calling parse_xml.
# However, currently the only known case for that is video block's transcript files, and those will
# automatically be "carried over" to the new XBlock even in a different course because the video ID is the same,
# and VAL will thus make the transcript available.
temp_xblock = xblock_class.parse_xml(node, runtime, keys, id_generator)
if xblock_class.has_children and temp_xblock.children:
raise NotImplementedError("We don't yet support pasting XBlocks with children")
temp_xblock.parent = parent_key
# Store a reference to where this block was copied from, in the 'copied_from_block' field (AuthoringMixin)
temp_xblock.copied_from_block = str(user_clipboard.source_usage_key)
# Save the XBlock into modulestore. We need to save the block and its parent for this to work:
new_xblock = store.update_item(temp_xblock, request.user.id, allow_not_found=True)
parent_xblock.children.append(new_xblock.location)
store.update_item(parent_xblock, request.user.id)
new_xblock = _import_xml_node_to_parent(
node,
parent_xblock,
store,
user_id=request.user.id,
slug_hint=user_clipboard.source_usage_key.block_id,
copied_from_block=str(user_clipboard.source_usage_key),
)
# Now handle static files that need to go into Files & Uploads:
notices = _import_files_into_course(
course_key=parent_key.context_key,
@@ -287,6 +265,80 @@ def import_staged_content_from_user_clipboard(parent_key: UsageKey, request) ->
return new_xblock, notices
def _import_xml_node_to_parent(
    node,
    parent_xblock: XBlock,
    # The modulestore we're using
    store,
    # The ID of the user who is performing this operation
    user_id: int,
    # Hint to use as usage ID (block_id) for the new XBlock
    slug_hint: str | None = None,
    # UsageKey of the XBlock that this one is a copy of
    copied_from_block: str | None = None,
) -> XBlock:
    """
    Given an XML node representing a serialized XBlock (OLX), import it into modulestore 'store' as a child of the
    specified parent block. Recursively copy children as needed.

    Returns the newly created and saved XBlock (already appended to the parent's children).
    """
    runtime = parent_xblock.runtime
    parent_key = parent_xblock.scope_ids.usage_id
    block_type = node.tag
    # Generate the new ID:
    id_generator = ImportIdGenerator(parent_key.context_key)
    def_id = id_generator.create_definition(block_type, slug_hint)
    usage_id = id_generator.create_usage(def_id)
    keys = ScopeIds(None, block_type, def_id, usage_id)
    # parse_xml is a really messy API. We pass both 'keys' and 'id_generator' and, depending on the XBlock, either
    # one may be used to determine the new XBlock's usage key, and the other will be ignored. e.g. video ignores
    # 'keys' and uses 'id_generator', but the default XBlock parse_xml ignores 'id_generator' and uses 'keys'.
    # For children of this block, obviously only id_generator is used.
    xblock_class = runtime.load_block_type(block_type)
    # Note: if we find a case where any XBlock needs access to the block-specific static files that were saved to
    # export_fs during copying, we could make them available here via runtime.resources_fs before calling parse_xml.
    # However, currently the only known case for that is video block's transcript files, and those will
    # automatically be "carried over" to the new XBlock even in a different course because the video ID is the same,
    # and VAL will thus make the transcript available.
    child_nodes = []
    if not xblock_class.has_children:
        # No children to worry about. The XML may contain child nodes, but they're not XBlocks.
        temp_xblock = xblock_class.parse_xml(node, runtime, keys, id_generator)
    else:
        # We have to handle the children ourselves, because there are lots of complex interactions between
        # * the vanilla XBlock parse_xml() method, and its lack of API for "create and save a new XBlock"
        # * the XmlMixin version of parse_xml() which only works with ImportSystem, not modulestore or the v2 runtime
        # * the modulestore APIs for creating and saving a new XBlock, which work but don't support XML parsing.
        # We can safely assume that if the XBLock class supports children, every child node will be the XML
        # serialization of a child block, in order. For blocks that don't support children, their XML content/nodes
        # could be anything (e.g. HTML, capa)
        node_without_children = etree.Element(node.tag, **node.attrib)
        if issubclass(xblock_class, XmlMixin):
            # Hack: XBlocks that use "XmlMixin" have their own XML parsing behavior, and in particular if they encounter
            # an XML node that has no children and has only a "url_name" attribute, they'll try to load the XML data
            # from an XML file in runtime.resources_fs. But that file doesn't exist here. So we set at least one
            # additional attribute here to make sure that url_name is not the only attribute; otherwise in some cases,
            # XmlMixin.parse_xml will try to load an XML file that doesn't exist, giving an error. The name and value
            # of this attribute don't matter and should be ignored.
            node_without_children.attrib["x-is-pointer-node"] = "no"
        temp_xblock = xblock_class.parse_xml(node_without_children, runtime, keys, id_generator)
        child_nodes = list(node)
    # Guard: pasting a block whose parse_xml itself populated children is not yet supported.
    if xblock_class.has_children and temp_xblock.children:
        raise NotImplementedError("We don't yet support pasting XBlocks with children")
    temp_xblock.parent = parent_key
    if copied_from_block:
        # Store a reference to where this block was copied from, in the 'copied_from_block' field (AuthoringMixin)
        temp_xblock.copied_from_block = copied_from_block
    # Save the XBlock into modulestore. We need to save the block and its parent for this to work:
    new_xblock = store.update_item(temp_xblock, user_id, allow_not_found=True)
    parent_xblock.children.append(new_xblock.location)
    store.update_item(parent_xblock, user_id)
    # Recurse: each child XML node becomes a child XBlock of the newly created block.
    for child_node in child_nodes:
        _import_xml_node_to_parent(child_node, new_xblock, store, user_id=user_id)
    return new_xblock
def _import_files_into_course(
course_key: CourseKey,
staged_content_id: int,
@@ -377,15 +429,3 @@ def is_item_in_course_tree(item):
ancestor = ancestor.get_parent()
return ancestor is not None
def is_content_creator(user, org):
"""
Check if the user has the role to create content.
This function checks if the User has role to create content
or if the org is supplied, it checks for Org level course content
creator.
"""
return (auth.user_has_role(user, CourseCreatorRole()) or
auth.user_has_role(user, OrgContentCreatorRole(org=org)))

View File

@@ -0,0 +1,121 @@
"""A Command to Copy or uncopy V1 Content Libraries entires to be stored as v2 content libraries."""
import logging
import csv
from textwrap import dedent
from django.core.management import BaseCommand, CommandError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import LibraryLocator
from xmodule.modulestore.django import modulestore
from celery import group
from cms.djangoapps.contentstore.tasks import create_v2_library_from_v1_library, delete_v2_library_from_v1_library
from .prompt import query_yes_no
log = logging.getLogger(__name__)
class Command(BaseCommand):
    """
    Copy or uncopy V1 Content Libraries (default all) entires to be stored as v2 content libraries.
    First Specify the uuid for the collection to store the content libraries in.
    Specfiy --all for all libraries, library ids for specific libraries,
    and -- file followed by the path for a list of libraries from a file.
    Example usage:
    $ ./manage.py cms copy_libraries_from_v1_to_v2 'collection_uuid' --all
    $ ./manage.py cms copy_libraries_from_v1_to_v2 'collection_uuid' --all --uncopy
    $ ./manage.py cms copy_libraries_from_v1_to_v2 'collection_uuid 'library-v1:edX+DemoX+Better_Library'
    $ ./manage.py cms copy_libraries_from_v1_to_v2 'collection_uuid 'library-v1:edX+DemoX+Better_Library' --uncopy
    $ ./manage.py cms copy_libraries_from_v1_to_v2
    '11111111-2111-4111-8111-111111111111'
    './list_of--library-locators.csv --all
    Note:
    This Command Also produces an "output file" which contains the mapping of locators and the status of the copy.
    """
    # NOTE: the class docstring above doubles as the CLI help text (help = dedent(__doc__)),
    # so its wording is user-facing.
    help = dedent(__doc__)
    CONFIRMATION_PROMPT = "Reindexing all libraries might be a time consuming operation. Do you want to continue?"

    def add_arguments(self, parser):
        """Register the positional collection uuid / output csv and the --all / --uncopy flags."""
        parser.add_argument(
            'collection_uuid',
            type=str,
            help='the uuid for the collection to create the content library in.'
        )
        parser.add_argument(
            'output_csv',
            type=str,
            nargs='?',
            default=None,
            help='a file path to write the tasks output to. Without this the result is simply logged.'
        )
        parser.add_argument(
            '--all',
            action='store_true',
            dest='all',
            help='Copy all libraries'
        )
        parser.add_argument(
            '--uncopy',
            action='store_true',
            dest='uncopy',
            help='Delete libraries specified'
        )
        parser.add_argument(
            'library_ids',
            nargs='*',
            default=[],
            help='a space-seperated list of v1 library ids to copy'
        )

    def _parse_library_key(self, raw_value):
        """Parse a library key from its string form; reject keys that are not V1 library locators."""
        result = CourseKey.from_string(raw_value)
        if not isinstance(result, LibraryLocator):
            raise CommandError(f"Argument {raw_value} is not a library key")
        return result

    def handle(self, *args, **options):  # lint-amnesty, pylint: disable=unused-argument
        """Parse args and generate tasks for copying content."""
        if (not options['library_ids'] and not options['all']) or (options['library_ids'] and options['all']):
            raise CommandError("copy_libraries_from_v1_to_v2 requires one or more <library_id>s or the --all flag.")
        if options['all']:
            store = modulestore()
            if query_yes_no(self.CONFIRMATION_PROMPT, default="no"):
                # Strip the branch so each key is the canonical library identifier.
                v1_library_keys = [
                    library.location.library_key.replace(branch=None) for library in store.get_libraries()
                ]
            else:
                # User declined the confirmation prompt; do nothing.
                return
        else:
            v1_library_keys = list(map(self._parse_library_key, options['library_ids']))
        # One celery task per library; --uncopy dispatches deletes instead of copies.
        create_library_task_group = group([
            delete_v2_library_from_v1_library.s(str(v1_library_key), options['collection_uuid'])
            if options['uncopy']
            else create_v2_library_from_v1_library.s(str(v1_library_key), options['collection_uuid'])
            for v1_library_key in v1_library_keys
        ])
        # .get() blocks until every per-library task has finished.
        group_result = create_library_task_group.apply_async().get()
        if options['output_csv']:
            with open(options['output_csv'], 'w', encoding='utf-8', newline='') as file:
                output_writer = csv.writer(file)
                output_writer.writerow(["v1_library_id", "v2_library_id", "status", "error_msg"])
                for result in group_result:
                    # NOTE(review): assumes each task returns a dict whose value order matches
                    # the header row above — confirm against the task implementations.
                    output_writer.writerow(result.values())
        log.info(group_result)

View File

@@ -5,7 +5,7 @@ from django.core.management.base import BaseCommand, CommandError
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from cms.djangoapps.contentstore.xblock_services.xblock_service import delete_orphans
from cms.djangoapps.contentstore.xblock_storage_handlers.view_handlers import delete_orphans
from xmodule.modulestore import ModuleStoreEnum # lint-amnesty, pylint: disable=wrong-import-order

View File

@@ -0,0 +1,95 @@
"""A Command to delete V1 Content Libraries index entires."""
import logging
from textwrap import dedent
from django.core.management import BaseCommand, CommandError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import LibraryLocator
from xmodule.modulestore.django import modulestore
from celery import group
from cms.djangoapps.contentstore.tasks import delete_v1_library
from .prompt import query_yes_no
log = logging.getLogger(__name__)
class Command(BaseCommand):
    """
    Delete V1 Content Libraries (default all) index entires.
    Specfiy --all for all libraries, or space-seperated library ids for specific libraries.
    Note this will leave orphans behind in mongo. use mongo prune to clean them up.
    Example usage:
    ./manage.py cms delete_v1_libraries 'library-v1:edx+eaa'
    ./manage.py cms delete_v1_libraries --all
    Note:
    This Command also produces an "output file" which contains the mapping of locators and the status of the copy.
    """
    # NOTE: the class docstring doubles as the CLI help text via dedent(__doc__).
    help = dedent(__doc__)
    CONFIRMATION_PROMPT = "Deleting all libraries might be a time consuming operation. Do you want to continue?"

    def add_arguments(self, parser):
        """Register the library id list / output csv positionals and the --all flag."""
        parser.add_argument(
            'library_ids',
            nargs='*',
            help='A space-seperated list of v1 library ids to delete'
        )
        parser.add_argument(
            '--all',
            action='store_true',
            dest='all',
            help='Delete all libraries'
        )
        parser.add_argument(
            'output_csv',
            nargs='?',
            default=None,
            help='a file path to write the tasks output to. Without this the result is simply logged.'
        )

    def _parse_library_key(self, raw_value):
        """Parse a library key from its string form; reject keys that are not V1 library locators."""
        result = CourseKey.from_string(raw_value)
        if not isinstance(result, LibraryLocator):
            raise CommandError(f"Argument {raw_value} is not a library key")
        return result

    def handle(self, *args, **options):  # lint-amnesty, pylint: disable=unused-argument
        """Parse args and generate tasks for deleting content."""
        if (not options['library_ids'] and not options['all']) or (options['library_ids'] and options['all']):
            raise CommandError("delete_v1_libraries requires one or more <library_id>s or the --all flag.")
        if options['all']:
            store = modulestore()
            if query_yes_no(self.CONFIRMATION_PROMPT, default="no"):
                v1_library_keys = [
                    library.location.library_key.replace(branch=None) for library in store.get_libraries()
                ]
            else:
                # User declined the confirmation prompt; do nothing.
                return
        else:
            v1_library_keys = list(map(self._parse_library_key, options['library_ids']))
        delete_library_task_group = group([
            delete_v1_library.s(str(v1_library_key)) for v1_library_key in v1_library_keys
        ])
        # .get() blocks until every per-library task has finished.
        group_result = delete_library_task_group.apply_async().get()
        log.info(group_result)
        if options['output_csv']:
            # Fix: 'output_csv' is a plain string (nargs='?'), so the previous
            # options['output_csv'][0] opened a file named after the path's first
            # character; the raw file object also has no writerow(), so use a
            # csv.writer and emit each task's result values as a row (mirroring
            # the copy_libraries_from_v1_to_v2 command).
            import csv  # local import: only needed when an output file was requested
            with open(options['output_csv'], 'w', encoding='utf-8', newline='') as output_file:
                output_writer = csv.writer(output_file)
                output_writer.writerow(["v1_library_id", "v2_library_id", "status", "error_msg"])
                for result in group_result:
                    output_writer.writerow(result.values())

View File

@@ -0,0 +1,125 @@
"""
A Command which, given a mapping of V1 to V2 Libraries,
edits all xblocks in courses which refer to the v1 library to point to the v2 library.
"""
import logging
import csv
from django.core.management import BaseCommand, CommandError
from celery import group
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from cms.djangoapps.contentstore.tasks import (
replace_all_library_source_blocks_ids_for_course,
validate_all_library_source_blocks_ids_for_course,
undo_all_library_source_blocks_ids_for_course
)
log = logging.getLogger(__name__)
class Command(BaseCommand):
    """
    Example usage:
    $ ./manage.py cms replace_v1_lib_refs_with_v2_in_courses '/path/to/library_mappings.csv'
    $ ./manage.py cms replace_v1_lib_refs_with_v2_in_courses '/path/to/library_mappings.csv' --validate
    $ ./manage.py cms replace_v1_lib_refs_with_v2_in_courses '/path/to/library_mappings.csv' --undo
    """

    def add_arguments(self, parser):
        """Register the CSV path positional and the --validate / --undo mode flags."""
        parser.add_argument('file_path', type=str, help='Path to the CSV file.')
        parser.add_argument('--validate', action='store_true', help='Validate previous runs of the command')
        # Fix: the --undo help text was a copy/paste of --validate's and was misleading.
        parser.add_argument('--undo', action='store_true', help='Undo previous runs of the command')

    def replace_all_library_source_blocks_ids(self, v1_to_v2_lib_map):
        """A method to replace 'source_library_id' in all relevant blocks."""
        courses = CourseOverview.get_all_courses()
        # Use Celery to distribute the workload
        tasks = group(
            replace_all_library_source_blocks_ids_for_course.s(
                course,
                v1_to_v2_lib_map
            )
            for course in courses
        )
        results = tasks.apply_async()
        for result in results.get():
            if isinstance(result, Exception):
                # Handle the task failure here
                log.error("Task failed with error: %s", str(result))
                continue
        log.info(
            "Completed replacing all v1 library source ids with v2 library source ids"
        )

    def validate(self, v1_to_v2_lib_map):
        """Validate that replace_all_library_source_blocks_ids was successful."""
        courses = CourseOverview.get_all_courses()
        tasks = group(validate_all_library_source_blocks_ids_for_course.s(course, v1_to_v2_lib_map) for course in courses)  # lint-amnesty, pylint: disable=line-too-long
        results = tasks.apply_async()
        validation = set()
        for result in results.get():
            if isinstance(result, Exception):
                # Handle the task failure here
                log.error("Task failed with error: %s", str(result))
                continue
            else:
                validation.update(result)
        if validation.issubset(v1_to_v2_lib_map.values()):
            log.info("Validation: All values in the input map are present in courses.")
        else:
            log.info(
                "Validation Failed: There are unmapped v1 libraries."
            )

    def undo(self, v1_to_v2_lib_map):
        """Undo the changes made by replace_all_library_source_blocks_ids."""
        courses = CourseOverview.get_all_courses()
        # Use Celery to distribute the workload
        tasks = group(undo_all_library_source_blocks_ids_for_course.s(course, v1_to_v2_lib_map) for course in courses)
        results = tasks.apply_async()
        for result in results.get():
            if isinstance(result, Exception):
                # Handle the task failure here
                log.error("Task failed with error: %s", str(result))
                continue
        log.info("Completed replacing all v2 library source ids with v1 library source ids. Undo Complete")

    def handle(self, *args, **kwargs):
        """Parse arguments, load the v1->v2 mapping CSV, and dispatch the chosen mode."""
        file_path = kwargs['file_path']
        v1_to_v2_lib_map = {}
        # Fix: the extension check previously lived inside the try block, so the
        # CommandError it raised was swallowed by the broad `except Exception` below.
        if not file_path.endswith('.csv'):
            raise CommandError('Invalid file format. Only CSV files are supported.')
        try:
            with open(file_path, 'r', encoding='utf-8') as csvfile:
                csv_reader = csv.reader(csvfile)
                for row in csv_reader:
                    if len(row) >= 2:
                        key = row[0].strip()
                        value = row[1].strip()
                        v1_to_v2_lib_map[key] = value
            print("Data successfully imported as dictionary:")
        except FileNotFoundError:
            # Fix: pass the bare values to the lazy %-style log call; the originals
            # wrapped them in set literals ({file_path}), logging e.g. "{'path'}".
            log.error("File not found at '%s'.", file_path)
        except Exception as e:  # lint-amnesty, pylint: disable=broad-except
            log.error("An error occurred: %s", str(e))

        # Fix: the three modes are mutually exclusive; previously a --validate run
        # also fell through into the replace branch and mutated courses.
        if kwargs['validate']:
            self.validate(v1_to_v2_lib_map)
        elif kwargs['undo']:
            self.undo(v1_to_v2_lib_map)
        else:
            self.replace_all_library_source_blocks_ids(v1_to_v2_lib_map)

View File

@@ -4,6 +4,7 @@ or with filename which starts with "._")
"""
from unittest import skip
from django.conf import settings
from django.core.management import call_command
from opaque_keys.edx.keys import CourseKey
@@ -20,6 +21,9 @@ from xmodule.modulestore.xml_importer import import_course_from_xml
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
@skip("OldMongo Deprecation")
# This test worked only for Old Mongo
# Can later be converted to work with Split
class ExportAllCourses(ModuleStoreTestCase):
"""
Tests assets cleanup for all courses.

View File

@@ -4,7 +4,6 @@ Unittests for creating a course in an chosen modulestore
from io import StringIO
import ddt
from django.core.management import CommandError, call_command
from django.test import TestCase
@@ -40,27 +39,28 @@ class TestArgParsing(TestCase):
call_command('create_course', "mongo", "fake@example.com", "org", "course", "run")
@ddt.ddt
class TestCreateCourse(ModuleStoreTestCase):
"""
Unit tests for creating a course in either old mongo or split mongo via command line
"""
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_all_stores_user_email(self, store):
def test_all_stores_user_email(self):
call_command(
"create_course",
store,
ModuleStoreEnum.Type.split,
str(self.user.email),
"org", "course", "run", "dummy-course-name"
)
new_key = modulestore().make_course_key("org", "course", "run")
self.assertTrue(
modulestore().has_course(new_key),
f"Could not find course in {store}"
f"Could not find course in {ModuleStoreEnum.Type.split}"
)
# pylint: disable=protected-access
self.assertEqual(store, modulestore()._get_modulestore_for_courselike(new_key).get_modulestore_type())
self.assertEqual(
ModuleStoreEnum.Type.split,
modulestore()._get_modulestore_for_courselike(new_key).get_modulestore_type()
)
def test_duplicate_course(self):
"""
@@ -85,8 +85,7 @@ class TestCreateCourse(ModuleStoreTestCase):
expected = "Course already exists"
self.assertIn(out.getvalue().strip(), expected)
@ddt.data(ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.mongo)
def test_get_course_with_different_case(self, default_store):
def test_get_course_with_different_case(self):
"""
Tests that course can not be accessed with different case.
@@ -98,21 +97,20 @@ class TestCreateCourse(ModuleStoreTestCase):
org = 'org1'
number = 'course1'
run = 'run1'
with self.store.default_store(default_store):
lowercase_course_id = self.store.make_course_key(org, number, run)
with self.store.bulk_operations(lowercase_course_id, ignore_case=True):
# Create course with lowercase key & Verify that store returns course.
self.store.create_course(
lowercase_course_id.org,
lowercase_course_id.course,
lowercase_course_id.run,
self.user.id
)
course = self.store.get_course(lowercase_course_id)
self.assertIsNotNone(course, 'Course not found using lowercase course key.')
self.assertEqual(str(course.id), str(lowercase_course_id))
lowercase_course_id = self.store.make_course_key(org, number, run)
with self.store.bulk_operations(lowercase_course_id, ignore_case=True):
# Create course with lowercase key & Verify that store returns course.
self.store.create_course(
lowercase_course_id.org,
lowercase_course_id.course,
lowercase_course_id.run,
self.user.id
)
course = self.store.get_course(lowercase_course_id)
self.assertIsNotNone(course, 'Course not found using lowercase course key.')
self.assertEqual(str(course.id), str(lowercase_course_id))
# Verify store does not return course with different case.
uppercase_course_id = self.store.make_course_key(org.upper(), number.upper(), run.upper())
course = self.store.get_course(uppercase_course_id)
self.assertIsNone(course, 'Course should not be accessed with uppercase course id.')
# Verify store does not return course with different case.
uppercase_course_id = self.store.make_course_key(org.upper(), number.upper(), run.upper())
course = self.store.get_course(uppercase_course_id)
self.assertIsNone(course, 'Course should not be accessed with uppercase course id.')

View File

@@ -5,6 +5,7 @@ Test for export all courses.
import shutil
from tempfile import mkdtemp
from unittest import skip
from cms.djangoapps.contentstore.management.commands.export_all_courses import export_courses_to_output_path
from xmodule.modulestore import ModuleStoreEnum # lint-amnesty, pylint: disable=wrong-import-order
@@ -13,6 +14,10 @@ from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase # lint-a
from xmodule.modulestore.tests.factories import CourseFactory # lint-amnesty, pylint: disable=wrong-import-order
@skip("OldMongo Deprecation")
# This test fails for split modulestore
# AttributeError: 'MixedModuleStore' object has no attribute 'collection'
# split module store has no 'collection' attribute.
class ExportAllCourses(ModuleStoreTestCase):
"""
Tests exporting all courses.

View File

@@ -23,14 +23,6 @@ class TestFixNotFound(ModuleStoreTestCase):
with self.assertRaisesRegex(CommandError, msg):
call_command('fix_not_found')
def test_fix_not_found_non_split(self):
"""
The management command doesn't work on non split courses
"""
course = CourseFactory.create(default_store=ModuleStoreEnum.Type.mongo)
with self.assertRaisesRegex(CommandError, "The owning modulestore does not support this command."):
call_command("fix_not_found", str(course.id))
def test_fix_not_found(self):
course = CourseFactory.create(default_store=ModuleStoreEnum.Type.split)
BlockFactory.create(category='chapter', parent_location=course.location)

View File

@@ -58,15 +58,6 @@ class TestForcePublish(SharedModuleStoreTestCase):
with self.assertRaisesRegex(CommandError, errstring):
call_command('force_publish', 'course-v1:org+course+run')
def test_force_publish_non_split(self):
"""
Test 'force_publish' command doesn't work on non split courses
"""
course = CourseFactory.create(default_store=ModuleStoreEnum.Type.mongo)
errstring = 'The owning modulestore does not support this command.'
with self.assertRaisesRegex(CommandError, errstring):
call_command('force_publish', str(course.id))
class TestForcePublishModifications(ModuleStoreTestCase):
"""

View File

@@ -11,7 +11,6 @@ from django.core.management import call_command
from path import Path as path
from openedx.core.djangoapps.django_comment_common.utils import are_permissions_roles_seeded
from xmodule.modulestore import ModuleStoreEnum # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase # lint-amnesty, pylint: disable=wrong-import-order
@@ -73,23 +72,3 @@ class TestImport(ModuleStoreTestCase):
# Now load up the course with a similar course_id and verify it loads
call_command('import', self.content_dir, self.course_dir)
self.assertIsNotNone(store.get_course(self.truncated_key))
def test_existing_course_with_different_modulestore(self):
"""
Checks that a course that originally existed in old mongo can be re-imported when
split is the default modulestore.
"""
with modulestore().default_store(ModuleStoreEnum.Type.mongo):
call_command('import', self.content_dir, self.good_dir)
# Clear out the modulestore mappings, else when the next import command goes to create a destination
# course_key, it will find the existing course and return the mongo course_key. To reproduce TNL-1362,
# the destination course_key needs to be the one for split modulestore.
modulestore().mappings = {}
with modulestore().default_store(ModuleStoreEnum.Type.split):
call_command('import', self.content_dir, self.good_dir)
course = modulestore().get_course(self.base_course_key)
# With the bug, this fails because the chapter's course_key is the split mongo form,
# while the course's course_key is the old mongo form.
self.assertEqual(str(course.location.course_key), str(course.children[0].course_key))

View File

@@ -2,7 +2,8 @@
Contentstore API URLs.
"""
from django.urls import include, re_path
from django.urls import path
from django.urls import include
from .v0 import urls as v0_urls
from .v1 import urls as v1_urls
@@ -10,6 +11,6 @@ from .v1 import urls as v1_urls
app_name = 'cms.djangoapps.contentstore'
urlpatterns = [
re_path(r'^v0/', include(v0_urls)),
re_path(r'^v1/', include(v1_urls)),
path('v0/', include(v0_urls)),
path('v1/', include(v1_urls)),
]

View File

@@ -6,11 +6,14 @@ import json
import ddt
from django.test import override_settings
from django.urls import reverse
from edx_toggles.toggles.testutils import override_waffle_flag
from milestones.tests.utils import MilestonesTestCaseMixin
from cms.djangoapps.contentstore.tests.utils import CourseTestCase
from cms.djangoapps.contentstore.toggles import ENABLE_NEW_STUDIO_ADVANCED_SETTINGS_PAGE
@override_waffle_flag(ENABLE_NEW_STUDIO_ADVANCED_SETTINGS_PAGE, active=True)
@ddt.ddt
class CourseAdvanceSettingViewTest(CourseTestCase, MilestonesTestCaseMixin):
"""

View File

@@ -8,12 +8,15 @@ from urllib.parse import urlencode
import ddt
from django.urls import reverse
from edx_toggles.toggles.testutils import override_waffle_flag
from xmodule.modulestore.tests.factories import BlockFactory
from xmodule.tabs import CourseTabList
from cms.djangoapps.contentstore.tests.utils import CourseTestCase
from cms.djangoapps.contentstore.toggles import ENABLE_NEW_STUDIO_CUSTOM_PAGES
@override_waffle_flag(ENABLE_NEW_STUDIO_CUSTOM_PAGES, active=True)
@ddt.ddt
class TabsAPITests(CourseTestCase):
"""

View File

@@ -79,6 +79,8 @@ class CourseTabListView(DeveloperErrorViewMixin, APIView):
```
"""
course_key = CourseKey.from_string(course_id)
if not use_new_custom_pages(course_key):
return Response(status=status.HTTP_403_FORBIDDEN)
if not has_studio_read_access(request.user, course_key):
self.permission_denied(request)

View File

@@ -2,6 +2,7 @@
Serializers for v1 contentstore API.
"""
from .course_details import CourseDetailsSerializer
from .course_team import CourseTeamSerializer
from .grading import CourseGradingModelSerializer, CourseGradingSerializer
from .proctoring import (
LimitedProctoredExamSettingsSerializer,

View File

@@ -0,0 +1,20 @@
"""
API Serializers for course team
"""
from rest_framework import serializers
class UserCourseTeamSerializer(serializers.Serializer):
    """Serializer for a single user in the course team."""
    email = serializers.CharField()      # the user's email address
    id = serializers.IntegerField()      # the user's id (primary key)
    role = serializers.CharField()       # course-team role name, e.g. "instructor" or "staff"
    username = serializers.CharField()   # the user's username
class CourseTeamSerializer(serializers.Serializer):
    """Serializer for course team context data."""
    # Whether the UI should show the hint about transferring course ownership.
    show_transfer_ownership_hint = serializers.BooleanField()
    # All CMS users on the course team.
    users = UserCourseTeamSerializer(many=True)
    # Presumably: whether the requester may manage team membership — confirm against view context.
    allow_actions = serializers.BooleanField()

View File

@@ -34,8 +34,10 @@ class CourseSettingsSerializer(serializers.Serializer):
is_prerequisite_courses_enabled = serializers.BooleanField()
language_options = serializers.ListField(child=serializers.ListField(child=serializers.CharField()))
lms_link_for_about_page = serializers.URLField()
licensing_enabled = serializers.BooleanField()
marketing_enabled = serializers.BooleanField()
mfe_proctored_exam_settings_url = serializers.CharField(required=False, allow_null=True, allow_blank=True)
platform_name = serializers.CharField()
possible_pre_requisite_courses = PossiblePreRequisiteCourseSerializer(required=False, many=True)
short_description_editable = serializers.BooleanField()
show_min_grade_warning = serializers.BooleanField()

View File

@@ -1,5 +1,6 @@
""" Contenstore API v1 URLs. """
from django.urls import path
from django.urls import re_path
from django.conf import settings
@@ -7,15 +8,22 @@ from openedx.core.constants import COURSE_ID_PATTERN
from .views import (
CourseDetailsView,
CourseTeamView,
CourseGradingView,
CourseSettingsView,
ProctoredExamSettingsView,
ProctoringErrorsView,
xblock
xblock,
assets,
videos,
transcripts,
HelpUrlsView,
)
app_name = 'v1'
VIDEO_ID_PATTERN = r'(?:(?P<edx_video_id>[-\w]+))'
urlpatterns = [
re_path(
fr'^proctored_exam_settings/{COURSE_ID_PATTERN}$',
@@ -37,6 +45,11 @@ urlpatterns = [
CourseDetailsView.as_view(),
name="course_details"
),
re_path(
fr'^course_team/{COURSE_ID_PATTERN}$',
CourseTeamView.as_view(),
name="course_team"
),
re_path(
fr'^course_grading/{COURSE_ID_PATTERN}$',
CourseGradingView.as_view(),
@@ -46,4 +59,37 @@ urlpatterns = [
fr'^xblock/{settings.COURSE_ID_PATTERN}/{settings.USAGE_KEY_PATTERN}?$',
xblock.XblockView.as_view(), name='studio_content'
),
re_path(
fr'^file_assets/{settings.COURSE_ID_PATTERN}/{settings.ASSET_KEY_PATTERN}?$',
assets.AssetsView.as_view(), name='studio_content_assets'
),
re_path(
fr'^videos/uploads/{settings.COURSE_ID_PATTERN}/{VIDEO_ID_PATTERN}?$',
videos.VideosView.as_view(), name='studio_content_videos_uploads'
),
re_path(
fr'^videos/images/{settings.COURSE_ID_PATTERN}/{VIDEO_ID_PATTERN}?$',
videos.VideoImagesView.as_view(), name='studio_content_videos_images'
),
re_path(
fr'^videos/encodings/{settings.COURSE_ID_PATTERN}$',
videos.VideoEncodingsDownloadView.as_view(), name='studio_content_videos_encodings'
),
path(
'videos/features/',
videos.VideoFeaturesView.as_view(), name='studio_content_videos_features'
),
re_path(
fr'^videos/upload_link/{settings.COURSE_ID_PATTERN}$',
videos.UploadLinkView.as_view(), name='studio_content_videos_upload_link'
),
re_path(
fr'^video_transcripts/{settings.COURSE_ID_PATTERN}$',
transcripts.TranscriptView.as_view(), name='studio_content_video_transcripts'
),
path(
'help_urls',
HelpUrlsView.as_view(),
name="help_urls"
),
]

View File

@@ -2,7 +2,11 @@
Views for v1 contentstore API.
"""
from .course_details import CourseDetailsView
from .course_team import CourseTeamView
from .grading import CourseGradingView
from .proctoring import ProctoredExamSettingsView, ProctoringErrorsView
from .settings import CourseSettingsView
from .xblock import XblockView
from .assets import AssetsView
from .videos import VideosView
from .help_urls import HelpUrlsView

View File

@@ -0,0 +1,58 @@
"""
Public rest API endpoints for the Studio Content API Assets.
"""
import logging
from rest_framework.generics import RetrieveUpdateDestroyAPIView, CreateAPIView
from django.views.decorators.csrf import csrf_exempt
from django.http import Http404
from openedx.core.lib.api.view_utils import DeveloperErrorViewMixin, view_auth_classes
from common.djangoapps.util.json_request import expect_json_in_class_view
from ....api import course_author_access_required
from cms.djangoapps.contentstore.asset_storage_handlers import handle_assets
import cms.djangoapps.contentstore.toggles as contentstore_toggles
log = logging.getLogger(__name__)
toggles = contentstore_toggles
@view_auth_classes()
class AssetsView(DeveloperErrorViewMixin, RetrieveUpdateDestroyAPIView, CreateAPIView):
    """
    Public rest API endpoints for the Studio Content API Assets.

    course_key: required argument, needed to authorize course authors and identify the asset.
    asset_key_string: required argument, needed to identify the asset.
    """
    def dispatch(self, request, *args, **kwargs):
        # TODO: probably want to refactor this to a decorator.
        """
        The dispatch method of a View class handles HTTP requests in general
        and calls other methods to handle specific HTTP methods.
        We use this to raise a 404 if the content api is disabled.
        """
        if not toggles.use_studio_content_api():
            raise Http404
        return super().dispatch(request, *args, **kwargs)

    @course_author_access_required
    @expect_json_in_class_view
    def retrieve(self, request, course_key):  # pylint: disable=arguments-differ
        # GET: delegate to the shared legacy assets handler.
        return handle_assets(request, course_key.html_id())

    @csrf_exempt
    @course_author_access_required
    def create(self, request, course_key):  # pylint: disable=arguments-differ
        # POST: create an asset. NOTE(review): csrf_exempt and no
        # expect_json_in_class_view here, presumably because uploads arrive
        # as multipart form data rather than JSON — confirm.
        return handle_assets(request, course_key.html_id())

    @course_author_access_required
    @expect_json_in_class_view
    def update(self, request, course_key, asset_key_string):  # pylint: disable=arguments-differ
        # PUT/PATCH: update the asset identified by asset_key_string.
        return handle_assets(request, course_key.html_id(), asset_key_string)

    @course_author_access_required
    @expect_json_in_class_view
    def destroy(self, request, course_key, asset_key_string):  # pylint: disable=arguments-differ
        # DELETE: remove the asset identified by asset_key_string.
        return handle_assets(request, course_key.html_id(), asset_key_string)

View File

@@ -0,0 +1,74 @@
""" API Views for course team """
import edx_api_doc_tools as apidocs
from opaque_keys.edx.keys import CourseKey
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.views import APIView
from cms.djangoapps.contentstore.utils import get_course_team
from common.djangoapps.student.auth import STUDIO_VIEW_USERS, get_user_permissions
from openedx.core.lib.api.view_utils import DeveloperErrorViewMixin, verify_course_exists, view_auth_classes
from ..serializers import CourseTeamSerializer
@view_auth_classes(is_authenticated=True)
class CourseTeamView(DeveloperErrorViewMixin, APIView):
    """
    View for getting data for course team.
    """
    @apidocs.schema(
        parameters=[
            apidocs.string_parameter("course_id", apidocs.ParameterLocation.PATH, description="Course ID"),
        ],
        responses={
            200: CourseTeamSerializer,
            401: "The requester is not authenticated.",
            403: "The requester cannot access the specified course.",
            404: "The requested course does not exist.",
        },
    )
    @verify_course_exists()
    def get(self, request: Request, course_id: str):
        """
        Get all CMS users who are editors for the specified course.

        **Example Request**

            GET /api/contentstore/v1/course_team/{course_id}

        **Response Values**

        If the request is successful, an HTTP 200 "OK" response is returned.
        The HTTP 200 response contains a single dict that contains keys that
        are the course's team info.

        **Example Response**

        ```json
        {
            "show_transfer_ownership_hint": true,
            "users": [
                {
                    "email": "edx@example.com",
                    "id": "3",
                    "role": "instructor",
                    "username": "edx"
                },
            ],
            "allow_actions": true
        }
        ```
        """
        user = request.user
        course_key = CourseKey.from_string(course_id)
        # Only users holding the Studio "view users" permission may see the team.
        user_perms = get_user_permissions(user, course_key)
        if not user_perms & STUDIO_VIEW_USERS:
            self.permission_denied(request)
        course_team_context = get_course_team(user, course_key, user_perms)
        serializer = CourseTeamSerializer(course_team_context)
        return Response(serializer.data)

View File

@@ -0,0 +1,44 @@
""" API Views for help tokens """
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.views import APIView
from openedx.core.lib.api.view_utils import view_auth_classes
from ....utils import get_help_urls
@view_auth_classes(is_authenticated=True)
class HelpUrlsView(APIView):
    """
    View for getting all help urls.
    """
    def get(self, request: Request):
        """
        Get all help urls.

        **Example Request**

            GET /api/contentstore/v1/help_urls

        **Response Values**

        If the request is successful, an HTTP 200 "OK" response is returned.
        The HTTP 200 response contains a single dict that contains keys for
        pages and locales

        **Example Response**

        ```json
        {
            "default": "http://edx.readthedocs.io/projects/.../index.html",
            "home": "http://edx.readthedocs.io/projects/.../CA_get_started_Studio.html",
            "develop_course": "http://edx.readthedocs.io/projects/.../developing_course/index.html",
            ...
        }
        ```
        """
        data = get_help_urls()
        return Response(data)

View File

@@ -74,9 +74,11 @@ class CourseSettingsView(DeveloperErrorViewMixin, APIView):
],
...
],
"licensing_enabled": false,
"lms_link_for_about_page": "http://localhost:18000/courses/course-v1:edX+E2E-101+course/about",
"marketing_enabled": true,
"mfe_proctored_exam_settings_url": "",
"platform_name": "edX",
"possible_pre_requisite_courses": [
{
"course_key": "course-v1:edX+M12+2T2023",
@@ -108,6 +110,8 @@ class CourseSettingsView(DeveloperErrorViewMixin, APIView):
'can_show_certificate_available_date_field': can_show_certificate_available_date_field(course_block),
'course_display_name': course_block.display_name,
'course_display_name_with_default': course_block.display_name_with_default,
'platform_name': settings.PLATFORM_NAME,
'licensing_enabled': settings.FEATURES.get("LICENSING", False),
'use_v2_cert_display_settings': settings.FEATURES.get("ENABLE_V2_CERT_DISPLAY_SETTINGS", False),
})

View File

@@ -0,0 +1,281 @@
"""
Tests for the xblock view of the Studio Content API. This tests only the view itself,
not the underlying Xblock service.
It checks that the assets_handler method of the Xblock service is called with the expected parameters.
"""
from unittest.mock import patch
from django.http import JsonResponse
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from cms.djangoapps.contentstore.tests.test_utils import AuthorizeStaffTestCase
ASSET_KEY_STRING = "asset-v1:dede+aba+weagi+type@asset+block@_0e37192a-42c4-441e-a3e1-8e40ec304e2e.jpg"
class AssetsViewTestCase(AuthorizeStaffTestCase):
    """
    This base class supports tests with the various HTTP methods (GET, POST, PUT, PATCH, and DELETE).
    Tests for each such message are organized by classes that derive from this one (e.g., AssetsViewGetTest).
    Each derived class supplies get_test_data() to govern what goes into the body of the HTTP request.
    Each derived class optionally overrides get_url_params() to govern request parameter values.
    Additionally, each derived class supplies send_request() to bring it all together when making a request.
    """
    def get_test_data(self):
        # Request body for the HTTP call; subclasses must define it.
        raise NotImplementedError("get_test_data must be implemented by subclasses")

    def get_url_params(self):
        """
        Returns a dictionary of parameters to be used in the url that includes course_id and usage_key_string.
        Override this method if you don't want to use the default values.
        """
        return {"course_id": self.get_course_key_string(), "usage_key_string": ASSET_KEY_STRING}

    def get_url(self, _course_id=None):
        # _course_id is accepted for interface compatibility but unused;
        # the URL is built entirely from get_url_params().
        return reverse(
            "cms.djangoapps.contentstore:v1:studio_content_assets",
            kwargs=self.get_url_params(),
        )

    def send_request(self, _url, _data):
        # How the HTTP request is issued; subclasses must define it.
        raise NotImplementedError("send_request must be implemented by subclasses")

    @patch(
        "cms.djangoapps.contentstore.rest_api.v1.views.assets.handle_assets",
        return_value=JsonResponse(
            {
                "locator": ASSET_KEY_STRING,
                "courseKey": AuthorizeStaffTestCase.get_course_key_string(),
            }
        ),
    )
    @patch(
        # NOTE(review): the feature flag is patched via the *xblock* views
        # module; this presumably works because both view modules alias the
        # same contentstore_toggles module object — confirm.
        "cms.djangoapps.contentstore.rest_api.v1.views.xblock.toggles.use_studio_content_api",
        return_value=True,
    )
    def make_request(
        self,
        mock_use_studio_content_api,
        mock_handle_assets,
        run_assertions=None,
        course_id=None,
        data=None,
    ):
        """
        Note that the actual assets handler is mocked out and not used here. Patches used with this method serve to
        test that routing of HTTP requests to the assets handler is correct, that the intended HTTP method has been
        used, that data fed into the handler is as expected, and that data returned by the handler is as expected.
        Inputs and outputs are handled through send_request() polymorphism, to cover all the HTTP methods in a
        common fashion here.
        Validations are through injection of run_assertions().
        """
        url = self.get_url()
        data = self.get_test_data()
        response = self.send_request(url, data)
        # run optional callback method with additional assertions
        if run_assertions:
            run_assertions(
                response=response, mock_handle_assets=mock_handle_assets
            )
        return response
class AssetsViewGetTest(AssetsViewTestCase, ModuleStoreTestCase, APITestCase):
    """
    Test GET operation on assets.
    """
    def get_url_params(self):
        # GET lists course assets, so only the course id goes in the URL.
        return {"course_id": self.get_course_key_string()}

    def get_test_data(self):
        # GET requests carry no body.
        return None

    def assert_assets_handler_called(self, *, mock_handle_assets, response):
        """
        This defines a callback method that is called after the request is made
        and runs additional assertions on the response and mock_handle_assets.
        """
        mock_handle_assets.assert_called_once()
        passed_args = mock_handle_assets.call_args[0][0]
        assert passed_args.method == "GET"
        assert passed_args.path == self.get_url()

    def send_request(self, url, data):
        return self.client.get(url)

    def test_api_behind_feature_flag(self):
        # should return 404 if the feature flag is not enabled
        url = self.get_url()
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_assets_handler_called_with_correct_arguments(self):
        self.client.login(
            username=self.course_instructor.username, password=self.password
        )
        response = self.make_request(  # pylint: disable=no-value-for-parameter
            run_assertions=self.assert_assets_handler_called,
        )
        assert response.status_code == status.HTTP_200_OK
        data = response.json()
        assert data["locator"] == ASSET_KEY_STRING
        assert data["courseKey"] == self.get_course_key_string()
class AssetsViewPostTest(AssetsViewTestCase, ModuleStoreTestCase, APITestCase):
    """
    Test POST operation on assets - upload a new asset to a course.
    """
    def get_url_params(self):
        # POST targets the course's asset collection, so no asset key in the URL.
        return {"course_id": self.get_course_key_string()}

    def get_test_data(self):
        # Multipart body; the handler is mocked, so any value works here.
        return {
            "file": ASSET_KEY_STRING,
        }

    def assert_assets_handler_called(self, *, mock_handle_assets, response):
        """
        This defines a callback method that is called after the request is made
        and runs additional assertions on the response and mock_handle_assets.
        """
        mock_handle_assets.assert_called_once()
        passed_args = mock_handle_assets.call_args[0][0]
        assert passed_args.data.get("file") == ASSET_KEY_STRING
        assert passed_args.method == "POST"
        assert passed_args.path == self.get_url()

    def send_request(self, url, data):
        return self.client.post(url, data=data, format="multipart")

    def test_api_behind_feature_flag(self):
        # should return 404 if the feature flag is not enabled
        url = self.get_url()
        response = self.client.post(url)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_assets_handler_called_with_correct_arguments(self):
        self.client.login(
            username=self.course_instructor.username, password=self.password
        )
        response = self.make_request(  # pylint: disable=no-value-for-parameter
            run_assertions=self.assert_assets_handler_called,
        )
        assert response.status_code == status.HTTP_200_OK
        data = response.json()
        assert data["locator"] == ASSET_KEY_STRING
        assert data["courseKey"] == self.get_course_key_string()
class AssetsViewPutTest(AssetsViewTestCase, ModuleStoreTestCase, APITestCase):
    """
    Test PUT operation on assets - update an asset's locked state.
    """
    def get_url_params(self):
        # PUT addresses a single asset, so the asset key is part of the URL.
        return {"course_id": self.get_course_key_string(), "asset_key_string": ASSET_KEY_STRING}

    def get_test_data(self):
        return {
            "locked": True,
        }

    def assert_assets_handler_called(self, *, mock_handle_assets, response):
        """
        This defines a callback method that is called after the request is made
        and runs additional assertions on the response and mock_handle_assets.
        """
        mock_handle_assets.assert_called_once()
        passed_args = mock_handle_assets.call_args[0][0]
        assert passed_args.data.get("locked") is True
        assert passed_args.method == "PUT"
        assert passed_args.path == self.get_url()

    def send_request(self, url, data):
        return self.client.put(url, data=data, format="json")

    def test_api_behind_feature_flag(self):
        # should return 404 if the feature flag is not enabled
        url = self.get_url()
        response = self.client.put(url)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_assets_handler_called_with_correct_arguments(self):
        self.client.login(
            username=self.course_instructor.username, password=self.password
        )
        response = self.make_request(  # pylint: disable=no-value-for-parameter
            run_assertions=self.assert_assets_handler_called,
        )
        assert response.status_code == status.HTTP_200_OK
        data = response.json()
        assert data["locator"] == ASSET_KEY_STRING
        assert data["courseKey"] == self.get_course_key_string()
class AssetsViewDeleteTest(AssetsViewTestCase, ModuleStoreTestCase, APITestCase):
    """
    Test DELETE asset
    """
    def get_url_params(self):
        # DELETE addresses a single asset, so the asset key is part of the URL.
        return {"course_id": self.get_course_key_string(), "asset_key_string": ASSET_KEY_STRING}

    def get_test_data(self):
        # DELETE requests carry no body.
        return None

    def assert_assets_handler_called(self, *, mock_handle_assets, response):
        """
        This defines a callback method that is called after the request is made
        and runs additional assertions on the response and mock_handle_assets.
        """
        mock_handle_assets.assert_called_once()
        passed_args = mock_handle_assets.call_args[0][0]
        assert passed_args.method == "DELETE"
        assert passed_args.path == self.get_url()

    def send_request(self, url, data):
        return self.client.delete(url)

    def test_api_behind_feature_flag(self):
        # should return 404 if the feature flag is not enabled
        url = self.get_url()
        response = self.client.delete(url)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_assets_handler_called_with_correct_arguments(self):
        self.client.login(
            username=self.course_instructor.username, password=self.password
        )
        response = self.make_request(  # pylint: disable=no-value-for-parameter
            run_assertions=self.assert_assets_handler_called,
        )
        assert response.status_code == status.HTTP_200_OK
        data = response.json()
        assert data["locator"] == ASSET_KEY_STRING
        assert data["courseKey"] == self.get_course_key_string()

View File

@@ -0,0 +1,78 @@
"""
Unit tests for course team.
"""
import ddt
from django.urls import reverse
from rest_framework import status
from common.djangoapps.student.roles import CourseInstructorRole, CourseStaffRole
from common.djangoapps.student.tests.factories import UserFactory
from cms.djangoapps.contentstore.tests.utils import CourseTestCase
from ...mixins import PermissionAccessMixin
@ddt.ddt
class CourseTeamViewTest(CourseTestCase, PermissionAccessMixin):
    """
    Tests for CourseTeamView.
    """
    def setUp(self):
        super().setUp()
        self.url = reverse(
            "cms.djangoapps.contentstore:v1:course_team",
            kwargs={"course_id": self.course.id},
        )

    def get_expected_course_data(self, instructor=None, staff=None):
        """Build the expected course-team payload for the given role holders."""
        users = []
        if instructor:
            users.append({
                "email": instructor.email,
                "id": instructor.id,
                "role": "instructor",
                "username": instructor.username
            })
        if staff:
            users.append({
                "email": staff.email,
                "id": staff.id,
                "role": "staff",
                "username": staff.username
            })
        return {
            "show_transfer_ownership_hint": False,
            "users": users,
            "allow_actions": True,
        }

    def create_course_user_roles(self, course_id):
        """Create and return one instructor-role user and one staff-role user."""
        instructor = UserFactory()
        CourseInstructorRole(course_id).add_users(instructor)
        staff = UserFactory()
        CourseStaffRole(course_id).add_users(staff)
        return instructor, staff

    def test_course_team_response(self):
        """Check successful response content when the team is empty."""
        response = self.client.get(self.url)
        expected_response = self.get_expected_course_data()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertDictEqual(expected_response, response.data)

    def test_users_response(self):
        """Test the response for users in the course."""
        instructor, staff = self.create_course_user_roles(self.course.id)
        response = self.client.get(self.url)
        # response.data["users"] contains OrderedDict items; normalize to dicts.
        users_response = [dict(item) for item in response.data["users"]]
        expected_response = self.get_expected_course_data(instructor, staff)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertListEqual(expected_response["users"], users_response)

View File

@@ -48,10 +48,12 @@ class CourseSettingsViewTest(CourseTestCase, PermissionAccessMixin):
"mfe_proctored_exam_settings_url": get_proctored_exam_settings_url(
self.course.id
),
"platform_name": settings.PLATFORM_NAME,
"short_description_editable": True,
"sidebar_html_enabled": False,
"show_min_grade_warning": False,
"upgrade_deadline": None,
"licensing_enabled": False,
"use_v2_cert_display_settings": False,
}

View File

@@ -17,10 +17,10 @@ from cms.djangoapps.contentstore.tests.test_utils import AuthorizeStaffTestCase
TEST_LOCATOR = "block-v1:dede+aba+weagi+type@problem+block@ba6327f840da49289fb27a9243913478"
class XblockViewTestCase(AuthorizeStaffTestCase):
class XBlockViewTestCase(AuthorizeStaffTestCase):
"""
This base class supports tests with the various HTTP methods (GET, POST, PUT, PATCH, and DELETE).
Tests for each such message are organized by classes that derive from this one (e.g., XblockViewGetTest).
Tests for each such message are organized by classes that derive from this one (e.g., XBlockViewGetTest).
Each derived class supplies get_test_data() to govern what goes into the body of the HTTP request.
Each derived class optionally overrides get_url_params() to govern request parameter values.
Additionally, each derived class supplies send_request() to bring it all together when making a request.
@@ -88,7 +88,7 @@ class XblockViewTestCase(AuthorizeStaffTestCase):
return response
class XblockViewGetTest(XblockViewTestCase, ModuleStoreTestCase, APITestCase):
class XBlockViewGetTest(XBlockViewTestCase, ModuleStoreTestCase, APITestCase):
"""
Test GET operation on xblocks
"""
@@ -131,7 +131,7 @@ class XblockViewGetTest(XblockViewTestCase, ModuleStoreTestCase, APITestCase):
assert data["courseKey"] == self.get_course_key_string()
class XblockViewPostTest(XblockViewTestCase, ModuleStoreTestCase, APITestCase):
class XBlockViewPostTest(XBlockViewTestCase, ModuleStoreTestCase, APITestCase):
"""
Test POST operation on xblocks - Create a new xblock for a parent xblock
"""
@@ -190,7 +190,7 @@ class XblockViewPostTest(XblockViewTestCase, ModuleStoreTestCase, APITestCase):
assert data["courseKey"] == self.get_course_key_string()
class XblockViewPutTest(XblockViewTestCase, ModuleStoreTestCase, APITestCase):
class XBlockViewPutTest(XBlockViewTestCase, ModuleStoreTestCase, APITestCase):
"""
Test PUT operation on xblocks - update an xblock
"""
@@ -247,7 +247,7 @@ class XblockViewPutTest(XblockViewTestCase, ModuleStoreTestCase, APITestCase):
assert data["courseKey"] == self.get_course_key_string()
class XblockViewPatchTest(XblockViewTestCase, ModuleStoreTestCase, APITestCase):
class XBlockViewPatchTest(XBlockViewTestCase, ModuleStoreTestCase, APITestCase):
"""
Test PATCH operation on xblocks - update an xblock
"""
@@ -304,7 +304,7 @@ class XblockViewPatchTest(XblockViewTestCase, ModuleStoreTestCase, APITestCase):
assert data["courseKey"] == self.get_course_key_string()
class XblockViewDeleteTest(XblockViewTestCase, ModuleStoreTestCase, APITestCase):
class XBlockViewDeleteTest(XBlockViewTestCase, ModuleStoreTestCase, APITestCase):
"""
Test DELETE operation on xblocks - delete an xblock
"""

View File

@@ -0,0 +1,62 @@
"""
Public rest API endpoints for the Studio Content API video assets.
"""
import logging
from rest_framework.generics import (
CreateAPIView,
RetrieveAPIView,
DestroyAPIView
)
from django.views.decorators.csrf import csrf_exempt
from django.http import Http404
from openedx.core.lib.api.view_utils import DeveloperErrorViewMixin, view_auth_classes
from common.djangoapps.util.json_request import expect_json_in_class_view
from ....api import course_author_access_required
from cms.djangoapps.contentstore.transcript_storage_handlers import (
upload_transcript,
delete_video_transcript_or_404,
handle_transcript_download,
)
import cms.djangoapps.contentstore.toggles as contentstore_toggles
log = logging.getLogger(__name__)
toggles = contentstore_toggles
@view_auth_classes()
class TranscriptView(DeveloperErrorViewMixin, CreateAPIView, RetrieveAPIView, DestroyAPIView):
    """
    public rest API endpoints for the Studio Content API video transcripts.
    course_key: required argument, needed to authorize course authors and identify the video.
    edx_video_id: optional query parameter, needed to identify the transcript.
    language_code: optional query parameter, needed to identify the transcript.
    """

    def dispatch(self, request, *args, **kwargs):
        # Hide the whole endpoint (404) while the Studio content API toggle is off.
        if not toggles.use_studio_content_api():
            raise Http404
        return super().dispatch(request, *args, **kwargs)

    @csrf_exempt
    @course_author_access_required
    @expect_json_in_class_view
    def create(self, request, course_key_string):  # pylint: disable=arguments-differ
        """Upload a video transcript; delegates to ``upload_transcript``."""
        return upload_transcript(request)

    @course_author_access_required
    def retrieve(self, request, course_key_string):  # pylint: disable=arguments-differ
        """
        Get a video transcript. edx_video_id and language_code query parameters are required.
        """
        return handle_transcript_download(request)

    @course_author_access_required
    def destroy(self, request, course_key_string):  # pylint: disable=arguments-differ
        """
        Delete a video transcript. edx_video_id and language_code query parameters are required.
        """
        return delete_video_transcript_or_404(request)

View File

@@ -0,0 +1,159 @@
"""
Public rest API endpoints for the Studio Content API video assets.
"""
import logging
from rest_framework.generics import (
CreateAPIView,
RetrieveAPIView,
DestroyAPIView
)
from django.views.decorators.csrf import csrf_exempt
from django.http import Http404
from openedx.core.lib.api.view_utils import DeveloperErrorViewMixin, view_auth_classes
from common.djangoapps.util.json_request import expect_json_in_class_view
from ....api import course_author_access_required
from cms.djangoapps.contentstore.video_storage_handlers import (
handle_videos,
get_video_encodings_download,
handle_video_images,
enabled_video_features,
handle_generate_video_upload_link
)
import cms.djangoapps.contentstore.toggles as contentstore_toggles
log = logging.getLogger(__name__)
toggles = contentstore_toggles
@view_auth_classes()
class VideosView(DeveloperErrorViewMixin, CreateAPIView, RetrieveAPIView, DestroyAPIView):
    """
    public rest API endpoints for the Studio Content API video assets.
    course_key: required argument, needed to authorize course authors and identify the video.
    video_id: required argument, needed to identify the video.
    """

    def dispatch(self, request, *args, **kwargs):
        # TODO: probably want to refactor this to a decorator.
        """
        The dispatch method of a View class handles HTTP requests in general
        and calls other methods to handle specific HTTP methods.
        We use this to raise a 404 if the content api is disabled.
        """
        if not toggles.use_studio_content_api():
            raise Http404
        return super().dispatch(request, *args, **kwargs)

    @csrf_exempt
    @course_author_access_required
    @expect_json_in_class_view
    def create(self, request, course_key):  # pylint: disable=arguments-differ
        """Create/upload a course video; delegates to ``handle_videos``."""
        return handle_videos(request, course_key.html_id())

    @course_author_access_required
    def retrieve(self, request, course_key, edx_video_id=None):  # pylint: disable=arguments-differ
        """
        Fetch video data; delegates to ``handle_videos``.
        NOTE(review): presumably lists all course videos when ``edx_video_id``
        is omitted — confirm against the handler.
        """
        return handle_videos(request, course_key.html_id(), edx_video_id)

    @course_author_access_required
    @expect_json_in_class_view
    def destroy(self, request, course_key, edx_video_id):  # pylint: disable=arguments-differ
        """Delete the given video; delegates to ``handle_videos``."""
        return handle_videos(request, course_key.html_id(), edx_video_id)
@view_auth_classes()
class VideoImagesView(DeveloperErrorViewMixin, CreateAPIView):
    """
    public rest API endpoint for uploading a video image.
    course_key: required argument, needed to authorize course authors and identify the video.
    video_id: required argument, needed to identify the video.
    """

    def dispatch(self, request, *args, **kwargs):
        # TODO: probably want to refactor this to a decorator.
        """
        The dispatch method of a View class handles HTTP requests in general
        and calls other methods to handle specific HTTP methods.
        We use this to raise a 404 if the content api is disabled.
        """
        if not toggles.use_studio_content_api():
            raise Http404
        return super().dispatch(request, *args, **kwargs)

    @csrf_exempt
    @course_author_access_required
    @expect_json_in_class_view
    def create(self, request, course_key, edx_video_id=None):  # pylint: disable=arguments-differ
        """Upload an image for the given video; delegates to ``handle_video_images``."""
        return handle_video_images(request, course_key.html_id(), edx_video_id)
@view_auth_classes()
class VideoEncodingsDownloadView(DeveloperErrorViewMixin, RetrieveAPIView):
    """
    public rest API endpoint providing a CSV report containing the encoded video URLs for video uploads.
    course_key: required argument, needed to authorize course authors and identify relevant videos.
    """

    def dispatch(self, request, *args, **kwargs):
        # TODO: probably want to refactor this to a decorator.
        """
        The dispatch method of a View class handles HTTP requests in general
        and calls other methods to handle specific HTTP methods.
        We use this to raise a 404 if the content api is disabled.
        """
        if not toggles.use_studio_content_api():
            raise Http404
        return super().dispatch(request, *args, **kwargs)

    @csrf_exempt
    @course_author_access_required
    def retrieve(self, request, course_key):  # pylint: disable=arguments-differ
        """Download the encoded-video CSV; delegates to ``get_video_encodings_download``."""
        return get_video_encodings_download(request, course_key.html_id())
@view_auth_classes()
class VideoFeaturesView(DeveloperErrorViewMixin, RetrieveAPIView):
    """
    public rest API endpoint providing a list of enabled video features.
    """

    def dispatch(self, request, *args, **kwargs):
        # TODO: probably want to refactor this to a decorator.
        """
        Gate every HTTP method on the Studio content API toggle: requests are
        dispatched normally while the toggle is enabled, and 404 otherwise.
        """
        if toggles.use_studio_content_api():
            return super().dispatch(request, *args, **kwargs)
        raise Http404

    @csrf_exempt
    def retrieve(self, request):  # pylint: disable=arguments-differ
        """Report which video features are enabled; delegates to ``enabled_video_features``."""
        return enabled_video_features(request)
@view_auth_classes()
class UploadLinkView(DeveloperErrorViewMixin, CreateAPIView):
    """
    public rest API endpoint for generating a video upload link.

    Delegates to ``handle_generate_video_upload_link``.
    course_key: required argument, needed to authorize course authors.
    """

    def dispatch(self, request, *args, **kwargs):
        # TODO: probably want to refactor this to a decorator.
        """
        The dispatch method of a View class handles HTTP requests in general
        and calls other methods to handle specific HTTP methods.
        We use this to raise a 404 if the content api is disabled.
        """
        if not toggles.use_studio_content_api():
            raise Http404
        return super().dispatch(request, *args, **kwargs)

    @csrf_exempt
    @course_author_access_required
    @expect_json_in_class_view
    def create(self, request, course_key):  # pylint: disable=arguments-differ
        """Generate an upload link for a course video; delegates to ``handle_generate_video_upload_link``."""
        return handle_generate_video_upload_link(request, course_key.html_id())

View File

@@ -1,4 +1,6 @@
# lint-amnesty, pylint: disable=missing-module-docstring
"""
Public rest API endpoints for the Studio Content API.
"""
import logging
from rest_framework.generics import RetrieveUpdateDestroyAPIView, CreateAPIView
from django.views.decorators.csrf import csrf_exempt
@@ -9,18 +11,18 @@ from common.djangoapps.util.json_request import expect_json_in_class_view
from ....api import course_author_access_required
from cms.djangoapps.contentstore.xblock_services import xblock_service
from cms.djangoapps.contentstore.xblock_storage_handlers import view_handlers
import cms.djangoapps.contentstore.toggles as contentstore_toggles
log = logging.getLogger(__name__)
toggles = contentstore_toggles
handle_xblock = xblock_service.handle_xblock
handle_xblock = view_handlers.handle_xblock
@view_auth_classes()
class XblockView(DeveloperErrorViewMixin, RetrieveUpdateDestroyAPIView, CreateAPIView):
"""
public rest API endpoint for the Studio Content API.
Public rest API endpoints for the Studio Content API.
course_key: required argument, needed to authorize course authors.
usage_key_string (optional):
xblock identifier, for example in the form of "block-v1:<course id>+type@<type>+block@<block id>"

View File

@@ -177,8 +177,9 @@ def listen_for_xblock_published(sender, signal, **kwargs):
Publish XBLOCK_PUBLISHED signals onto the event bus.
"""
if settings.FEATURES.get("ENABLE_SEND_XBLOCK_EVENTS_OVER_BUS"):
topic = getattr(settings, "EVENT_BUS_XBLOCK_LIFECYCLE_TOPIC", "course-authoring-xblock-lifecycle")
get_producer().send(
signal=XBLOCK_PUBLISHED, topic='xblock-published',
signal=XBLOCK_PUBLISHED, topic=topic,
event_key_field='xblock_info.usage_key', event_data={'xblock_info': kwargs['xblock_info']},
event_metadata=kwargs['metadata'],
)
@@ -190,8 +191,9 @@ def listen_for_xblock_deleted(sender, signal, **kwargs):
Publish XBLOCK_DELETED signals onto the event bus.
"""
if settings.FEATURES.get("ENABLE_SEND_XBLOCK_EVENTS_OVER_BUS"):
topic = getattr(settings, "EVENT_BUS_XBLOCK_LIFECYCLE_TOPIC", "course-authoring-xblock-lifecycle")
get_producer().send(
signal=XBLOCK_DELETED, topic='xblock-deleted',
signal=XBLOCK_DELETED, topic=topic,
event_key_field='xblock_info.usage_key', event_data={'xblock_info': kwargs['xblock_info']},
event_metadata=kwargs['metadata'],
)
@@ -203,8 +205,9 @@ def listen_for_xblock_duplicated(sender, signal, **kwargs):
Publish XBLOCK_DUPLICATED signals onto the event bus.
"""
if settings.FEATURES.get("ENABLE_SEND_XBLOCK_EVENTS_OVER_BUS"):
topic = getattr(settings, "EVENT_BUS_XBLOCK_LIFECYCLE_TOPIC", "course-authoring-xblock-lifecycle")
get_producer().send(
signal=XBLOCK_DUPLICATED, topic='xblock-duplicated',
signal=XBLOCK_DUPLICATED, topic=topic,
event_key_field='xblock_info.usage_key', event_data={'xblock_info': kwargs['xblock_info']},
event_metadata=kwargs['metadata'],
)

View File

@@ -19,6 +19,7 @@ from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import SuspiciousOperation
from django.core.files import File
from django.db.transaction import atomic
from django.test import RequestFactory
from django.utils.text import get_valid_filename
from edx_django_utils.monitoring import (
@@ -30,9 +31,10 @@ from edx_django_utils.monitoring import (
from olxcleaner.exceptions import ErrorLevel
from olxcleaner.reporting import report_error_summary, report_errors
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import LibraryLocator
from opaque_keys.edx.locator import LibraryLocator, LibraryLocatorV2
from organizations.api import add_organization_course, ensure_organization
from organizations.models import OrganizationCourse
from organizations.exceptions import InvalidOrganizationException
from organizations.models import Organization, OrganizationCourse
from path import Path as path
from pytz import UTC
from user_tasks.models import UserTaskArtifact, UserTaskStatus
@@ -47,13 +49,17 @@ from cms.djangoapps.contentstore.courseware_index import (
from cms.djangoapps.contentstore.storage import course_import_export_storage
from cms.djangoapps.contentstore.utils import initialize_permissions, reverse_usage_url, translation_language
from cms.djangoapps.models.settings.course_metadata import CourseMetadata
from common.djangoapps.course_action_state.models import CourseRerunState
from common.djangoapps.student.auth import has_course_author_access
from common.djangoapps.student.roles import CourseInstructorRole, CourseStaffRole, LibraryUserRole
from common.djangoapps.util.monitoring import monitor_import_failure
from openedx.core.djangoapps.content.learning_sequences.api import key_supports_outlines
from openedx.core.djangoapps.content_libraries import api as v2contentlib_api
from openedx.core.djangoapps.course_apps.toggles import exams_ida_enabled
from openedx.core.djangoapps.discussions.tasks import update_unit_discussion_state_from_discussion_blocks
from openedx.core.djangoapps.embargo.models import CountryAccessRule, RestrictedCourse
from openedx.core.lib.blockstore_api import get_collection
from openedx.core.lib.extract_tar import safetar_extractall
from xmodule.contentstore.django import contentstore # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.course_block import CourseFields # lint-amnesty, pylint: disable=wrong-import-order
@@ -69,6 +75,10 @@ from .outlines_regenerate import CourseOutlineRegenerate
from .toggles import bypass_olx_failure_enabled
from .utils import course_import_olx_validation_is_enabled
from cms.djangoapps.contentstore.utils import delete_course # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore import ModuleStoreEnum # lint-amnesty, pylint: disable=wrong-import-order
User = get_user_model()
LOGGER = get_task_logger(__name__)
@@ -290,7 +300,7 @@ class CourseExportTask(UserTask): # pylint: disable=abstract-method
arguments_dict (dict): The arguments given to the task function
Returns:
text_type: The generated name
str: The generated name
"""
key = arguments_dict['course_key_string']
return f'Export of {key}'
@@ -425,7 +435,7 @@ class CourseImportTask(UserTask): # pylint: disable=abstract-method
arguments_dict (dict): The arguments given to the task function
Returns:
text_type: The generated name
str: The generated name
"""
key = arguments_dict['course_key_string']
filename = arguments_dict['archive_name']
@@ -790,7 +800,6 @@ def log_errors_to_artifact(errorstore, status):
def handle_course_import_exception(courselike_key, exception, status, known=True):
"""
Handle course import exception and fail task status.
Arguments:
courselike_key: A locator identifies a course resource.
exception: Exception object
@@ -808,3 +817,327 @@ def handle_course_import_exception(courselike_key, exception, status, known=True
if status.state != UserTaskStatus.FAILED:
status.fail(task_fail_message)
def _parse_organization(org_name):
    """
    Find a matching organization; if one does not exist, specify that this is
    the *unspecified* organization.

    NOTE(review): on an unknown org this returns the literal string 'None',
    not the None object — downstream ``create_library`` presumably expects a
    string-able org value; confirm before changing.
    """
    try:
        ensure_organization(org_name)
    except InvalidOrganizationException:
        return 'None'
    return Organization.objects.get(short_name=org_name)
def copy_v1_user_roles_into_v2_library(v2_library_key, v1_library_key):
    """
    Write the access and edit permissions of a V1 library into a V2 library.

    V1 library roles map onto V2 permission levels as follows:
        LibraryUserRole      -> READ_LEVEL
        CourseStaffRole      -> AUTHOR_LEVEL
        CourseInstructorRole -> ADMIN_LEVEL
    """
    # Table-driven form of the mapping above; insertion order (READ, AUTHOR,
    # ADMIN) preserves the original processing order.
    role_class_by_level = {
        v2contentlib_api.AccessLevel.READ_LEVEL: LibraryUserRole,
        v2contentlib_api.AccessLevel.AUTHOR_LEVEL: CourseStaffRole,
        v2contentlib_api.AccessLevel.ADMIN_LEVEL: CourseInstructorRole,
    }
    for access_level, role_class in role_class_by_level.items():
        for user in role_class(v1_library_key).users_with_role():
            v2contentlib_api.set_library_user_permissions(v2_library_key, user, access_level)
def _create_copy_content_task(v2_library_key, v1_library_key):
    """
    spin up a celery task to import the V1 Library's content into the V2 library.
    This utilizes the fact that course and v1 library content is stored almost identically.

    Returns whatever ``import_blocks_create_task`` returns (presumably a task
    handle — confirm against the content_libraries API).
    """
    return v2contentlib_api.import_blocks_create_task(v2_library_key, v1_library_key)
def _create_metadata(v1_library_key, collection_uuid):
    """
    Instantiate an index for the V2 library in the given collection, mirroring
    the V1 library identified by ``v1_library_key``.

    Returns the metadata object produced by ``v2contentlib_api.create_library``
    (its ``.key`` is used by callers).
    Raises ``v2contentlib_api.LibraryAlreadyExists`` if the org/slug pair is taken.
    """
    # Removed a leftover debug print(collection_uuid) here.
    store = modulestore()
    v1_library = store.get_library(v1_library_key)
    collection = get_collection(collection_uuid).uuid
    # To make it easy, all converted libs are complex, meaning they can contain problems, videos, and text
    library_type = 'complex'
    org = _parse_organization(v1_library.location.library_key.org)
    slug = v1_library.location.library_key.library
    title = v1_library.display_name
    # V1 libraries do not have descriptions.
    description = ''
    # Permissions & license are most restrictive.
    allow_public_learning = False
    allow_public_read = False
    library_license = ''  # '' = ALL_RIGHTS_RESERVED
    # Atomic so a partially-created library never persists.
    with atomic():
        return v2contentlib_api.create_library(
            collection,
            library_type,
            org,
            slug,
            title,
            description,
            allow_public_learning,
            allow_public_read,
            library_license
        )
@shared_task(time_limit=30)
@set_code_owner_attribute
def delete_v2_library_from_v1_library(v1_library_key_string, collection_uuid):
    """
    For a V1 library, delete the matching V2 library created by the copy operation.

    The V2 key is derived directly from the V1 org/slug, relying on
    ``_create_metadata`` failing with LibraryAlreadyExists to have fixed the slug.
    ``collection_uuid`` is unused here; presumably kept for signature parity
    with ``create_v2_library_from_v1_library`` — confirm before removing.

    Returns a status dict:
        {"v1_library_id", "v2_library_id", "status": "SUCCESS"|"FAILED", "msg"}
    """
    v1_library_key = CourseKey.from_string(v1_library_key_string)
    v2_library_key = LibraryLocatorV2.from_string('lib:' + v1_library_key.org + ':' + v1_library_key.course)
    try:
        v2contentlib_api.delete_library(v2_library_key)
        return {
            "v1_library_id": v1_library_key_string,
            # str() keeps the celery result JSON-serializable, matching the
            # create task (which already returns str(key)).
            "v2_library_id": str(v2_library_key),
            "status": "SUCCESS",
            "msg": None
        }
    except Exception as error:  # lint-amnesty, pylint: disable=broad-except
        return {
            "v1_library_id": v1_library_key_string,
            "v2_library_id": str(v2_library_key),
            "status": "FAILED",
            "msg": f"Exception: {v2_library_key} did not delete: {error}"
        }
@shared_task(time_limit=30)
@set_code_owner_attribute
def create_v2_library_from_v1_library(v1_library_key_string, collection_uuid):
    """
    Write the metadata, permissions, and content of a V1 library into a V2
    library in the given collection.

    Runs in three steps — metadata creation, content-copy task kickoff, and
    role copying — and reports the first failure without raising, so the
    celery result always carries a status dict:
        {"v1_library_id", "v2_library_id", "status": "SUCCESS"|"FAILED", "msg"}
    """
    v1_library_key = CourseKey.from_string(v1_library_key_string)
    LOGGER.info(f"Copy Library task created for library: {v1_library_key}")
    try:
        v2_library_metadata = _create_metadata(v1_library_key, collection_uuid)
    except v2contentlib_api.LibraryAlreadyExists:
        return {
            "v1_library_id": v1_library_key_string,
            "v2_library_id": None,
            "status": "FAILED",
            # Message typo fixed ("aleady" -> "already").
            "msg": f"Exception: LibraryAlreadyExists {v1_library_key_string} already exists"
        }
    try:
        _create_copy_content_task(v2_library_metadata.key, v1_library_key)
    except Exception as error:  # lint-amnesty, pylint: disable=broad-except
        return {
            "v1_library_id": v1_library_key_string,
            "v2_library_id": str(v2_library_metadata.key),
            "status": "FAILED",
            "msg":
            f"Could not import content from {v1_library_key_string} into {str(v2_library_metadata.key)}: {str(error)}"
        }
    try:
        copy_v1_user_roles_into_v2_library(v2_library_metadata.key, v1_library_key)
    except Exception as error:  # lint-amnesty, pylint: disable=broad-except
        return {
            "v1_library_id": v1_library_key_string,
            "v2_library_id": str(v2_library_metadata.key),
            "status": "FAILED",
            "msg":
            f"Could not copy permissions from {v1_library_key_string} into {str(v2_library_metadata.key)}: {str(error)}"
        }
    return {
        "v1_library_id": v1_library_key_string,
        "v2_library_id": str(v2_library_metadata.key),
        "status": "SUCCESS",
        "msg": None
    }
@shared_task(time_limit=30)
@set_code_owner_attribute
def delete_v1_library(v1_library_key_string):
    """
    Delete a v1 library index by key string.

    Raises KeyError when no library exists for the given key; otherwise returns
    a status dict {"v1_library_id", "status": "SUCCESS"|"FAILED", "msg"}.
    NOTE(review): on success "msg" is "SUCCESS", while sibling tasks use None —
    confirm whether consumers rely on either convention.
    """
    v1_library_key = CourseKey.from_string(v1_library_key_string)
    if not modulestore().get_library(v1_library_key):
        raise KeyError(f"Library not found: {v1_library_key}")
    try:
        # mgmt_command attributes the deletion to a management-command user.
        # NOTE(review): third argument is positional — presumably
        # keep_instructors; confirm against delete_course's signature.
        delete_course(v1_library_key, ModuleStoreEnum.UserID.mgmt_command, True)
        LOGGER.info(f"Deleted course {v1_library_key}")
    except Exception as error:  # lint-amnesty, pylint: disable=broad-except
        return {
            "v1_library_id": v1_library_key_string,
            "status": "FAILED",
            "msg":
            f"Error occurred deleting library: {str(error)}"
        }
    return {
        "v1_library_id": v1_library_key_string,
        "status": "SUCCESS",
        "msg": "SUCCESS"
    }
@shared_task(time_limit=30)
@set_code_owner_attribute
def validate_all_library_source_blocks_ids_for_course(course, v1_to_v2_lib_map):
    """Search a Modulestore for all library source blocks in a course by querying mongo.

    Checks that every ``source_library_id`` found on either branch appears among
    the *values* of ``v1_to_v2_lib_map`` (i.e. was already migrated to a V2 id);
    raises a bare Exception naming the first offending id otherwise.

    Returns the list of source_library_ids visited, in traversal order.
    """
    store = modulestore()
    with store.bulk_operations(course.id):
        visited = []
        # Check both the draft and published branches.
        for branch in [ModuleStoreEnum.BranchName.draft, ModuleStoreEnum.BranchName.published]:
            blocks = store.get_items(
                course.id.for_branch(branch),
                settings={'source_library_id': {'$exists': True}}
            )
            for xblock in blocks:
                if xblock.source_library_id not in v1_to_v2_lib_map.values():
                    # lint-amnesty, pylint: disable=broad-except
                    raise Exception(
                        f'{xblock.source_library_id} in {course.id} is not found in mapping. Validation failed'
                    )
                visited.append(xblock.source_library_id)
        # return success
        return visited
@shared_task(time_limit=30)
@set_code_owner_attribute
def replace_all_library_source_blocks_ids_for_course(course, v1_to_v2_lib_map):  # lint-amnesty, pylint: disable=useless-return
    """Search a Modulestore for all library source blocks in a course by querying mongo.

    Replace all source_library_ids with the corresponding v2 value from the map.
    This will trigger a publish on the course for every published library source block.
    Blocks whose current id is not in the map are logged and skipped.
    """
    store = modulestore()
    with store.bulk_operations(course.id):
        # Collect blocks carrying a source_library_id, once per branch.
        draft_blocks, published_blocks = [
            store.get_items(
                course.id.for_branch(branch),
                settings={'source_library_id': {'$exists': True}}
            )
            for branch in [ModuleStoreEnum.BranchName.draft, ModuleStoreEnum.BranchName.published]
        ]
        # Index published blocks by location for O(1) pairing with drafts.
        published_dict = {block.location: block for block in published_blocks}
        for draft_library_source_block in draft_blocks:
            try:
                new_source_id = str(v1_to_v2_lib_map[draft_library_source_block.source_library_id])
            except KeyError:
                # skip invalid keys
                LOGGER.error(
                    'Key %s not found in mapping. Skipping block for course %s',
                    str({draft_library_source_block.source_library_id}),
                    str(course.id)
                )
                continue
            # The published branch should be updated as well as the draft branch.
            # This way, if authors "discard changes," they won't be reverted back to the V1 lib.
            # However, we also don't want to publish the draft branch.
            try:
                if published_dict[draft_library_source_block.location] is not None:
                    # temporarily set the published version to be the draft & publish it.
                    temp = published_dict[draft_library_source_block.location]
                    temp.source_library_id = new_source_id
                    store.update_item(temp, None)
                    store.publish(temp.location, None)
                draft_library_source_block.source_library_id = new_source_id
                store.update_item(draft_library_source_block, None)
            except KeyError:
                # Warn, but just update the draft block if no published block for draft block.
                LOGGER.warning(
                    'No matching published block for draft block %s',
                    str(draft_library_source_block.location)
                )
                draft_library_source_block.source_library_id = new_source_id
                store.update_item(draft_library_source_block, None)
    # return success
    return
@shared_task(time_limit=30)
@set_code_owner_attribute
def undo_all_library_source_blocks_ids_for_course(course, v1_to_v2_lib_map):  # lint-amnesty, pylint: disable=useless-return
    """Search a Modulestore for all library source blocks in a course by querying mongo.

    Replace all source_library_ids with the corresponding v1 value from the
    inverted map. This exists to undo changes made previously by
    ``replace_all_library_source_blocks_ids_for_course``.

    NOTE(review): unlike the replace task, this does not wrap the work in
    ``store.bulk_operations`` — confirm whether that asymmetry is intentional.
    """
    # Invert the mapping: v2 id -> v1 id.
    v2_to_v1_lib_map = {v: k for k, v in v1_to_v2_lib_map.items()}
    store = modulestore()
    # Collect blocks carrying a source_library_id, once per branch.
    draft_blocks, published_blocks = [
        store.get_items(
            course.id.for_branch(branch),
            settings={'source_library_id': {'$exists': True}}
        )
        for branch in [ModuleStoreEnum.BranchName.draft, ModuleStoreEnum.BranchName.published]
    ]
    published_dict = {block.location: block for block in published_blocks}
    for draft_library_source_block in draft_blocks:
        try:
            new_source_id = str(v2_to_v1_lib_map[draft_library_source_block.source_library_id])
        except KeyError:
            # skip invalid keys
            LOGGER.error(
                'Key %s not found in mapping. Skipping block for course %s',
                str({draft_library_source_block.source_library_id}),
                str(course.id)
            )
            continue
        # The published branch should be updated as well as the draft branch.
        # This way, if authors "discard changes," they won't be reverted back to the V1 lib.
        # However, we also don't want to publish the draft branch.
        try:
            if published_dict[draft_library_source_block.location] is not None:
                # temporarily set the published version to be the draft & publish it.
                temp = published_dict[draft_library_source_block.location]
                temp.source_library_id = new_source_id
                store.update_item(temp, None)
                store.publish(temp.location, None)
            draft_library_source_block.source_library_id = new_source_id
            store.update_item(draft_library_source_block, None)
        except KeyError:
            # Warn, but just update the draft block if no published block for draft block.
            LOGGER.warning(
                'No matching published block for draft block %s',
                str(draft_library_source_block.location)
            )
            draft_library_source_block.source_library_id = new_source_id
            store.update_item(draft_library_source_block, None)
    # return success
    return

View File

@@ -1080,6 +1080,36 @@ class ContentStoreTest(ContentStoreTestCase):
"""Test new course creation - happy path"""
self.assert_created_course()
@ddt.data(True, False)
@mock.patch(
    'cms.djangoapps.contentstore.views.course.default_enable_flexible_peer_openassessments'
)
def test_create_course__default_enable_flexible_peer_openassessments(
        self,
        mock_toggle_state,
        mock_default_enable_flexible_peer_openassessments
):
    """
    Test that flex peer grading is forced on, when enabled.

    Parametrized over both states of the org-level default-enable toggle.
    """
    # Given a new course run
    test_course_data = {}
    test_course_data.update(self.course_data)
    course_key = _get_course_id(self.store, test_course_data)

    # ... with org configured to / not to enable flex grading
    mock_default_enable_flexible_peer_openassessments.return_value = mock_toggle_state

    # When I create a new course
    new_course_data = _create_course(self, course_key, test_course_data)

    # Then the process completes successfully
    new_course_key = CourseKey.from_string(new_course_data['course_key'])
    new_course = self.store.get_course(new_course_key)

    # ... and our setting got toggled appropriately on the course
    self.assertEqual(new_course.force_on_flexible_peer_openassessments, mock_toggle_state)
@override_settings(DEFAULT_COURSE_LANGUAGE='hr')
def test_create_course_default_language(self):
"""Test new course creation and verify default language"""
@@ -2104,6 +2134,8 @@ class EntryPageTestCase(TestCase):
def _create_course(test, course_key, course_data):
"""
Creates a course via an AJAX request and verifies the URL returned in the response.
Returns the data of the POST response
"""
course_url = get_url('course_handler', course_key, 'course_key_string')
response = test.client.ajax_post(course_url, course_data)
@@ -2112,6 +2144,8 @@ def _create_course(test, course_key, course_data):
test.assertNotIn('ErrMsg', data)
test.assertEqual(data['url'], course_url)
return data
def _get_course_id(store, course_data):
"""Returns the course ID."""

View File

@@ -4,6 +4,7 @@ Test view handler for rerun (and eventually create)
import datetime
from itertools import product
from unittest import mock
import ddt
@@ -317,3 +318,54 @@ class TestCourseListing(ModuleStoreTestCase):
'run': '2021_T1'
})
self.assertEqual(response.status_code, 403)
@ddt.data(*product([True, False], [True, False]))
@ddt.unpack
@mock.patch(
    'cms.djangoapps.contentstore.views.course.default_enable_flexible_peer_openassessments'
)
def test_default_enable_flexible_peer_openassessments_on_rerun(
        self,
        mock_toggle_state,
        mock_original_course_setting,
        mock_default_enable_flexible_peer_openassessments
):
    """
    Test that flex peer grading is forced on, when enabled.

    Parametrized over (default-enable toggle state, original course setting).
    """
    # Given a valid course to rerun
    add_organization({
        'name': 'Test Flex Grading',
        'short_name': self.source_course_key.org,
        'description': 'Test roll-forward of flex grading setting',
    })
    source_course = self.store.get_course(self.source_course_key)
    source_course.force_on_flexible_peer_openassessments = mock_original_course_setting
    self.store.update_item(source_course, self.user.id)

    # ... with org configured to / not to enable flex grading
    mock_default_enable_flexible_peer_openassessments.return_value = mock_toggle_state

    # When I create a new course
    response = self.client.ajax_post(self.course_create_rerun_url, {
        'source_course_key': str(self.source_course_key),
        'org': self.source_course_key.org,
        'course': self.source_course_key.course,
        'run': 'copy',
        'display_name': 'New, exciting course!',
    })

    # Then the process completes successfully
    self.assertEqual(response.status_code, 200)
    data = parse_json(response)
    dest_course_key = CourseKey.from_string(data['destination_course_key'])
    dest_course = self.store.get_course(dest_course_key)

    # ... and our setting got enabled appropriately on our new course
    if mock_toggle_state:
        self.assertTrue(dest_course.force_on_flexible_peer_openassessments)
    # ... or preserved if the default enable setting is not on
    else:
        self.assertEqual(
            source_course.force_on_flexible_peer_openassessments,
            dest_course.force_on_flexible_peer_openassessments
        )

View File

@@ -1,8 +1,6 @@
"""
Tests for validate Internationalization and XBlock i18n service.
"""
import gettext
from unittest import mock, skip
@@ -17,7 +15,6 @@ from xmodule.tests.test_export import PureXBlock
from cms.djangoapps.contentstore.tests.utils import AjaxEnabledTestClient
from cms.djangoapps.contentstore.views.preview import _prepare_runtime_for_preview
from common.djangoapps.student.tests.factories import UserFactory
from openedx.core.lib.edx_six import get_gettext
class FakeTranslations(XBlockI18nService):
@@ -68,13 +65,8 @@ class TestXBlockI18nService(ModuleStoreTestCase):
self.test_language = 'dummy language'
self.request = mock.Mock()
self.course = CourseFactory.create()
self.field_data = mock.Mock()
self.block = BlockFactory(category="pure", parent=self.course)
_prepare_runtime_for_preview(
self.request,
self.block,
self.field_data,
)
_prepare_runtime_for_preview(self.request, self.block)
self.addCleanup(translation.deactivate)
def get_block_i18n_service(self, block):
@@ -99,7 +91,7 @@ class TestXBlockI18nService(ModuleStoreTestCase):
def __init__(self, module):
self.module = module
self.old_ugettext = get_gettext(module)
self.old_ugettext = module.gettext
def __enter__(self):
def new_ugettext(*args, **kwargs):
@@ -126,7 +118,6 @@ class TestXBlockI18nService(ModuleStoreTestCase):
# Check that the old ugettext has been put back into place
self.assertEqual(i18n_service.ugettext(self.test_language), 'dummy language')
@mock.patch('django.utils.translation.ugettext', mock.Mock(return_value='XYZ-TEST-LANGUAGE'))
@mock.patch('django.utils.translation.gettext', mock.Mock(return_value='XYZ-TEST-LANGUAGE'))
def test_django_translator_in_use_with_empty_block(self):
"""
@@ -135,7 +126,7 @@ class TestXBlockI18nService(ModuleStoreTestCase):
i18n_service = XBlockI18nService(None)
self.assertEqual(i18n_service.ugettext(self.test_language), 'XYZ-TEST-LANGUAGE')
@mock.patch('django.utils.translation.ugettext', mock.Mock(return_value='XYZ-TEST-LANGUAGE'))
@mock.patch('django.utils.translation.gettext', mock.Mock(return_value='XYZ-TEST-LANGUAGE'))
def test_message_catalog_translations(self):
"""
Test: Message catalog from FakeTranslation should return required translations.
@@ -157,9 +148,9 @@ class TestXBlockI18nService(ModuleStoreTestCase):
with mock.patch('gettext.translation', return_value=_translator(domain='text', localedir=localedir,
languages=[get_language()])):
i18n_service = self.get_block_i18n_service(self.block)
self.assertEqual(get_gettext(i18n_service)('Hello'), 'Hello')
self.assertNotEqual(get_gettext(i18n_service)('Hello'), 'fr-hello-world')
self.assertNotEqual(get_gettext(i18n_service)('Hello'), 'es-hello-world')
self.assertEqual(i18n_service.gettext('Hello'), 'Hello')
self.assertNotEqual(i18n_service.gettext('Hello'), 'fr-hello-world')
self.assertNotEqual(i18n_service.gettext('Hello'), 'es-hello-world')
translation.activate("fr")
with mock.patch('gettext.translation', return_value=_translator(domain='text', localedir=localedir,

View File

@@ -7,9 +7,6 @@ from django.conf import settings
from xblock.core import XBlock
from xblock.fields import String
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.mongo.draft import as_draft
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.xml_importer import import_course_from_xml
@@ -41,14 +38,6 @@ class XBlockImportTest(ModuleStoreTestCase):
'set by xml'
)
@XBlock.register_temp_plugin(StubXBlock)
def test_import_draft(self):
self._assert_import(
'pure_xblock_draft',
'set by xml',
has_draft=True
)
def _assert_import(self, course_dir, expected_field_val, has_draft=False):
"""
Import a course from XML, then verify that the XBlock was loaded
@@ -66,22 +55,12 @@ class XBlockImportTest(ModuleStoreTestCase):
"""
# It is necessary to use the "old mongo" modulestore because split doesn't work
# with the "has_draft" logic below.
store = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.mongo) # pylint: disable=protected-access
courses = import_course_from_xml(
store, self.user.id, TEST_DATA_DIR, [course_dir], create_if_not_present=True
self.store, self.user.id, TEST_DATA_DIR, [course_dir], create_if_not_present=True
)
xblock_location = courses[0].id.make_usage_key('stubxblock', 'xblock_test')
if has_draft:
xblock_location = as_draft(xblock_location)
xblock = store.get_item(xblock_location)
xblock = self.store.get_item(xblock_location)
self.assertTrue(isinstance(xblock, StubXBlock))
self.assertEqual(xblock.test_field, expected_field_val)
if has_draft:
draft_xblock = store.get_item(xblock_location)
self.assertTrue(getattr(draft_xblock, 'is_draft', False))
self.assertTrue(isinstance(draft_xblock, StubXBlock))
self.assertEqual(draft_xblock.test_field, expected_field_val)

View File

@@ -1014,28 +1014,3 @@ class TestOverrides(LibraryTestCase):
self.assertEqual(self.lc_block.source_library_version, duplicate.source_library_version)
problem2_in_course = store.get_item(duplicate.children[0])
self.assertEqual(problem2_in_course.display_name, self.original_display_name)
class TestIncompatibleModuleStore(LibraryTestCase):
"""
Tests for proper validation errors with an incompatible course modulestore.
"""
def setUp(self):
super().setUp()
# Create a course in an incompatible modulestore.
with modulestore().default_store(ModuleStoreEnum.Type.mongo):
self.course = CourseFactory.create()
# Add a LibraryContent block to the course:
self.lc_block = self._add_library_content_block(self.course, self.lib_key)
def test_incompatible_modulestore(self):
"""
Verifies that, if a user is using a modulestore that doesn't support libraries,
a validation error will be produced.
"""
validation = self.lc_block.validate()
self.assertEqual(validation.summary.type, validation.summary.ERROR)
self.assertIn(
"This course does not support content libraries.", validation.summary.text)

View File

@@ -5,15 +5,18 @@ Unit tests for video utils.
from datetime import datetime
from unittest import TestCase
from unittest.mock import patch
from unittest.mock import MagicMock, patch
import ddt
import pytz
import requests
from django.conf import settings
from django.core.files.base import ContentFile
from django.core.files.storage import get_storage_class
from django.core.files.uploadedfile import UploadedFile
from django.test.utils import override_settings
from edxval.api import create_profile, create_video, get_course_video_image_url, update_video_image
from storages.backends.s3boto3 import S3Boto3Storage
from cms.djangoapps.contentstore.tests.utils import CourseTestCase
from cms.djangoapps.contentstore.video_utils import (
@@ -365,3 +368,112 @@ class ScrapeVideoThumbnailsTestCase(CourseTestCase):
# Verify that no image is attached to video1.
video1_image_url = get_course_video_image_url(course_id=course_id, edx_video_id=video1_edx_video_id)
self.assertIsNone(video1_image_url)
@ddt.ddt
class S3Boto3TestCase(TestCase):
    """
    Verify that the django-storages S3Boto3 backend is resolved correctly from
    VIDEO_IMAGE_SETTINGS and that its default_acl handling matches the pinned
    package version's behaviour.
    """

    def setUp(self):
        # Use a storage with a mocked S3 connection so no network calls are made.
        self.storage = S3Boto3Storage()
        self.storage._connections.connection = MagicMock()  # pylint: disable=protected-access

    def test_video_backend(self):
        # The dotted path configured for video images must resolve to S3Boto3Storage
        # and be constructible with the project's STORAGE_KWARGS.
        self.assertEqual(
            S3Boto3Storage,
            get_storage_class(
                'storages.backends.s3boto3.S3Boto3Storage',
            )(**settings.VIDEO_IMAGE_SETTINGS.get('STORAGE_KWARGS', {})).__class__
        )

    @override_settings(VIDEO_IMAGE_SETTINGS={
        'STORAGE_CLASS': 'storages.backends.s3boto3.S3Boto3Storage',
        'STORAGE_KWARGS':
            {'bucket_name': 'test', 'default_acl': None, 'location': 'abc/def'}}
    )
    def test_boto3_backend_with_params(self):
        # Instantiating the configured class with bucket/ACL/location kwargs
        # should also yield an S3Boto3Storage instance.
        storage = get_storage_class(
            settings.VIDEO_IMAGE_SETTINGS.get('STORAGE_CLASS', {})
        )(**settings.VIDEO_IMAGE_SETTINGS.get('STORAGE_KWARGS', {}))
        self.assertEqual(S3Boto3Storage, storage.__class__)

    def test_storage_without_global_default_acl_setting(self):
        """
        django-storages 1.9.1 ships a package-level default acl of `public-read`:
        even when AWS_DEFAULT_ACL is not defined the package sends public-read.
        In 1.10.1 this test will fail because that version has no default value.
        """
        name = 'test_storage_save231.txt'
        content = ContentFile('new content')
        storage = S3Boto3Storage(**{'bucket_name': 'test'})
        storage._connections.connection = MagicMock()  # pylint: disable=protected-access
        storage.save(name, content)
        storage.bucket.Object.assert_called_once_with(name)
        obj = storage.bucket.Object.return_value
        # No explicit ACL entry is expected here; the package default applies implicitly.
        obj.upload_fileobj.assert_called_with(
            content,
            ExtraArgs={
                'ContentType': 'text/plain',
            }
        )

    @override_settings(AWS_DEFAULT_ACL='public-read')
    @ddt.data(
        ('public-read', 'public-read'),
        ('private', 'private'),
        (None, None)
    )
    @ddt.unpack
    def test_storage_without_global_default_acl_setting_and_bucket_acls(self, default_acl, output_acl):
        """
        With AWS_DEFAULT_ACL set globally (public-read here), a bucket-level
        default_acl overrides it; passing None omits the ACL argument entirely.
        """
        name = 'test_storage_save.txt'
        content = ContentFile('new content')
        storage = S3Boto3Storage(**{'bucket_name': 'test', 'default_acl': default_acl})
        storage._connections.connection = MagicMock()  # pylint: disable=protected-access
        storage.save(name, content)
        storage.bucket.Object.assert_called_once_with(name)
        obj = storage.bucket.Object.return_value
        ExtraArgs = {
            'ACL': output_acl,
            'ContentType': 'text/plain',
        }
        if default_acl is None:
            # default_acl=None means "send no ACL", not "send ACL=None".
            del ExtraArgs['ACL']
        obj.upload_fileobj.assert_called_with(
            content,
            ExtraArgs=ExtraArgs
        )

    @ddt.data('public-read', 'private')
    def test_storage_passing_default_acl_as_none(self, input_acl):
        """
        A bucket-level default_acl of None omits the ACL regardless of the
        global AWS_DEFAULT_ACL value in force.
        """
        with override_settings(AWS_DEFAULT_ACL=input_acl):
            name = 'test_storage_save231.txt'
            content = ContentFile('new content')
            storage = S3Boto3Storage(**{'bucket_name': 'test', 'default_acl': None})
            storage._connections.connection = MagicMock()  # pylint: disable=protected-access
            storage.save(name, content)
            storage.bucket.Object.assert_called_once_with(name)
            obj = storage.bucket.Object.return_value
            obj.upload_fileobj.assert_called_with(
                content,
                ExtraArgs={
                    'ContentType': 'text/plain',
                }
            )

View File

@@ -12,7 +12,7 @@ from opaque_keys.edx.keys import AssetKey
from xmodule.contentstore.django import contentstore
from xmodule.modulestore.inheritance import own_metadata
from xmodule.modulestore.split_mongo.split import SplitMongoModuleStore
from xmodule.modulestore.tests.django_utils import TEST_DATA_MONGO_MODULESTORE, ModuleStoreTestCase
from xmodule.modulestore.tests.django_utils import TEST_DATA_SPLIT_MODULESTORE, ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.utils import ProceduralCourseTestMixin
from xmodule.tests.test_transcripts_utils import YoutubeVideoHTMLResponse
@@ -73,7 +73,7 @@ class CourseTestCase(ProceduralCourseTestMixin, ModuleStoreTestCase):
Base class for Studio tests that require a logged in user and a course.
Also provides helper methods for manipulating and verifying the course.
"""
MODULESTORE = TEST_DATA_MONGO_MODULESTORE
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
def setUp(self):
"""
@@ -123,7 +123,7 @@ class CourseTestCase(ProceduralCourseTestMixin, ModuleStoreTestCase):
SEQUENTIAL = 'vertical_sequential'
DRAFT_HTML = 'draft_html'
DRAFT_VIDEO = 'draft_video'
LOCKED_ASSET_KEY = AssetKey.from_string('/c4x/edX/toy/asset/sample_static.html')
LOCKED_ASSET_KEY = AssetKey.from_string('asset-v1:edX+toy+2012_Fall+type@asset+block@sample_static.html')
def assertCoursesEqual(self, course1_id, course2_id):
"""

View File

@@ -159,6 +159,25 @@ def use_new_problem_editor():
return ENABLE_NEW_PROBLEM_EDITOR_FLAG.is_enabled()
# .. toggle_name: new_editors.add_game_block_button
# .. toggle_implementation: WaffleFlag
# .. toggle_default: False
# .. toggle_description: This flag enables the creation of the new games block
# .. toggle_use_cases: temporary
# .. toggle_creation_date: 2023-07-26
# .. toggle_target_removal_date: 2023-09-30
# .. toggle_tickets: TNL-10924
# .. toggle_warning:
ENABLE_ADD_GAME_BLOCK_FLAG = WaffleFlag('new_editors.add_game_block_button', __name__)
def use_add_game_block():
    """
    Return True if the "add game block" button is enabled via the
    ``new_editors.add_game_block_button`` waffle flag.
    """
    return ENABLE_ADD_GAME_BLOCK_FLAG.is_enabled()
# .. toggle_name: contentstore.individualize_anonymous_user_id
# .. toggle_implementation: CourseWaffleFlag
# .. toggle_default: False
@@ -179,17 +198,17 @@ def individualize_anonymous_user_id(course_id):
return INDIVIDUALIZE_ANONYMOUS_USER_ID.is_enabled(course_id)
# .. toggle_name: contentstore.enable_copy_paste_feature
# .. toggle_name: contentstore.enable_copy_paste_units
# .. toggle_implementation: WaffleFlag
# .. toggle_default: False
# .. toggle_description: Moves most component-level actions into a submenu and adds new "Copy Component" and "Paste
# Component" actions which can be used to copy components (XBlocks) within or among courses.
# .. toggle_description: Moves most unit-level actions into a submenu and adds new "Copy Unit" and "Paste
# Unit" actions which can be used to copy units within or among courses.
# .. toggle_use_cases: temporary
# .. toggle_creation_date: 2023-02-28
# .. toggle_target_removal_date: 2023-05-01
# .. toggle_creation_date: 2023-08-01
# .. toggle_target_removal_date: 2023-10-01
# .. toggle_tickets: https://github.com/openedx/modular-learning/issues/11 https://github.com/openedx/modular-learning/issues/50
ENABLE_COPY_PASTE_FEATURE = WaffleFlag(
f'{CONTENTSTORE_NAMESPACE}.enable_copy_paste_feature',
ENABLE_COPY_PASTE_UNITS = WaffleFlag(
f'{CONTENTSTORE_NAMESPACE}.enable_copy_paste_units',
__name__,
CONTENTSTORE_LOG_PREFIX,
)
@@ -238,7 +257,7 @@ def use_new_home_page():
return ENABLE_NEW_STUDIO_HOME_PAGE.is_enabled()
# .. toggle_name: new_studio_mfe.use_new_custom_pages
# .. toggle_name: contentstore.new_studio_mfe.use_new_custom_pages
# .. toggle_implementation: CourseWaffleFlag
# .. toggle_default: False
# .. toggle_description: This flag enables the use of the new studio custom pages mfe
@@ -258,7 +277,7 @@ def use_new_custom_pages(course_key):
return ENABLE_NEW_STUDIO_CUSTOM_PAGES.is_enabled(course_key)
# .. toggle_name: new_studio_mfe.use_new_schedule_details_page
# .. toggle_name: contentstore.new_studio_mfe.use_new_schedule_details_page
# .. toggle_implementation: CourseWaffleFlag
# .. toggle_default: False
# .. toggle_description: This flag enables the use of the new studio schedule and details mfe
@@ -278,7 +297,7 @@ def use_new_schedule_details_page(course_key):
return ENABLE_NEW_STUDIO_SCHEDULE_DETAILS_PAGE.is_enabled(course_key)
# .. toggle_name: new_studio_mfe.use_new_advanced_settings_page
# .. toggle_name: contentstore.new_studio_mfe.use_new_advanced_settings_page
# .. toggle_implementation: CourseWaffleFlag
# .. toggle_default: False
# .. toggle_description: This flag enables the use of the new studio advanced settings page mfe
@@ -298,7 +317,7 @@ def use_new_advanced_settings_page(course_key):
return ENABLE_NEW_STUDIO_ADVANCED_SETTINGS_PAGE.is_enabled(course_key)
# .. toggle_name: new_studio_mfe.use_new_grading_page
# .. toggle_name: contentstore.new_studio_mfe.use_new_grading_page
# .. toggle_implementation: CourseWaffleFlag
# .. toggle_default: False
# .. toggle_description: This flag enables the use of the new studio grading page mfe
@@ -318,7 +337,7 @@ def use_new_grading_page(course_key):
return ENABLE_NEW_STUDIO_GRADING_PAGE.is_enabled(course_key)
# .. toggle_name: new_studio_mfe.use_new_updates_page
# .. toggle_name: contentstore.new_studio_mfe.use_new_updates_page
# .. toggle_implementation: CourseWaffleFlag
# .. toggle_default: False
# .. toggle_description: This flag enables the use of the new studio updates page mfe
@@ -338,7 +357,7 @@ def use_new_updates_page(course_key):
return ENABLE_NEW_STUDIO_UPDATES_PAGE.is_enabled(course_key)
# .. toggle_name: new_studio_mfe.use_new_import_page
# .. toggle_name: contentstore.new_studio_mfe.use_new_import_page
# .. toggle_implementation: CourseWaffleFlag
# .. toggle_default: False
# .. toggle_description: This flag enables the use of the new studio import page mfe
@@ -358,7 +377,7 @@ def use_new_import_page(course_key):
return ENABLE_NEW_STUDIO_IMPORT_PAGE.is_enabled(course_key)
# .. toggle_name: new_studio_mfe.use_new_export_page
# .. toggle_name: contentstore.new_studio_mfe.use_new_export_page
# .. toggle_implementation: CourseWaffleFlag
# .. toggle_default: False
# .. toggle_description: This flag enables the use of the new studio export page mfe
@@ -378,7 +397,7 @@ def use_new_export_page(course_key):
return ENABLE_NEW_STUDIO_EXPORT_PAGE.is_enabled(course_key)
# .. toggle_name: new_studio_mfe.use_new_files_uploads_page
# .. toggle_name: contentstore.new_studio_mfe.use_new_files_uploads_page
# .. toggle_implementation: CourseWaffleFlag
# .. toggle_default: False
# .. toggle_description: This flag enables the use of the new studio files and uploads page mfe
@@ -398,7 +417,7 @@ def use_new_files_uploads_page(course_key):
return ENABLE_NEW_STUDIO_FILES_UPLOADS_PAGE.is_enabled(course_key)
# .. toggle_name: new_studio_mfe.use_new_video_uploads_page
# .. toggle_name: contentstore.new_studio_mfe.use_new_video_uploads_page
# .. toggle_implementation: CourseWaffleFlag
# .. toggle_default: False
# .. toggle_description: This flag enables the use of the new video uploads page mfe
@@ -418,7 +437,7 @@ def use_new_video_uploads_page(course_key):
return ENABLE_NEW_STUDIO_VIDEO_UPLOADS_PAGE.is_enabled(course_key)
# .. toggle_name: new_studio_mfe.use_new_course_outline_page
# .. toggle_name: contentstore.new_studio_mfe.use_new_course_outline_page
# .. toggle_implementation: CourseWaffleFlag
# .. toggle_default: False
# .. toggle_description: This flag enables the use of the new studio course outline page mfe
@@ -438,7 +457,7 @@ def use_new_course_outline_page(course_key):
return ENABLE_NEW_STUDIO_COURSE_OUTLINE_PAGE.is_enabled(course_key)
# .. toggle_name: new_studio_mfe.use_new_unit_page
# .. toggle_name: contentstore.new_studio_mfe.use_new_unit_page
# .. toggle_implementation: CourseWaffleFlag
# .. toggle_default: False
# .. toggle_description: This flag enables the use of the new studio course outline page mfe
@@ -458,7 +477,7 @@ def use_new_unit_page(course_key):
return ENABLE_NEW_STUDIO_UNIT_PAGE.is_enabled(course_key)
# .. toggle_name: new_studio_mfe.use_new_course_team_page
# .. toggle_name: contentstore.new_studio_mfe.use_new_course_team_page
# .. toggle_implementation: CourseWaffleFlag
# .. toggle_default: False
# .. toggle_description: This flag enables the use of the new studio course team page mfe
@@ -476,3 +495,45 @@ def use_new_course_team_page(course_key):
Returns a boolean if new studio course team mfe is enabled
"""
return ENABLE_NEW_STUDIO_COURSE_TEAM_PAGE.is_enabled(course_key)
# .. toggle_name: contentstore.mock_video_uploads
# .. toggle_implementation: WaffleFlag
# .. toggle_default: False
# .. toggle_description: This flag mocks contentstore video uploads for local development, if you don't have access to AWS
# .. toggle_use_cases: open_edx
# .. toggle_creation_date: 2023-07-25
# .. toggle_tickets: TNL-10897
# .. toggle_warning:
MOCK_VIDEO_UPLOADS = WaffleFlag(
f'{CONTENTSTORE_NAMESPACE}.mock_video_uploads', __name__)
def use_mock_video_uploads():
    """
    Return True if contentstore video uploads should be mocked for local
    development (``contentstore.mock_video_uploads`` waffle flag), e.g. when
    no AWS access is available.
    """
    return MOCK_VIDEO_UPLOADS.is_enabled()
# .. toggle_name: contentstore.default_enable_flexible_peer_openassessments
# .. toggle_implementation: CourseWaffleFlag
# .. toggle_default: False
# .. toggle_description: This flag turns on the force_on_flexible_peer_openassessments
# setting for course reruns or new courses, where enabled.
# .. toggle_use_cases: temporary
# .. toggle_creation_date: 2023-06-27
# .. toggle_target_removal_date: 2024-01-27
# .. toggle_tickets: AU-1289
# .. toggle_warning:
DEFAULT_ENABLE_FLEXIBLE_PEER_OPENASSESSMENTS = CourseWaffleFlag(
f'{CONTENTSTORE_NAMESPACE}.default_enable_flexible_peer_openassessments', __name__)
def default_enable_flexible_peer_openassessments(course_key):
    """
    Return True if ORA flexible peer grading should be toggled on for a
    course rerun or new course. We expect this to be set at the organization
    level to opt in/out of rolling forward this feature.

    Arguments:
        course_key: the CourseKey the course waffle flag is evaluated against.
    """
    return DEFAULT_ENABLE_FLEXIBLE_PEER_OPENASSESSMENTS.is_enabled(course_key)

View File

@@ -0,0 +1,265 @@
"""
Business logic for video transcripts.
"""
import logging
import os
from django.core.files.base import ContentFile
from django.http import HttpResponse, HttpResponseNotFound
from django.utils.translation import gettext as _
from edxval.api import (
create_or_update_video_transcript,
delete_video_transcript as delete_video_transcript_source_function,
get_3rd_party_transcription_plans,
get_available_transcript_languages,
get_video_transcript_data,
update_transcript_credentials_state_for_org,
get_video_transcript
)
from opaque_keys.edx.keys import CourseKey
from common.djangoapps.util.json_request import JsonResponse
from openedx.core.djangoapps.video_config.models import VideoTranscriptEnabledFlag
from openedx.core.djangoapps.video_pipeline.api import update_3rd_party_transcription_service_credentials
from xmodule.video_block.transcripts_utils import Transcript, TranscriptsGenerationException # lint-amnesty, pylint: disable=wrong-import-order
from .toggles import use_mock_video_uploads
from .video_storage_handlers import TranscriptProvider
LOGGER = logging.getLogger(__name__)
class TranscriptionProviderErrorType:
    """
    Transcription provider's error types enumeration.
    """
    # Error code returned by edx-video-pipeline when the stored API
    # credentials for a third-party transcription provider are rejected.
    INVALID_CREDENTIALS = 1
def validate_transcript_credentials(provider, **credentials):
    """
    Validate transcript provider credentials.

    Validations:
        Providers must be either 3PlayMedia or Cielo24.
        In case of:
            3PlayMedia - 'api_key' and 'api_secret_key' are required.
            Cielo24 - 'api_key' and 'username' are required.

        Any extra/unrelated parameters passed in ``credentials`` are ignored;
        only the validated ones are returned.

    Returns:
        (error_message, validated_credentials) tuple; ``error_message`` is an
        empty string when validation succeeds.
    """
    validated = {}

    # Unknown provider: nothing else to validate.
    if provider not in get_3rd_party_transcription_plans():
        return f'Invalid Provider {provider}.', validated

    # Per-provider mandatory credential fields.
    required = []
    if provider == TranscriptProvider.THREE_PLAY_MEDIA:
        required = ['api_key', 'api_secret_key']
    elif provider == TranscriptProvider.CIELO24:
        required = ['api_key', 'username']

    absent = [field for field in required if field not in credentials]
    if absent:
        return '{missing} must be specified.'.format(missing=' and '.join(absent)), validated

    # Keep only the whitelisted fields, dropping any extras the caller sent.
    validated = {field: credentials[field] for field in required}
    return '', validated
def handle_transcript_credentials(request, course_key_string):
    """
    JSON view handler to update the transcript organization credentials.

    Arguments:
        request: WSGI request object (credentials are read from ``request.json``)
        course_key_string: A course identifier to extract the org.

    Returns:
        - A 200 response if credentials are valid and successfully updated in edx-video-pipeline.
        - A 404 response if transcript feature is not enabled for this course.
        - A 400 if credentials do not pass validations, hence not updated in edx-video-pipeline.
    """
    course_key = CourseKey.from_string(course_key_string)
    if not VideoTranscriptEnabledFlag.feature_enabled(course_key):
        return HttpResponseNotFound()

    # NOTE: pop() removes 'provider' so the remaining request.json holds only
    # the raw credential fields forwarded to validation below.
    provider = request.json.pop('provider')
    error_message, validated_credentials = validate_transcript_credentials(provider=provider, **request.json)
    if error_message:
        response = JsonResponse({'error': error_message}, status=400)
    else:
        # Send the validated credentials to edx-video-pipeline and video-encode-manager
        credentials_payload = dict(validated_credentials, org=course_key.org, provider=provider)
        error_response, is_updated = update_3rd_party_transcription_service_credentials(**credentials_payload)
        # Send appropriate response based on whether credentials were updated or not.
        if is_updated:
            # Cache credentials state in edx-val.
            update_transcript_credentials_state_for_org(org=course_key.org, provider=provider, exists=is_updated)
            response = JsonResponse(status=200)
        else:
            # Error response would contain error types and the following
            # error type is received from edx-video-pipeline whenever we've
            # got invalid credentials for a provider. Its kept this way because
            # edx-video-pipeline doesn't support i18n translations yet.
            # NOTE(review): for any other error_type, error_message stays ''
            # and the 400 body carries an empty error string — confirm intended.
            error_type = error_response.get('error_type')
            if error_type == TranscriptionProviderErrorType.INVALID_CREDENTIALS:
                error_message = _('The information you entered is incorrect.')
            response = JsonResponse({'error': error_message}, status=400)
    return response
def handle_transcript_download(request):
    """
    JSON view handler to download a transcript as an SRT attachment.

    Arguments:
        request: WSGI request object ('edx_video_id' and 'language_code'
            are read from the query string)

    Returns:
        - A 200 response with SRT transcript file attached.
        - A 400 if there is a validation error.
        - A 404 if there is no such transcript.
    """
    required_attrs = ('edx_video_id', 'language_code')
    absent = [attr for attr in required_attrs if attr not in request.GET]
    if absent:
        error = _('The following parameters are required: {missing}.').format(missing=', '.join(absent))
        return JsonResponse({'error': error}, status=400)

    transcript = get_video_transcript_data(
        video_id=request.GET['edx_video_id'],
        language_code=request.GET['language_code'],
    )
    if not transcript:
        return HttpResponseNotFound()

    basename, extension = os.path.splitext(transcript['file_name'])
    # Convert from the stored format (extension minus the leading dot) to SRT.
    srt_content = Transcript.convert(
        content=transcript['content'],
        input_format=extension[1:],
        output_format=Transcript.SRT
    )
    # Construct an HTTP response with the SRT file attached.
    response = HttpResponse(srt_content, content_type=Transcript.mime_types[Transcript.SRT])
    response['Content-Disposition'] = f'attachment; filename="{basename}.{Transcript.SRT}"'
    return response
def _create_or_update_video_transcript(**kwargs):
    """
    Thin wrapper over edx-val's create_or_update_video_transcript that
    short-circuits (returning True) when video uploads are mocked for
    local development, so no external storage is touched.
    """
    if use_mock_video_uploads():
        return True
    return create_or_update_video_transcript(**kwargs)
def upload_transcript(request):
    """
    Upload a transcript file for a video.

    Arguments:
        request: A WSGI request object whose POST data carries
            'edx_video_id', 'language_code' and 'new_language_code', and
            whose FILES holds the transcript under 'file' (SRT format).

    Returns:
        - A 201 response when the transcript is converted and stored.
        - A 400 response when the file is not valid UTF-8/SRT.
    """
    edx_video_id = request.POST['edx_video_id']
    language_code = request.POST['language_code']
    new_language_code = request.POST['new_language_code']
    transcript_file = request.FILES['file']
    try:
        # Convert SRT transcript into an SJSON format
        # and upload it to S3.
        sjson_subs = Transcript.convert(
            content=transcript_file.read().decode('utf-8'),
            input_format=Transcript.SRT,
            output_format=Transcript.SJSON
        ).encode()
        _create_or_update_video_transcript(
            video_id=edx_video_id,
            language_code=language_code,
            metadata={
                'provider': TranscriptProvider.CUSTOM,
                'file_format': Transcript.SJSON,
                'language_code': new_language_code
            },
            file_data=ContentFile(sjson_subs),
        )
    except (TranscriptsGenerationException, UnicodeDecodeError):
        LOGGER.error("Unable to update transcript on edX video %s for language %s", edx_video_id, new_language_code)
        response = JsonResponse(
            {'error': _('There is a problem with this transcript file. Try to upload a different file.')},
            status=400
        )
    else:
        # Bug fix: this success log previously lived in a `finally` clause, so
        # "Updated transcript" was logged even when the update had just failed.
        LOGGER.info("Updated transcript on edX video %s for language %s", edx_video_id, new_language_code)
        response = JsonResponse(status=201)
    return response
def validate_transcript_upload_data(data, files):
    """
    Validates video transcript file.

    Arguments:
        data: A request's data part (must carry 'edx_video_id',
            'language_code' and 'new_language_code').
        files: A request's files part (must carry 'file').

    Returns:
        None or String
        If there is error returns error message otherwise None.
    """
    error = None
    # Validate the must have attributes - this error is unlikely to be faced by common users.
    must_have_attrs = ['edx_video_id', 'language_code', 'new_language_code']
    missing = [attr for attr in must_have_attrs if attr not in data]
    if missing:
        error = _('The following parameters are required: {missing}.').format(missing=', '.join(missing))
    elif (
        data['language_code'] != data['new_language_code'] and
        data['new_language_code'] in get_available_transcript_languages(video_id=data['edx_video_id'])
    ):
        # Bug fix: translate the template first, then interpolate. The previous
        # `_('...'.format(...))` formatted before translation, so the formatted
        # string could never match a gettext catalog entry (it was flagged as
        # translation-of-non-string by lint-amnesty).
        error = _('A transcript with the "{language_code}" language code already exists.').format(
            language_code=data['new_language_code']
        )
    elif 'file' not in files:
        error = _('A transcript file is required.')
    return error
def delete_video_transcript(video_id=None, language_code=None):
    """Delegate transcript deletion to edx-val (thin seam, e.g. for mocking)."""
    return delete_video_transcript_source_function(video_id=video_id, language_code=language_code)
def delete_video_transcript_or_404(request):
    """
    Delete a video transcript, returning 404 if it doesn't exist.

    Arguments:
        request: WSGI request object ('edx_video_id' and 'language_code'
            are read from the query string)

    Returns:
        - A 200 response when the transcript was deleted.
        - A 400 response when required parameters are missing.
        - A 404 response when no such transcript exists.
    """
    params = request.GET
    absent = [name for name in ('edx_video_id', 'language_code') if name not in params]
    if absent:
        message = _('The following parameters are required: {missing}.').format(missing=', '.join(absent))
        return JsonResponse({'error': message}, status=400)

    video_id = params.get('edx_video_id')
    language_code = params.get('language_code')
    if get_video_transcript(video_id=video_id, language_code=language_code):
        delete_video_transcript(video_id=video_id, language_code=language_code)
        return JsonResponse(status=200)
    return HttpResponseNotFound()

View File

@@ -1,9 +1,9 @@
"""
Common utility functions useful throughout the contentstore
"""
from collections import defaultdict
import configparser
import logging
from collections import defaultdict
from contextlib import contextmanager
from datetime import datetime, timezone
from uuid import uuid4
@@ -13,6 +13,7 @@ from django.core.exceptions import ValidationError
from django.urls import reverse
from django.utils import translation
from django.utils.translation import gettext as _
from help_tokens.core import HelpUrlExpert
from lti_consumer.models import CourseAllowPIISharingInLTIFlag
from opaque_keys.edx.keys import CourseKey, UsageKey
from opaque_keys.edx.locator import LibraryLocator
@@ -26,7 +27,7 @@ from cms.djangoapps.contentstore.toggles import exam_setting_view_enabled
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.edxmako.services import MakoService
from common.djangoapps.student import auth
from common.djangoapps.student.auth import has_studio_read_access, has_studio_write_access
from common.djangoapps.student.auth import has_studio_read_access, has_studio_write_access, STUDIO_EDIT_ROLES
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.student.roles import (
CourseInstructorRole,
@@ -70,6 +71,7 @@ from cms.djangoapps.contentstore.toggles import (
use_new_unit_page,
use_new_updates_page,
use_new_video_uploads_page,
use_new_custom_pages,
)
from cms.djangoapps.contentstore.toggles import use_new_text_editor, use_new_video_editor
from cms.djangoapps.models.settings.course_grading import CourseGradingModel
@@ -402,19 +404,32 @@ def get_course_outline_url(course_locator) -> str:
return course_outline_url
def get_unit_url(course_locator) -> str:
def get_unit_url(course_locator, unit_locator) -> str:
"""
Gets course authoring microfrontend URL for unit page view.
"""
unit_url = None
if use_new_unit_page(course_locator):
mfe_base_url = get_course_authoring_url(course_locator)
course_mfe_url = f'{mfe_base_url}/container/'
course_mfe_url = f'{mfe_base_url}/course/{course_locator}/container/{unit_locator}'
if mfe_base_url:
unit_url = course_mfe_url
return unit_url
def get_custom_pages_url(course_locator) -> str:
    """
    Gets course authoring microfrontend URL for custom pages view.

    Returns None when the new custom pages MFE is not enabled for the course
    or no MFE base URL is configured.
    """
    custom_pages_url = None
    if use_new_custom_pages(course_locator):
        mfe_base_url = get_course_authoring_url(course_locator)
        course_mfe_url = f'{mfe_base_url}/course/{course_locator}/custom-pages'
        # Only expose the URL when a base MFE URL is actually configured.
        if mfe_base_url:
            custom_pages_url = course_mfe_url
    return custom_pages_url
def course_import_olx_validation_is_enabled():
"""
Check if course olx validation is enabled on course import.
@@ -958,13 +973,12 @@ def get_subsections_by_assignment_type(course_key):
return subsections_by_assignment_type
def update_course_discussions_settings(course_key):
def update_course_discussions_settings(course):
"""
Updates course provider_type when new course is created
"""
provider = DiscussionsConfiguration.get(context_key=course_key).provider_type
provider = DiscussionsConfiguration.get(context_key=course.id).provider_type
store = modulestore()
course = store.get_course(course_key)
course.discussions_settings['provider_type'] = provider
store.update_item(course, course.published_by)
@@ -1310,6 +1324,35 @@ def get_course_settings(request, course_key, course_block):
return settings_context
def get_course_team(auth_user, course_key, user_perms):
    """
    Utils is used to get context of all CMS users who are editors for the specified course.
    It is used for both DRF and django views.

    Arguments:
        auth_user: the requesting user (used for the ownership-transfer hint).
        course_key: the course whose team is listed.
        user_perms: the requesting user's studio permission bitmask.
    """
    # Imported here to avoid a circular import with the views module.
    from cms.djangoapps.contentstore.views.user import user_with_role

    course_block = modulestore().get_course(course_key)
    instructors = set(CourseInstructorRole(course_key).users_with_role())
    # the page only lists staff and assumes they're a superset of instructors. Do a union to ensure.
    staff = set(CourseStaffRole(course_key).users_with_role()).union(instructors)

    formatted_users = []
    for user in instructors:
        formatted_users.append(user_with_role(user, 'instructor'))
    # Staff-only users (not also instructors) are listed with the 'staff' role.
    for user in staff - instructors:
        formatted_users.append(user_with_role(user, 'staff'))

    course_team_context = {
        'context_course': course_block,
        # Hint shown only to a sole instructor, who must hand off before leaving.
        'show_transfer_ownership_hint': auth_user in instructors and len(instructors) == 1,
        'users': formatted_users,
        'allow_actions': bool(user_perms & STUDIO_EDIT_ROLES),
    }

    return course_team_context
def get_course_grading(course_key):
"""
Utils is used to get context of course grading.
@@ -1332,6 +1375,18 @@ def get_course_grading(course_key):
return grading_context
def get_help_urls():
"""
Utils is used to get help tokens urls.
"""
ini = HelpUrlExpert.the_one()
ini.config = configparser.ConfigParser()
ini.config.read(ini.ini_file_name)
tokens = list(ini.config['pages'].keys())
help_tokens = {token: HelpUrlExpert.the_one().url_for_token(token) for token in tokens}
return help_tokens
class StudioPermissionsService:
"""
Service that can provide information about a user's permissions.

View File

@@ -0,0 +1,902 @@
"""
Views related to the video upload feature
"""
import codecs
import csv
import io
import json
import logging
from contextlib import closing
from datetime import datetime, timedelta
from uuid import uuid4
from boto.s3.connection import S3Connection
from boto import s3
from django.conf import settings
from django.contrib.staticfiles.storage import staticfiles_storage
from django.http import FileResponse, HttpResponseNotFound
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.translation import gettext as _
from django.utils.translation import gettext_noop
from edx_toggles.toggles import WaffleSwitch
from edxval.api import (
SortDirection,
VideoSortField,
create_or_update_transcript_preferences,
create_video,
get_3rd_party_transcription_plans,
get_available_transcript_languages,
get_video_transcript_url,
get_transcript_credentials_state_for_org,
get_transcript_preferences,
get_videos_for_course,
remove_transcript_preferences,
remove_video_for_course,
update_video_image,
update_video_status
)
from opaque_keys.edx.keys import CourseKey
from pytz import UTC
from rest_framework import status as rest_status
from rest_framework.response import Response
from common.djangoapps.edxmako.shortcuts import render_to_response
from common.djangoapps.util.json_request import JsonResponse
from openedx.core.djangoapps.video_config.models import VideoTranscriptEnabledFlag
from openedx.core.djangoapps.video_config.toggles import PUBLIC_VIDEO_SHARE
from openedx.core.djangoapps.video_pipeline.config.waffle import (
DEPRECATE_YOUTUBE,
ENABLE_DEVSTACK_VIDEO_UPLOADS,
)
from openedx.core.djangoapps.waffle_utils import CourseWaffleFlag
from xmodule.video_block.transcripts_utils import Transcript # lint-amnesty, pylint: disable=wrong-import-order
from .models import VideoUploadConfig
from .toggles import use_new_video_uploads_page, use_mock_video_uploads
from .utils import reverse_course_url, get_video_uploads_url
from .video_utils import validate_video_image
from .views.course import get_course_and_check_access
LOGGER = logging.getLogger(__name__)
# Waffle switches namespace for videos
WAFFLE_NAMESPACE = 'videos'
# Waffle switch for enabling/disabling video image upload feature
VIDEO_IMAGE_UPLOAD_ENABLED = WaffleSwitch( # lint-amnesty, pylint: disable=toggle-missing-annotation
f'{WAFFLE_NAMESPACE}.video_image_upload_enabled', __name__
)
# Waffle flag namespace for studio
WAFFLE_STUDIO_FLAG_NAMESPACE = 'studio'
ENABLE_VIDEO_UPLOAD_PAGINATION = CourseWaffleFlag( # lint-amnesty, pylint: disable=toggle-missing-annotation
f'{WAFFLE_STUDIO_FLAG_NAMESPACE}.enable_video_upload_pagination', __name__
)
# Default expiration, in seconds, of one-time URLs used for uploading videos.
KEY_EXPIRATION_IN_SECONDS = 86400
VIDEO_SUPPORTED_FILE_FORMATS = {
'.mp4': 'video/mp4',
'.mov': 'video/quicktime',
}
VIDEO_UPLOAD_MAX_FILE_SIZE_GB = 5
# maximum time for video to remain in upload state
MAX_UPLOAD_HOURS = 24
VIDEOS_PER_PAGE = 100
class TranscriptProvider:
"""
Transcription Provider Enumeration
"""
CIELO24 = 'Cielo24'
THREE_PLAY_MEDIA = '3PlayMedia'
CUSTOM = 'Custom'
class StatusDisplayStrings:
"""
A class to map status strings as stored in VAL to display strings for the
video upload page
"""
# Translators: This is the status of an active video upload
_UPLOADING = gettext_noop("Uploading")
# Translators: This is the status for a video that the servers are currently processing
_IN_PROGRESS = gettext_noop("In Progress")
# Translators: This is the status for a video that the servers have successfully processed
_COMPLETE = gettext_noop("Ready")
# Translators: This is the status for a video that is uploaded completely
_UPLOAD_COMPLETED = gettext_noop("Uploaded")
# Translators: This is the status for a video that the servers have failed to process
_FAILED = gettext_noop("Failed")
# Translators: This is the status for a video that is cancelled during upload by user
_CANCELLED = gettext_noop("Cancelled")
# Translators: This is the status for a video which has failed
# due to being flagged as a duplicate by an external or internal CMS
_DUPLICATE = gettext_noop("Failed Duplicate")
# Translators: This is the status for a video which has duplicate token for youtube
_YOUTUBE_DUPLICATE = gettext_noop("YouTube Duplicate")
# Translators: This is the status for a video for which an invalid
# processing token was provided in the course settings
_INVALID_TOKEN = gettext_noop("Invalid Token")
# Translators: This is the status for a video that was included in a course import
_IMPORTED = gettext_noop("Imported")
# Translators: This is the status for a video that is in an unknown state
_UNKNOWN = gettext_noop("Unknown")
# Translators: This is the status for a video that is having its transcription in progress on servers
_TRANSCRIPTION_IN_PROGRESS = gettext_noop("Transcription in Progress")
# Translators: This is the status for a video whose transcription is complete
_TRANSCRIPT_READY = gettext_noop("Transcript Ready")
# Translators: This is the status for a video whose transcription job was failed for some languages
_PARTIAL_FAILURE = gettext_noop("Partial Failure")
# Translators: This is the status for a video whose transcription job has failed altogether
_TRANSCRIPT_FAILED = gettext_noop("Transcript Failed")
_STATUS_MAP = {
"upload": _UPLOADING,
"ingest": _IN_PROGRESS,
"transcode_queue": _IN_PROGRESS,
"transcode_active": _IN_PROGRESS,
"file_delivered": _COMPLETE,
"file_complete": _COMPLETE,
"upload_completed": _UPLOAD_COMPLETED,
"file_corrupt": _FAILED,
"pipeline_error": _FAILED,
"upload_failed": _FAILED,
"s3_upload_failed": _FAILED,
"upload_cancelled": _CANCELLED,
"duplicate": _DUPLICATE,
"youtube_duplicate": _YOUTUBE_DUPLICATE,
"invalid_token": _INVALID_TOKEN,
"imported": _IMPORTED,
"transcription_in_progress": _TRANSCRIPTION_IN_PROGRESS,
"transcript_ready": _TRANSCRIPT_READY,
"partial_failure": _PARTIAL_FAILURE,
# TODO: Add a related unit tests when the VAL update is part of platform
"transcript_failed": _TRANSCRIPT_FAILED,
}
@staticmethod
def get(val_status):
"""Map a VAL status string to a localized display string"""
# pylint: disable=translation-of-non-string
return _(StatusDisplayStrings._STATUS_MAP.get(val_status, StatusDisplayStrings._UNKNOWN))
def handle_videos(request, course_key_string, edx_video_id=None):
"""
Restful handler for video uploads.
GET
html: return an HTML page to display previous video uploads and allow
new ones
json: return json representing the videos that have been uploaded and
their statuses
POST
json: generate new video upload urls, for example upload urls for S3 buckets. To upload the video, you should
make a PUT request to the returned upload_url values. This can happen on the frontend, MFE,
or client side - it is not implemented in the backend.
Example payload:
{
"files": [{
"file_name": "video.mp4",
"content_type": "video/mp4"
}]
}
Returns (JSON):
{
"files": [{
"file_name": "video.mp4",
"upload_url": "http://example.com/put_video"
}]
}
DELETE
soft deletes a video for particular course
"""
course = _get_and_validate_course(course_key_string, request.user)
if (not course and not use_mock_video_uploads()):
return HttpResponseNotFound()
if request.method == "GET":
if "application/json" in request.META.get("HTTP_ACCEPT", ""):
return videos_index_json(course)
pagination_conf = _generate_pagination_configuration(course_key_string, request)
return videos_index_html(course, pagination_conf)
elif request.method == "DELETE":
remove_video_for_course(course_key_string, edx_video_id)
return JsonResponse()
else:
if is_status_update_request(request.json):
return send_video_status_update(request.json)
elif _is_pagination_context_update_request(request):
return _update_pagination_context(request)
data, status = videos_post(course, request)
return JsonResponse(data, status=status)
def handle_generate_video_upload_link(request, course_key_string):
"""
API for creating a video upload. Returns an edx_video_id and a presigned URL that can be used
to upload the video to AWS S3.
"""
course = _get_and_validate_course(course_key_string, request.user)
if not course:
return Response(data='Course Not Found', status=rest_status.HTTP_400_BAD_REQUEST)
data, status = videos_post(course, request)
return Response(data, status=status)
def handle_video_images(request, course_key_string, edx_video_id=None):
"""Function to handle image files"""
# respond with a 404 if image upload is not enabled.
if not VIDEO_IMAGE_UPLOAD_ENABLED.is_enabled() and not use_mock_video_uploads():
return HttpResponseNotFound()
if 'file' not in request.FILES:
return JsonResponse({'error': _('An image file is required.')}, status=400)
image_file = request.FILES['file']
error = validate_video_image(image_file)
if error:
return JsonResponse({'error': error}, status=400)
with closing(image_file):
image_url = update_video_image(edx_video_id, course_key_string, image_file, image_file.name)
LOGGER.info(
'VIDEOS: Video image uploaded for edx_video_id [%s] in course [%s]', edx_video_id, course_key_string
)
return JsonResponse({'image_url': image_url})
def check_video_images_upload_enabled(request):
"""Function to check if images can be uploaded"""
# respond with a false if image upload is not enabled.
if not VIDEO_IMAGE_UPLOAD_ENABLED.is_enabled():
return JsonResponse({'allowThumbnailUpload': False})
return JsonResponse({'allowThumbnailUpload': True})
def enabled_video_features(request):
""" Return a dict with info about which video features are enabled """
features = {
'allowThumbnailUpload': VIDEO_IMAGE_UPLOAD_ENABLED.is_enabled(),
'videoSharingEnabled': PUBLIC_VIDEO_SHARE.is_enabled(),
}
return JsonResponse(features)
def validate_transcript_preferences(provider, cielo24_fidelity, cielo24_turnaround,
three_play_turnaround, video_source_language, preferred_languages):
"""
Validate 3rd Party Transcription Preferences.
Arguments:
provider: Transcription provider
cielo24_fidelity: Cielo24 transcription fidelity.
cielo24_turnaround: Cielo24 transcription turnaround.
three_play_turnaround: 3PlayMedia transcription turnaround.
video_source_language: Source/Speech language of the videos that are going to be submitted to the Providers.
preferred_languages: list of language codes.
Returns:
validated preferences or a validation error.
"""
error, preferences = None, {}
# validate transcription providers
transcription_plans = get_3rd_party_transcription_plans()
if provider in list(transcription_plans.keys()): # lint-amnesty, pylint: disable=consider-iterating-dictionary
# Further validations for providers
if provider == TranscriptProvider.CIELO24:
# Validate transcription fidelity
if cielo24_fidelity in transcription_plans[provider]['fidelity']:
# Validate transcription turnaround
if cielo24_turnaround not in transcription_plans[provider]['turnaround']:
error = f'Invalid cielo24 turnaround {cielo24_turnaround}.'
return error, preferences
# Validate transcription languages
supported_languages = transcription_plans[provider]['fidelity'][cielo24_fidelity]['languages']
if video_source_language not in supported_languages:
error = f'Unsupported source language {video_source_language}.'
return error, preferences
if not preferred_languages or not set(preferred_languages) <= set(supported_languages.keys()):
error = f'Invalid languages {preferred_languages}.'
return error, preferences
# Validated Cielo24 preferences
preferences = {
'video_source_language': video_source_language,
'cielo24_fidelity': cielo24_fidelity,
'cielo24_turnaround': cielo24_turnaround,
'preferred_languages': preferred_languages,
}
else:
error = f'Invalid cielo24 fidelity {cielo24_fidelity}.'
elif provider == TranscriptProvider.THREE_PLAY_MEDIA:
# Validate transcription turnaround
if three_play_turnaround not in transcription_plans[provider]['turnaround']:
error = f'Invalid 3play turnaround {three_play_turnaround}.'
return error, preferences
# Validate transcription languages
valid_translations_map = transcription_plans[provider]['translations']
if video_source_language not in list(valid_translations_map.keys()):
error = f'Unsupported source language {video_source_language}.'
return error, preferences
valid_target_languages = valid_translations_map[video_source_language]
if not preferred_languages or not set(preferred_languages) <= set(valid_target_languages):
error = f'Invalid languages {preferred_languages}.'
return error, preferences
# Validated 3PlayMedia preferences
preferences = {
'three_play_turnaround': three_play_turnaround,
'video_source_language': video_source_language,
'preferred_languages': preferred_languages,
}
else:
error = f'Invalid provider {provider}.'
return error, preferences
def handle_transcript_preferences(request, course_key_string):
"""
JSON view handler to post the transcript preferences.
Arguments:
request: WSGI request object
course_key_string: string for course key
Returns: valid json response or 400 with error message
"""
course_key = CourseKey.from_string(course_key_string)
is_video_transcript_enabled = VideoTranscriptEnabledFlag.feature_enabled(course_key)
if not is_video_transcript_enabled:
return HttpResponseNotFound()
if request.method == 'POST':
data = request.json
provider = data.get('provider')
error, preferences = validate_transcript_preferences(
provider=provider,
cielo24_fidelity=data.get('cielo24_fidelity', ''),
cielo24_turnaround=data.get('cielo24_turnaround', ''),
three_play_turnaround=data.get('three_play_turnaround', ''),
video_source_language=data.get('video_source_language'),
preferred_languages=list(map(str, data.get('preferred_languages', [])))
)
if error:
response = JsonResponse({'error': error}, status=400)
else:
preferences.update({'provider': provider})
transcript_preferences = create_or_update_transcript_preferences(course_key_string, **preferences)
response = JsonResponse({'transcript_preferences': transcript_preferences}, status=200)
return response
elif request.method == 'DELETE':
remove_transcript_preferences(course_key_string)
return JsonResponse()
def get_video_encodings_download(request, course_key_string):
"""
Returns a CSV report containing the encoded video URLs for video uploads
in the following format:
Video ID,Name,Status,Profile1 URL,Profile2 URL
aaaaaaaa-aaaa-4aaa-aaaa-aaaaaaaaaaaa,video.mp4,Complete,http://example.com/prof1.mp4,http://example.com/prof2.mp4
"""
course = _get_and_validate_course(course_key_string, request.user)
if not course:
return HttpResponseNotFound()
def get_profile_header(profile):
"""Returns the column header string for the given profile's URLs"""
# Translators: This is the header for a CSV file column
# containing URLs for video encodings for the named profile
# (e.g. desktop, mobile high quality, mobile low quality)
return _("{profile_name} URL").format(profile_name=profile)
profile_whitelist = VideoUploadConfig.get_profile_whitelist()
videos, __ = _get_videos(course)
videos = list(videos)
name_col = _("Name")
duration_col = _("Duration")
added_col = _("Date Added")
video_id_col = _("Video ID")
status_col = _("Status")
profile_cols = [get_profile_header(profile) for profile in profile_whitelist]
def make_csv_dict(video):
"""
Makes a dictionary suitable for writing CSV output. This involves
extracting the required items from the original video dict and
converting all keys and values to UTF-8 encoded string objects,
because the CSV module doesn't play well with unicode objects.
"""
# Translators: This is listed as the duration for a video that has not
# yet reached the point in its processing by the servers where its
# duration is determined.
duration_val = str(video["duration"]) if video["duration"] > 0 else _("Pending")
ret = dict(
[
(name_col, video["client_video_id"]),
(duration_col, duration_val),
(added_col, video["created"].isoformat()),
(video_id_col, video["edx_video_id"]),
(status_col, video["status"]),
] +
[
(get_profile_header(encoded_video["profile"]), encoded_video["url"])
for encoded_video in video["encoded_videos"]
if encoded_video["profile"] in profile_whitelist
]
)
return dict(ret.items())
# Write csv to bytes-like object. We need a separate writer and buffer as the csv
# writer writes str and the FileResponse expects a bytes files.
buffer = io.BytesIO()
buffer_writer = codecs.getwriter("utf-8")(buffer)
writer = csv.DictWriter(
buffer_writer,
[name_col, duration_col, added_col, video_id_col, status_col] + profile_cols,
dialect=csv.excel
)
writer.writeheader()
for video in videos:
writer.writerow(make_csv_dict(video))
buffer.seek(0)
# Translators: This is the suggested filename when downloading the URL
# listing for videos uploaded through Studio
filename = _("{course}_video_urls").format(course=course.id.course) + ".csv"
return FileResponse(buffer, as_attachment=True, filename=filename, content_type="text/csv")
def _get_and_validate_course(course_key_string, user):
"""
Given a course key, return the course if it exists, the given user has
access to it, and it is properly configured for video uploads
"""
course_key = CourseKey.from_string(course_key_string)
# For now, assume all studio users that have access to the course can upload videos.
# In the future, we plan to add a new org-level role for video uploaders.
course = get_course_and_check_access(course_key, user)
if (
settings.FEATURES["ENABLE_VIDEO_UPLOAD_PIPELINE"] and
getattr(settings, "VIDEO_UPLOAD_PIPELINE", None) and
course and
course.video_pipeline_configured
):
return course
else:
return None
def convert_video_status(video, is_video_encodes_ready=False):
"""
Convert status of a video. Status can be converted to one of the following:
* FAILED if video is in `upload` state for more than 24 hours
* `YouTube Duplicate` if status is `invalid_token`
* user-friendly video status
"""
now = datetime.now(video.get('created', datetime.now().replace(tzinfo=UTC)).tzinfo)
if video['status'] == 'upload' and (now - video['created']) > timedelta(hours=MAX_UPLOAD_HOURS):
new_status = 'upload_failed'
status = StatusDisplayStrings.get(new_status)
message = 'Video with id [{}] is still in upload after [{}] hours, setting status to [{}]'.format(
video['edx_video_id'], MAX_UPLOAD_HOURS, new_status
)
send_video_status_update([
{
'edxVideoId': video['edx_video_id'],
'status': new_status,
'message': message
}
])
elif video['status'] == 'invalid_token':
status = StatusDisplayStrings.get('youtube_duplicate')
elif is_video_encodes_ready:
status = StatusDisplayStrings.get('file_complete')
else:
status = StatusDisplayStrings.get(video['status'])
return status
def _get_videos(course, pagination_conf=None):
"""
Retrieves the list of videos from VAL corresponding to this course.
"""
videos, pagination_context = get_videos_for_course(
str(course.id),
VideoSortField.created,
SortDirection.desc,
pagination_conf
)
videos = list(videos)
# This is required to see if edx video pipeline is enabled while converting the video status.
course_video_upload_token = course.video_upload_pipeline.get('course_video_upload_token')
transcription_statuses = ['transcription_in_progress', 'transcript_ready', 'partial_failure', 'transcript_failed']
# convert VAL's status to studio's Video Upload feature status.
for video in videos:
# If we are using "new video workflow" and status is in `transcription_statuses` then video encodes are ready.
# This is because Transcription starts once all the encodes are complete except for YT, but according to
# "new video workflow" YT is disabled as well as deprecated. So, Its precise to say that the Transcription
# starts once all the encodings are complete *for the new video workflow*.
is_video_encodes_ready = not course_video_upload_token and (video['status'] in transcription_statuses)
# Update with transcript languages
video['transcripts'] = get_available_transcript_languages(video_id=video['edx_video_id'])
video['transcription_status'] = (
StatusDisplayStrings.get(video['status']) if is_video_encodes_ready else ''
)
video['transcript_urls'] = {}
for language_code in video['transcripts']:
video['transcript_urls'][language_code] = get_video_transcript_url(
video_id=video['edx_video_id'],
language_code=language_code,
)
# Convert the video status.
video['status'] = convert_video_status(video, is_video_encodes_ready)
return videos, pagination_context
def _get_default_video_image_url():
"""
Returns default video image url
"""
return staticfiles_storage.url(settings.VIDEO_IMAGE_DEFAULT_FILENAME)
def _get_index_videos(course, pagination_conf=None):
"""
Returns the information about each video upload required for the video list
"""
course_id = str(course.id)
attrs = [
'edx_video_id', 'client_video_id', 'created', 'duration',
'status', 'courses', 'transcripts', 'transcription_status',
'transcript_urls', 'error_description'
]
def _get_values(video):
"""
Get data for predefined video attributes.
"""
values = {}
for attr in attrs:
if attr == 'courses':
course = [c for c in video['courses'] if course_id in c]
(__, values['course_video_image_url']), = list(course[0].items())
else:
values[attr] = video[attr]
return values
videos, pagination_context = _get_videos(course, pagination_conf)
return [_get_values(video) for video in videos], pagination_context
def get_all_transcript_languages():
"""
Returns all possible languages for transcript.
"""
third_party_transcription_languages = {}
transcription_plans = get_3rd_party_transcription_plans()
cielo_fidelity = transcription_plans[TranscriptProvider.CIELO24]['fidelity']
# Get third party transcription languages.
third_party_transcription_languages.update(transcription_plans[TranscriptProvider.THREE_PLAY_MEDIA]['languages'])
third_party_transcription_languages.update(cielo_fidelity['MECHANICAL']['languages'])
third_party_transcription_languages.update(cielo_fidelity['PREMIUM']['languages'])
third_party_transcription_languages.update(cielo_fidelity['PROFESSIONAL']['languages'])
all_languages_dict = dict(settings.ALL_LANGUAGES, **third_party_transcription_languages)
# Return combined system settings and 3rd party transcript languages.
all_languages = []
for key, value in sorted(all_languages_dict.items(), key=lambda k_v: k_v[1]):
all_languages.append({
'language_code': key,
'language_text': value
})
return all_languages
def videos_index_html(course, pagination_conf=None):
"""
Returns an HTML page to display previous video uploads and allow new ones
"""
is_video_transcript_enabled = VideoTranscriptEnabledFlag.feature_enabled(course.id)
previous_uploads, pagination_context = _get_index_videos(course, pagination_conf)
context = {
'context_course': course,
'image_upload_url': reverse_course_url('video_images_handler', str(course.id)),
'video_handler_url': reverse_course_url('videos_handler', str(course.id)),
'encodings_download_url': reverse_course_url('video_encodings_download', str(course.id)),
'default_video_image_url': _get_default_video_image_url(),
'previous_uploads': previous_uploads,
'concurrent_upload_limit': settings.VIDEO_UPLOAD_PIPELINE.get('CONCURRENT_UPLOAD_LIMIT', 0),
'video_supported_file_formats': list(VIDEO_SUPPORTED_FILE_FORMATS.keys()),
'video_upload_max_file_size': VIDEO_UPLOAD_MAX_FILE_SIZE_GB,
'video_image_settings': {
'video_image_upload_enabled': VIDEO_IMAGE_UPLOAD_ENABLED.is_enabled(),
'max_size': settings.VIDEO_IMAGE_SETTINGS['VIDEO_IMAGE_MAX_BYTES'],
'min_size': settings.VIDEO_IMAGE_SETTINGS['VIDEO_IMAGE_MIN_BYTES'],
'max_width': settings.VIDEO_IMAGE_MAX_WIDTH,
'max_height': settings.VIDEO_IMAGE_MAX_HEIGHT,
'supported_file_formats': settings.VIDEO_IMAGE_SUPPORTED_FILE_FORMATS
},
'is_video_transcript_enabled': is_video_transcript_enabled,
'active_transcript_preferences': None,
'transcript_credentials': None,
'transcript_available_languages': get_all_transcript_languages(),
'video_transcript_settings': {
'transcript_download_handler_url': reverse('transcript_download_handler'),
'transcript_upload_handler_url': reverse('transcript_upload_handler'),
'transcript_delete_handler_url': reverse_course_url('transcript_delete_handler', str(course.id)),
'trancript_download_file_format': Transcript.SRT
},
'pagination_context': pagination_context
}
if is_video_transcript_enabled:
context['video_transcript_settings'].update({
'transcript_preferences_handler_url': reverse_course_url(
'transcript_preferences_handler',
str(course.id)
),
'transcript_credentials_handler_url': reverse_course_url(
'transcript_credentials_handler',
str(course.id)
),
'transcription_plans': get_3rd_party_transcription_plans(),
})
context['active_transcript_preferences'] = get_transcript_preferences(str(course.id))
# Cached state for transcript providers' credentials (org-specific)
context['transcript_credentials'] = get_transcript_credentials_state_for_org(course.id.org)
if use_new_video_uploads_page(course.id):
return redirect(get_video_uploads_url(course.id))
return render_to_response('videos_index.html', context)
def videos_index_json(course):
"""
Returns JSON in the following format:
{
'videos': [{
'edx_video_id': 'aaaaaaaa-aaaa-4aaa-aaaa-aaaaaaaaaaaa',
'client_video_id': 'video.mp4',
'created': '1970-01-01T00:00:00Z',
'duration': 42.5,
'status': 'upload',
'course_video_image_url': 'https://video/images/1234.jpg'
}]
}
"""
index_videos, __ = _get_index_videos(course)
return JsonResponse({"videos": index_videos}, status=200)
def videos_post(course, request):
"""
Input (JSON):
{
"files": [{
"file_name": "video.mp4",
"content_type": "video/mp4"
}]
}
Returns (JSON):
{
"files": [{
"file_name": "video.mp4",
"upload_url": "http://example.com/put_video"
}]
}
The returned array corresponds exactly to the input array.
"""
if use_mock_video_uploads():
return {'files': [{'file_name': 'video.mp4', 'upload_url': 'http://example.com/put_video'}]}, 200
error = None
data = request.json
if 'files' not in data:
error = "Request object is not JSON or does not contain 'files'"
elif any(
'file_name' not in file or 'content_type' not in file
for file in data['files']
):
error = "Request 'files' entry does not contain 'file_name' and 'content_type'"
elif any(
file['content_type'] not in list(VIDEO_SUPPORTED_FILE_FORMATS.values())
for file in data['files']
):
error = "Request 'files' entry contain unsupported content_type"
if error:
return {'error': error}, 400
bucket = storage_service_bucket()
req_files = data['files']
resp_files = []
for req_file in req_files:
file_name = req_file['file_name']
try:
file_name.encode('ascii')
except UnicodeEncodeError:
error_msg = 'The file name for %s must contain only ASCII characters.' % file_name
return {'error': error_msg}, 400
edx_video_id = str(uuid4())
key = storage_service_key(bucket, file_name=edx_video_id)
metadata_list = [
('client_video_id', file_name),
('course_key', str(course.id)),
]
course_video_upload_token = course.video_upload_pipeline.get('course_video_upload_token')
# Only include `course_video_upload_token` if youtube has not been deprecated
# for this course.
if not DEPRECATE_YOUTUBE.is_enabled(course.id) and course_video_upload_token:
metadata_list.append(('course_video_upload_token', course_video_upload_token))
is_video_transcript_enabled = VideoTranscriptEnabledFlag.feature_enabled(course.id)
if is_video_transcript_enabled:
transcript_preferences = get_transcript_preferences(str(course.id))
if transcript_preferences is not None:
metadata_list.append(('transcript_preferences', json.dumps(transcript_preferences)))
for metadata_name, value in metadata_list:
key.set_metadata(metadata_name, value)
upload_url = key.generate_url(
KEY_EXPIRATION_IN_SECONDS,
'PUT',
headers={'Content-Type': req_file['content_type']}
)
# persist edx_video_id in VAL
create_video({
'edx_video_id': edx_video_id,
'status': 'upload',
'client_video_id': file_name,
'duration': 0,
'encoded_videos': [],
'courses': [str(course.id)]
})
resp_files.append({'file_name': file_name, 'upload_url': upload_url, 'edx_video_id': edx_video_id})
return {'files': resp_files}, 200
def storage_service_bucket():
"""
Returns an S3 bucket for video upload.
"""
if ENABLE_DEVSTACK_VIDEO_UPLOADS.is_enabled():
params = {
'aws_access_key_id': settings.AWS_ACCESS_KEY_ID,
'aws_secret_access_key': settings.AWS_SECRET_ACCESS_KEY,
'security_token': settings.AWS_SECURITY_TOKEN
}
else:
params = {
'aws_access_key_id': settings.AWS_ACCESS_KEY_ID,
'aws_secret_access_key': settings.AWS_SECRET_ACCESS_KEY
}
conn = S3Connection(**params)
# We don't need to validate our bucket, it requires a very permissive IAM permission
# set since behind the scenes it fires a HEAD request that is equivalent to get_all_keys()
# meaning it would need ListObjects on the whole bucket, not just the path used in each
# environment (since we share a single bucket for multiple deployments in some configurations)
return conn.get_bucket(settings.VIDEO_UPLOAD_PIPELINE['VEM_S3_BUCKET'], validate=False)
def storage_service_key(bucket, file_name):
"""
Returns an S3 key to the given file in the given bucket.
"""
key_name = "{}/{}".format(
settings.VIDEO_UPLOAD_PIPELINE.get("ROOT_PATH", ""),
file_name
)
return s3.key.Key(bucket, key_name)
def send_video_status_update(updates):
"""
Update video status in edx-val.
"""
for update in updates:
update_video_status(update.get('edxVideoId'), update.get('status'))
LOGGER.info(
'VIDEOS: Video status update with id [%s], status [%s] and message [%s]',
update.get('edxVideoId'),
update.get('status'),
update.get('message')
)
return JsonResponse()
def is_status_update_request(request_data):
"""
Returns True if `request_data` contains status update else False.
"""
return any('status' in update for update in request_data)
def _generate_pagination_configuration(course_key_string, request):
"""
Returns pagination configuration
"""
course_key = CourseKey.from_string(course_key_string)
if not ENABLE_VIDEO_UPLOAD_PAGINATION.is_enabled(course_key):
return None
return {
'page_number': request.GET.get('page', 1),
'videos_per_page': request.session.get("VIDEOS_PER_PAGE", VIDEOS_PER_PAGE)
}
def _is_pagination_context_update_request(request):
"""
Checks if request contains `videos_per_page`
"""
return request.POST.get('id', '') == "videos_per_page"
def _update_pagination_context(request):
"""
Updates session with posted value
"""
error_msg = _('A non zero positive integer is expected')
try:
videos_per_page = int(request.POST.get('value'))
if videos_per_page <= 0:
return JsonResponse({'error': error_msg}, status=500)
except ValueError:
return JsonResponse({'error': error_msg}, status=500)
request.session['VIDEOS_PER_PAGE'] = videos_per_page
return JsonResponse()

View File

@@ -1,39 +1,20 @@
"""Views for assets"""
import json
import logging
import math
import re
from functools import partial
from urllib.parse import urljoin
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseBadRequest, HttpResponseNotFound
from django.utils.translation import gettext as _
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_http_methods, require_POST
from opaque_keys.edx.keys import AssetKey, CourseKey
from pymongo import ASCENDING, DESCENDING
from cms.djangoapps.contentstore.asset_storage_handlers import (
handle_assets,
get_asset_usage_path,
update_course_run_asset as update_course_run_asset_source_function,
get_file_size as get_file_size_source_function,
delete_asset as delete_asset_source_function,
get_asset_json as get_asset_json_source_function,
update_asset as update_asset_source_function,
from common.djangoapps.edxmako.shortcuts import render_to_response
from common.djangoapps.student.auth import has_course_author_access
from common.djangoapps.util.date_utils import get_default_time_display
from common.djangoapps.util.json_request import JsonResponse
from openedx.core.djangoapps.contentserver.caching import del_cached_content
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from xmodule.contentstore.content import StaticContent # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.contentstore.django import contentstore # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.exceptions import NotFoundError # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.exceptions import ItemNotFoundError # lint-amnesty, pylint: disable=wrong-import-order
)
from ..exceptions import AssetNotFoundException, AssetSizeTooLargeException
from ..utils import reverse_course_url
__all__ = ['assets_handler']
__all__ = ['assets_handler', 'asset_usage_path_handler']
REQUEST_DEFAULTS = {
'page': 0,
@@ -64,552 +45,47 @@ def assets_handler(request, course_key_string=None, asset_key_string=None):
asset_type: the file type to filter items to (defaults to All)
text_search: string to filter results by file name (defaults to '')
POST
json: create (or update?) an asset. The only updating that can be done is changing the lock state.
json: create or update an asset. The only updating that can be done is changing the lock state.
PUT
json: update the locked state of an asset
json: create or update an asset. The only updating that can be done is changing the lock state.
DELETE
json: delete an asset
'''
course_key = CourseKey.from_string(course_key_string)
if not has_course_author_access(request.user, course_key):
raise PermissionDenied()
return handle_assets(request, course_key_string, asset_key_string)
response_format = _get_response_format(request)
if _request_response_format_is_json(request, response_format):
if request.method == 'GET':
return _assets_json(request, course_key)
asset_key = AssetKey.from_string(asset_key_string) if asset_key_string else None
return _update_asset(request, course_key, asset_key)
elif request.method == 'GET': # assume html
return _asset_index(request, course_key)
return HttpResponseNotFound()
def _get_response_format(request):
    """Return the requested response format ('json'/'html'), defaulting to 'html'."""
    requested = request.GET.get('format')
    if not requested:
        requested = request.POST.get('format')
    return requested if requested else 'html'
def _request_response_format_is_json(request, response_format):
    """
    Return True when the caller asked for JSON, either via an explicit
    ``format=json`` parameter or through the Accept header (a missing
    Accept header is treated as accepting JSON).
    """
    if response_format == 'json':
        return True
    accept_header = request.META.get('HTTP_ACCEPT', 'application/json')
    return 'application/json' in accept_header
def _asset_index(request, course_key):
    '''
    Render the editable asset library HTML page.

    Upload limits and the chunk size come from Django settings; the actual
    asset listing is fetched client-side through the JSON endpoint reachable
    at 'asset_callback_url'.
    '''
    course_block = modulestore().get_course(course_key)
    return render_to_response('asset_index.html', {
        'language_code': request.LANGUAGE_CODE,
        'context_course': course_block,
        'max_file_size_in_mbs': settings.MAX_ASSET_UPLOAD_FILE_SIZE_IN_MB,
        'chunk_size_in_mbs': settings.UPLOAD_CHUNK_SIZE_IN_MB,
        'max_file_size_redirect_url': settings.MAX_ASSET_UPLOAD_FILE_SIZE_URL,
        'asset_callback_url': reverse_course_url('assets_handler', course_key)
    })
def _assets_json(request, course_key):
    '''
    Return one page of the course's asset listing as JSON.

    Honors page/page_size, sort/direction, asset_type and text_search query
    parameters; returns a 400 JsonResponse for unknown asset_type filters.
    '''
    request_options = _parse_request_to_dictionary(request)
    filter_parameters = {}
    if request_options['requested_asset_type']:
        filters_are_invalid_error = _get_error_if_invalid_parameters(request_options['requested_asset_type'])
        if filters_are_invalid_error is not None:
            return filters_are_invalid_error
        filter_parameters.update(_get_content_type_filter_for_mongo(request_options['requested_asset_type']))
    if request_options['requested_text_search']:
        filter_parameters.update(_get_displayname_search_filter_for_mongo(request_options['requested_text_search']))
    sort_type_and_direction = _get_sort_type_and_direction(request_options)
    requested_page_size = request_options['requested_page_size']
    current_page = _get_current_page(request_options['requested_page'])
    first_asset_to_display_index = _get_first_asset_index(current_page, requested_page_size)
    query_options = {
        'current_page': current_page,
        'page_size': requested_page_size,
        'sort': sort_type_and_direction,
        'filter_params': filter_parameters
    }
    assets, total_count = _get_assets_for_page(course_key, query_options)
    # If the client asked for a page past the end (e.g. after deletions
    # shrank the result set), requery for the actual final page instead of
    # returning an empty page.
    if request_options['requested_page'] > 0 and first_asset_to_display_index >= total_count and total_count > 0:  # lint-amnesty, pylint: disable=chained-comparison
        _update_options_to_requery_final_page(query_options, total_count)
        current_page = query_options['current_page']
        first_asset_to_display_index = _get_first_asset_index(current_page, requested_page_size)
        assets, total_count = _get_assets_for_page(course_key, query_options)
    last_asset_to_display_index = first_asset_to_display_index + len(assets)
    assets_in_json_format = _get_assets_in_json_format(assets, course_key)
    response_payload = {
        'start': first_asset_to_display_index,
        'end': last_asset_to_display_index,
        'page': current_page,
        'pageSize': requested_page_size,
        'totalCount': total_count,
        'assets': assets_in_json_format,
        # Echo back the requested (not the mongo-translated) sort/filter so
        # the client can keep its UI state in sync.
        'sort': request_options['requested_sort'],
        'direction': request_options['requested_sort_direction'],
        'assetTypes': _get_requested_file_types_from_requested_filter(request_options['requested_asset_type']),
        'textSearch': request_options['requested_text_search'],
    }
    return JsonResponse(response_payload)
def _parse_request_to_dictionary(request):
    """Gather the paging/sorting/filtering query parameters into an options dict."""
    def fetch(attribute):
        # Reads the query parameter, falling back to the module-level default.
        return _get_requested_attribute(request, attribute)

    return {
        'requested_page': int(fetch('page')),
        'requested_page_size': int(fetch('page_size')),
        'requested_sort': fetch('sort'),
        'requested_sort_direction': fetch('direction'),
        'requested_asset_type': fetch('asset_type'),
        'requested_text_search': fetch('text_search'),
    }
def _get_requested_attribute(request, attribute):
    """Read ``attribute`` from the query string, defaulting per REQUEST_DEFAULTS."""
    default = REQUEST_DEFAULTS.get(attribute)
    return request.GET.get(attribute, default)
def _get_error_if_invalid_parameters(requested_filter):
    """
    Return a 400 JsonResponse when ``requested_filter`` names an unknown
    asset-type category; implicitly return None when all categories are valid.
    """
    requested_file_types = _get_requested_file_types_from_requested_filter(requested_filter)
    invalid_filters = []
    # OTHER is not described in the settings file as a filter
    all_valid_file_types = set(_get_files_and_upload_type_filters().keys())
    all_valid_file_types.add('OTHER')
    for requested_file_type in requested_file_types:
        if requested_file_type not in all_valid_file_types:
            invalid_filters.append(requested_file_type)
    if invalid_filters:
        error_message = {
            'error_code': 'invalid_asset_type_filter',
            'developer_message': 'The asset_type parameter to the request is invalid. '
                                 'The {} filters are not described in the settings.FILES_AND_UPLOAD_TYPE_FILTERS '
                                 'dictionary.'.format(invalid_filters)
        }
        return JsonResponse({'error': error_message}, status=400)
    # Falls through to an implicit None when every requested filter is valid.
def _get_content_type_filter_for_mongo(requested_filter):
    """
    Construct and return the pymongo $or query dict matching the requested
    content-type categories (including the catch-all 'OTHER' category).
    """
    requested_file_types = _get_requested_file_types_from_requested_filter(requested_filter)
    expressions = []
    if 'OTHER' in requested_file_types:
        expressions.append(_get_mongo_expression_for_type_other())
        requested_file_types.remove('OTHER')
    expressions.append(_get_mongo_expression_for_type_filter(requested_file_types))
    return {"$or": expressions}
def _get_mongo_expression_for_type_other():
    """
    Build the pymongo expression for the 'OTHER' category: content whose
    type does not appear in any configured category.
    """
    known_content_types = []
    for extensions in _get_files_and_upload_type_filters().values():
        known_content_types.extend(extensions)
    return {'contentType': {'$nin': known_content_types}}
def _get_mongo_expression_for_type_filter(requested_file_types):
    """
    Build the pymongo expression matching the named content type categories.

    The named categories are the non-'OTHER' keys of the
    FILES_AND_UPLOAD_TYPE_FILTERS setting: 'Images', 'Documents', 'Audio', 'Code'.
    """
    category_map = _get_files_and_upload_type_filters()
    content_types = [
        content_type
        for file_type in requested_file_types
        for content_type in category_map[file_type]
    ]
    return {'contentType': {'$in': content_types}}
def _get_displayname_search_filter_for_mongo(text_search):
    """
    Return a pymongo query dict requiring the display name to match every
    whitespace-separated token of ``text_search``, case-insensitively.
    """
    token_filters = [
        {
            'displayname': {
                '$regex': re.escape(token),
                '$options': 'i',
            },
        }
        for token in text_search.split()
    ]
    return {
        '$and': token_filters,
    }
def _get_files_and_upload_type_filters():
    # Mapping of filter category name -> list of MIME content types,
    # sourced from Django settings (kept behind a helper for mockability).
    return settings.FILES_AND_UPLOAD_TYPE_FILTERS
def _get_requested_file_types_from_requested_filter(requested_filter):
    """Split the comma-separated filter string into a list; [] when falsy."""
    if not requested_filter:
        return []
    return requested_filter.split(',')
def _get_sort_type_and_direction(request_options):
    """Translate the requested sort options into pymongo's [(field, direction)] form."""
    field = _get_mongo_sort_from_requested_sort(request_options['requested_sort'])
    direction = _get_sort_direction_from_requested_sort(request_options['requested_sort_direction'])
    return [(field, direction)]
def _get_mongo_sort_from_requested_sort(requested_sort):
    """Map the API's sort names onto the underlying mongo field names."""
    field_by_requested_sort = {
        'date_added': 'uploadDate',
        'display_name': 'displayname',
    }
    # Unknown sort names pass through unchanged.
    return field_by_requested_sort.get(requested_sort, requested_sort)
def _get_sort_direction_from_requested_sort(requested_sort_direction):
    """Return pymongo ASCENDING for 'asc' (any case); DESCENDING otherwise."""
    return ASCENDING if requested_sort_direction.lower() == 'asc' else DESCENDING
def _get_current_page(requested_page):
    """Clamp negative page numbers up to zero."""
    if requested_page < 0:
        return 0
    return requested_page
def _get_first_asset_index(current_page, page_size):
    """Zero-based index of the first asset shown on ``current_page``."""
    first_index = current_page * page_size
    return first_index
def _get_assets_for_page(course_key, options):
    """Fetch one page of course content; returns (assets, total_match_count)."""
    page = options['current_page']
    size = options['page_size']
    # An empty filter dict means "no filtering" to the contentstore API.
    filter_params = options['filter_params'] or None
    return contentstore().get_all_content_for_course(
        course_key,
        start=page * size,
        maxresults=size,
        sort=options['sort'],
        filter_params=filter_params,
    )
def _update_options_to_requery_final_page(query_options, total_asset_count):
    """Point ``current_page`` at the last page that still holds an asset."""
    # Floor division gives the zero-based index of the final non-empty page.
    query_options['current_page'] = (total_asset_count - 1) // query_options['page_size']
def _get_assets_in_json_format(assets, course_key):
    """Convert raw contentstore asset dicts into the client-facing JSON dicts."""
    assets_in_json_format = []
    for asset in assets:
        thumbnail_asset_key = _get_thumbnail_asset_key(asset, course_key)
        # Older records may lack the 'locked' field; treat missing as unlocked.
        asset_is_locked = asset.get('locked', False)
        asset_in_json = _get_asset_json(
            asset['displayname'],
            asset['contentType'],
            asset['uploadDate'],
            asset['asset_key'],
            thumbnail_asset_key,
            asset_is_locked,
            course_key,
        )
        assets_in_json_format.append(asset_in_json)
    return assets_in_json_format
@login_required
@ensure_csrf_cookie
def asset_usage_path_handler(request, course_key_string, asset_key_string):
    """Delegate to asset_storage_handlers.get_asset_usage_path (thin URL-handler shim)."""
    return get_asset_usage_path(request, course_key_string, asset_key_string)
def update_course_run_asset(course_key, upload_file):
    """
    Save ``upload_file`` into the course's contentstore and return the saved
    StaticContent object.

    Generates and attaches a thumbnail where possible, evicting stale cache
    entries either way. Raises AssetSizeTooLargeException when the file
    exceeds the configured limit.

    NOTE(review): when the course does not exist this returns the
    HttpResponseBadRequest from the existence check instead of content —
    callers must handle the mixed return type.
    """
    course_exists_response = _get_error_if_course_does_not_exist(course_key)
    if course_exists_response is not None:
        return course_exists_response
    file_metadata = _get_file_metadata_as_dictionary(upload_file)
    is_file_too_large = _check_file_size_is_too_large(file_metadata)
    if is_file_too_large:
        error_message = _get_file_too_large_error_message(file_metadata['filename'])
        raise AssetSizeTooLargeException(error_message)
    content, temporary_file_path = _get_file_content_and_path(file_metadata, course_key)
    (thumbnail_content, thumbnail_location) = contentstore().generate_thumbnail(content,
                                                                                tempfile_path=temporary_file_path)
    # delete cached thumbnail even if one couldn't be created this time (else the old thumbnail will continue to show)
    del_cached_content(thumbnail_location)
    if _check_thumbnail_uploaded(thumbnail_content):
        content.thumbnail_location = thumbnail_location
    contentstore().save(content)
    del_cached_content(content.location)
    return content
@require_POST
@ensure_csrf_cookie
@login_required
def _upload_asset(request, course_key):
    """
    Save the uploaded file from ``request.FILES['file']`` into the course's
    contentstore and return a JSON payload describing the new asset.

    Error responses: 400 when the course is missing or is a legacy
    (deprecated) course; 413 when the file exceeds the size limit.
    """
    course_exists_error = _get_error_if_course_does_not_exist(course_key)
    if course_exists_error is not None:
        return course_exists_error
    if course_key.deprecated:
        return JsonResponse({'error': 'Uploading assets for the legacy course is not available.'}, status=400)
    # compute a 'filename' which is similar to the location formatting, we're
    # using the 'filename' nomenclature since we're using a FileSystem paradigm
    # here. We're just imposing the Location string formatting expectations to
    # keep things a bit more consistent
    upload_file = request.FILES['file']
    try:
        content = update_course_run_asset(course_key, upload_file)
    except AssetSizeTooLargeException as exception:
        return JsonResponse({'error': str(exception)}, status=413)
    # readback the saved content - we need the database timestamp
    readback = contentstore().find(content.location)
    locked = getattr(content, 'locked', False)
    return JsonResponse({
        'asset': _get_asset_json(
            content.name,
            content.content_type,
            readback.last_modified_at,
            content.location,
            content.thumbnail_location,
            locked,
            course_key,
        ),
        'msg': _('Upload completed')
    })
def _get_error_if_course_does_not_exist(course_key):
    """
    Return an HttpResponseBadRequest when the course cannot be found;
    implicitly return None when it exists.
    """
    try:
        modulestore().get_course(course_key)
    except ItemNotFoundError:
        logging.error('Could not find course: %s', course_key)
        return HttpResponseBadRequest()
def _get_file_metadata_as_dictionary(upload_file):
    """Bundle the upload's name, MIME type and size into a metadata dict."""
    # compute a 'filename' which is similar to the location formatting; we're
    # using the 'filename' nomenclature since we're using a FileSystem paradigm
    # here; we're just imposing the Location string formatting expectations to
    # keep things a bit more consistent
    return {
        'upload_file': upload_file,
        'filename': upload_file.name,
        'mime_type': upload_file.content_type,
        'upload_file_size': get_file_size(upload_file)
    }
    # NOTE(review): the two lines below are unreachable merge residue from the
    # asset_storage_handlers refactor (they belong to the new delegating
    # update_course_run_asset) and should be removed.
    """Exposes service method in asset_storage_handlers without breaking existing bindings/dependencies"""
    return update_course_run_asset_source_function(course_key, upload_file)
def get_file_size(upload_file):
    """
    Return the uploaded file's size in bytes.

    Kept as a standalone helper so tests can mock file sizes.
    """
    size_in_bytes = upload_file.size
    return size_in_bytes
def _check_file_size_is_too_large(file_metadata):
    """Return True when the upload exceeds settings.MAX_ASSET_UPLOAD_FILE_SIZE_IN_MB."""
    # The limit uses decimal megabytes (1 MB = 1,000,000 bytes).
    limit_in_bytes = settings.MAX_ASSET_UPLOAD_FILE_SIZE_IN_MB * 1000 ** 2
    return file_metadata['upload_file_size'] > limit_in_bytes
def _get_file_too_large_error_message(filename):
    """Return a translated error message naming the file and the size limit."""
    # The placeholders must match the .format() keyword arguments below;
    # without {filename} the passed-in name was silently dropped from the
    # message shown to the user.
    return _(
        'File {filename} exceeds maximum size of '
        '{maximum_size_in_megabytes} MB.'
    ).format(
        filename=filename,
        maximum_size_in_megabytes=settings.MAX_ASSET_UPLOAD_FILE_SIZE_IN_MB,
    )
def _get_file_content_and_path(file_metadata, course_key):
    """
    Build a StaticContent object for the upload and return it together with
    the temporary file path (None when the upload was read into memory).
    """
    content_location = StaticContent.compute_location(course_key, file_metadata['filename'])
    upload_file = file_metadata['upload_file']
    file_can_be_chunked = upload_file.multiple_chunks()
    static_content_partial = partial(StaticContent, content_location, file_metadata['filename'],
                                     file_metadata['mime_type'])
    if file_can_be_chunked:
        # Multi-chunk uploads expose an on-disk path; remember it so the
        # thumbnail generator can reuse the file — presumably a
        # disk-backed Django TemporaryUploadedFile (TODO confirm).
        content = static_content_partial(upload_file.chunks())
        temporary_file_path = upload_file.temporary_file_path()
    else:
        content = static_content_partial(upload_file.read())
        temporary_file_path = None
    return content, temporary_file_path
def _check_thumbnail_uploaded(thumbnail_content):
    """Return True when a thumbnail was generated (content is NOT None)."""
    # The previous docstring said the opposite ("returns whether thumbnail
    # is None"); the code has always tested `is not None`.
    return thumbnail_content is not None
def _get_thumbnail_asset_key(asset, course_key):
    """Return the AssetKey of the asset's thumbnail, or None when it has none."""
    # note, due to the schema change we may not have a 'thumbnail_location' in the result set
    thumbnail_location = asset.get('thumbnail_location', None)
    thumbnail_asset_key = None
    if thumbnail_location:
        # thumbnail_location is a serialized location sequence; index 4 is
        # taken as the name component — TODO confirm against the
        # contentstore's location schema.
        thumbnail_path = thumbnail_location[4]
        thumbnail_asset_key = course_key.make_asset_key('thumbnail', thumbnail_path)
    return thumbnail_asset_key
@require_http_methods(('DELETE', 'POST', 'PUT'))
@login_required
@ensure_csrf_cookie
def _update_asset(request, course_key, asset_key):
    """
    restful CRUD operations for a course asset.
    Currently only DELETE, POST, and PUT methods are implemented.
    asset_path_encoding: the odd /c4x/org/course/category/name repr of the asset (used by Backbone as the id)

    DELETE removes the asset (404 JSON when it does not exist); POST/PUT
    with a file upload creates the asset, otherwise the JSON body's
    'locked' flag is applied to the existing asset.
    """
    if request.method == 'DELETE':
        try:
            delete_asset(course_key, asset_key)
            return JsonResponse()
        except AssetNotFoundException:
            return JsonResponse(status=404)
    elif request.method in ('PUT', 'POST'):
        if 'file' in request.FILES:
            return _upload_asset(request, course_key)
        # update existing asset
        try:
            modified_asset = json.loads(request.body.decode('utf8'))
        except ValueError:
            return HttpResponseBadRequest()
        # NOTE(review): a body without a 'locked' key raises KeyError here
        # (500) rather than a 400 — confirm whether that is intentional.
        contentstore().set_attr(asset_key, 'locked', modified_asset['locked'])
        # delete the asset from the cache so we check the lock status the next time it is requested.
        del_cached_content(asset_key)
        return JsonResponse(modified_asset, status=201)
def _save_content_to_trash(content):
    """Copy ``content`` into the 'trashcan' contentstore before deletion."""
    contentstore('trashcan').save(content)
    # NOTE(review): the two lines below are unreachable merge residue from the
    # asset_storage_handlers refactor (they belong to the delegating
    # get_file_size) and should be removed.
    """Exposes service method in asset_storage_handlers without breaking existing bindings/dependencies"""
    return get_file_size_source_function(upload_file)
def delete_asset(course_key, asset_key):
    """
    Move the asset (and its thumbnail, if any) into the trashcan store,
    then delete it from the contentstore and evict it from the cache.

    Raises AssetNotFoundException when the asset does not exist.
    """
    content = _check_existence_and_get_asset_content(asset_key)
    _save_content_to_trash(content)
    _delete_thumbnail(content.thumbnail_location, course_key, asset_key)
    contentstore().delete(content.get_id())
    del_cached_content(content.location)
def _check_existence_and_get_asset_content(asset_key):
    """
    Return the stored content for ``asset_key``.

    Raises AssetNotFoundException when the contentstore has no such asset.
    """
    try:
        return contentstore().find(asset_key)
    except NotFoundError as error:
        # Chain the original error so the real cause stays in tracebacks
        # (fixes the previously-suppressed raise-missing-from lint).
        raise AssetNotFoundException from error
def _delete_thumbnail(thumbnail_location, course_key, asset_key):
    """Best-effort removal of the asset's thumbnail; failures are only logged."""
    if thumbnail_location is not None:
        # We are ignoring the value of the thumbnail_location-- we only care whether
        # or not a thumbnail has been stored, and we can now easily create the correct path.
        thumbnail_location = course_key.make_asset_key('thumbnail', asset_key.block_id)
        try:
            thumbnail_content = contentstore().find(thumbnail_location)
            _save_content_to_trash(thumbnail_content)
            contentstore().delete(thumbnail_content.get_id())
            del_cached_content(thumbnail_location)
        except Exception:  # pylint: disable=broad-except
            logging.warning('Could not delete thumbnail: %s', thumbnail_location)
    # NOTE(review): the two lines below are unreachable merge residue from the
    # asset_storage_handlers refactor (they belong to the delegating
    # delete_asset) and should be removed.
    """Exposes service method in asset_storage_handlers without breaking existing bindings/dependencies"""
    return delete_asset_source_function(course_key, asset_key)
def _get_asset_json(display_name, content_type, date, location, thumbnail_location, locked, course_key):
    '''
    Helper method for formatting the asset information to send to client.
    '''
    asset_url = StaticContent.serialize_asset_key_with_slash(location)
    # Prefer a site-configured LMS root so multi-site deployments link correctly.
    external_url = urljoin(configuration_helpers.get_value('LMS_ROOT_URL', settings.LMS_ROOT_URL), asset_url)
    portable_url = StaticContent.get_static_path_from_location(location)
    return {
        'display_name': display_name,
        'content_type': content_type,
        'date_added': get_default_time_display(date),
        'url': asset_url,
        'external_url': external_url,
        'portable_url': portable_url,
        'thumbnail': StaticContent.serialize_asset_key_with_slash(thumbnail_location) if thumbnail_location else None,
        'locked': locked,
        'static_full_url': StaticContent.get_canonicalized_asset_path(course_key, portable_url, '', []),
        # needed for Backbone delete/update.
        'id': str(location)
    }
    # NOTE(review): the unreachable return below is merge residue from the
    # asset_storage_handlers refactor (the new delegating implementation)
    # and should be removed.
    return get_asset_json_source_function(
        display_name,
        content_type,
        date,
        location,
        thumbnail_location,
        locked,
        course_key,
    )
def _update_asset(request, course_key, asset_key):
    """Delegate to asset_storage_handlers.update_asset (kept so existing bindings keep working)."""
    return update_asset_source_function(request, course_key, asset_key)

View File

@@ -43,15 +43,16 @@ from ..helpers import (
)
from .preview import get_preview_fragment
from cms.djangoapps.contentstore.xblock_services import (
from cms.djangoapps.contentstore.xblock_storage_handlers.view_handlers import (
handle_xblock,
create_xblock_info,
load_services_for_studio,
get_block_info,
get_xblock,
delete_orphans,
usage_key_with_run,
)
from cms.djangoapps.contentstore.xblock_storage_handlers.xblock_helpers import usage_key_with_run
__all__ = [
"orphan_handler",

View File

@@ -10,6 +10,7 @@ from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import Http404, HttpResponseBadRequest
from django.shortcuts import redirect
from django.utils.translation import gettext as _
from django.views.decorators.http import require_GET
from opaque_keys import InvalidKeyError
@@ -27,17 +28,13 @@ from common.djangoapps.xblock_django.models import XBlockStudioConfigurationFlag
from cms.djangoapps.contentstore.toggles import use_new_problem_editor
from openedx.core.lib.xblock_utils import get_aside_from_xblock, is_xblock_aside
from openedx.core.djangoapps.discussions.models import DiscussionsConfiguration
try:
# Technically this is a django app plugin, so we should not error if it's not installed:
import openedx.core.djangoapps.content_staging.api as content_staging_api
except ImportError:
content_staging_api = None
from openedx.core.djangoapps.content_staging import api as content_staging_api
from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.exceptions import ItemNotFoundError # lint-amnesty, pylint: disable=wrong-import-order
from ..utils import get_lms_link_for_item, get_sibling_urls, reverse_course_url
from ..toggles import use_new_unit_page
from ..utils import get_lms_link_for_item, get_sibling_urls, reverse_course_url, get_unit_url
from ..helpers import get_parent_xblock, is_unit, xblock_type_display_name
from cms.djangoapps.contentstore.xblock_services.xblock_service import (
from cms.djangoapps.contentstore.xblock_storage_handlers.view_handlers import (
add_container_page_publishing_info,
create_xblock_info,
load_services_for_studio,
@@ -131,7 +128,6 @@ def container_handler(request, usage_key_string):
course, xblock, lms_link, preview_lms_link = _get_item_in_course(request, usage_key)
except ItemNotFoundError:
return HttpResponseBadRequest()
component_templates = get_component_templates(course)
ancestor_xblocks = []
parent = get_parent_xblock(xblock)
@@ -140,6 +136,9 @@ def container_handler(request, usage_key_string):
is_unit_page = is_unit(xblock)
unit = xblock if is_unit_page else None
if is_unit_page and use_new_unit_page(course.id):
return redirect(get_unit_url(course.id, unit.location))
is_first = True
block = xblock
@@ -195,11 +194,7 @@ def container_handler(request, usage_key_string):
index += 1
# Get the status of the user's clipboard so they can paste components if they have something to paste
if content_staging_api:
user_clipboard = content_staging_api.get_user_clipboard_json(request.user.id, request)
else:
user_clipboard = {"content": None}
user_clipboard = content_staging_api.get_user_clipboard_json(request.user.id, request)
return render_to_response('container.html', {
'language_code': request.LANGUAGE_CODE,
'context_course': course, # Needed only for display of menus at top of page.

View File

@@ -1,8 +1,6 @@
"""
Views related to operations on course objects
"""
import copy
import json
import logging
@@ -45,19 +43,22 @@ from common.djangoapps.student.auth import (
has_course_author_access,
has_studio_read_access,
has_studio_write_access,
has_studio_advanced_settings_access
has_studio_advanced_settings_access,
is_content_creator,
)
from common.djangoapps.student.roles import (
CourseInstructorRole,
CourseStaffRole,
GlobalStaff,
UserBasedRole
UserBasedRole,
OrgStaffRole
)
from common.djangoapps.util.date_utils import get_default_time_display
from common.djangoapps.util.json_request import JsonResponse, JsonResponseBadRequest, expect_json
from common.djangoapps.util.string_utils import _has_non_ascii_characters
from common.djangoapps.xblock_django.api import deprecated_xblocks
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.djangoapps.content_staging import api as content_staging_api
from openedx.core.djangoapps.credit.tasks import update_credit_course_requirements
from openedx.core.djangoapps.models.course_details import CourseDetails
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
@@ -85,25 +86,39 @@ from ..course_group_config import (
from ..course_info_model import delete_course_update, get_course_updates, update_course_updates
from ..courseware_index import CoursewareSearchIndexer, SearchIndexingError
from ..tasks import rerun_course as rerun_course_task
from ..toggles import split_library_view_on_dashboard
from ..toggles import (
default_enable_flexible_peer_openassessments,
split_library_view_on_dashboard,
use_new_course_outline_page,
use_new_home_page,
use_new_updates_page,
use_new_advanced_settings_page,
use_new_grading_page,
use_new_schedule_details_page
)
from ..utils import (
add_instructor,
get_course_settings,
get_course_grading,
get_lms_link_for_item,
get_proctored_exam_settings_url,
get_course_outline_url,
get_studio_home_url,
get_updates_url,
get_advanced_settings_url,
get_grading_url,
get_schedule_details_url,
initialize_permissions,
remove_all_instructors,
reverse_course_url,
reverse_library_url,
reverse_url,
reverse_usage_url,
update_course_discussions_settings,
update_course_details,
update_course_discussions_settings,
)
from .component import ADVANCED_COMPONENT_TYPES
from ..helpers import is_content_creator
from cms.djangoapps.contentstore.xblock_services.xblock_service import (
from cms.djangoapps.contentstore.xblock_storage_handlers.view_handlers import (
create_xblock_info,
)
from .library import (
@@ -533,6 +548,8 @@ def course_listing(request):
"""
List all courses and libraries available to the logged in user
"""
if use_new_home_page():
return redirect(get_studio_home_url())
optimization_enabled = GlobalStaff().has_user(request.user) and ENABLE_GLOBAL_STAFF_OPTIMIZATION.is_enabled()
@@ -587,6 +604,7 @@ def course_listing(request):
'optimization_enabled': optimization_enabled,
'active_tab': 'courses',
'allowed_organizations': get_allowed_organizations(user),
'allowed_organizations_for_libraries': get_allowed_organizations_for_libraries(user),
'can_create_organizations': user_can_create_organizations(user),
})
@@ -614,6 +632,7 @@ def library_listing(request):
'split_studio_home': split_library_view_on_dashboard(),
'active_tab': 'libraries',
'allowed_organizations': get_allowed_organizations(request.user),
'allowed_organizations_for_libraries': get_allowed_organizations_for_libraries(request.user),
'can_create_organizations': user_can_create_organizations(request.user),
}
return render_to_response('index.html', data)
@@ -691,6 +710,8 @@ def course_index(request, course_key):
course_block = get_course_and_check_access(course_key, request.user, depth=None)
if not course_block:
raise Http404
if use_new_course_outline_page(course_key):
return redirect(get_course_outline_url(course_key))
lms_link = get_lms_link_for_item(course_block.location)
reindex_link = None
if settings.FEATURES.get('ENABLE_COURSEWARE_INDEX', False):
@@ -725,6 +746,8 @@ def course_index(request, course_key):
advanced_dict = CourseMetadata.fetch(course_block)
proctoring_errors = CourseMetadata.validate_proctoring_settings(course_block, advanced_dict, request.user)
user_clipboard = content_staging_api.get_user_clipboard_json(request.user.id, request)
return render_to_response('course_outline.html', {
'language_code': request.LANGUAGE_CODE,
'context_course': course_block,
@@ -732,6 +755,7 @@ def course_index(request, course_key):
'sections': sections,
'course_structure': course_structure,
'initial_state': course_outline_initial_state(locator_to_show, course_structure) if locator_to_show else None, # lint-amnesty, pylint: disable=line-too-long
'initial_user_clipboard': user_clipboard,
'rerun_notification_id': current_action.id if current_action else None,
'course_release_date': course_release_date,
'settings_url': settings_url,
@@ -974,7 +998,13 @@ def create_new_course(user, org, number, run, fields):
store_for_new_course = modulestore().default_modulestore.get_modulestore_type()
new_course = create_new_course_in_store(store_for_new_course, user, org, number, run, fields)
add_organization_course(org_data, new_course.id)
update_course_discussions_settings(new_course.id)
update_course_discussions_settings(new_course)
# Enable certain fields rolling forward, where configured
if default_enable_flexible_peer_openassessments(new_course.id):
new_course.force_on_flexible_peer_openassessments = True
modulestore().update_item(new_course, new_course.published_by)
return new_course
@@ -1038,6 +1068,10 @@ def rerun_course(user, source_course_key, org, number, run, fields, background=T
fields['enrollment_end'] = None
fields['video_upload_pipeline'] = {}
# Enable certain fields rolling forward, where configured
if default_enable_flexible_peer_openassessments(source_course_key):
fields['force_on_flexible_peer_openassessments'] = True
json_fields = json.dumps(fields, cls=EdxJSONEncoder)
args = [str(source_course_key), str(destination_course_key), user.id, json_fields]
@@ -1066,6 +1100,8 @@ def course_info_handler(request, course_key_string):
course_block = get_course_and_check_access(course_key, request.user)
if not course_block:
raise Http404
if use_new_updates_page(course_key):
return redirect(get_updates_url(course_key))
if 'text/html' in request.META.get('HTTP_ACCEPT', 'text/html'):
return render_to_response(
'course_info.html',
@@ -1150,6 +1186,8 @@ def settings_handler(request, course_key_string): # lint-amnesty, pylint: disab
with modulestore().bulk_operations(course_key):
course_block = get_course_and_check_access(course_key, request.user)
if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
if use_new_schedule_details_page(course_key):
return redirect(get_schedule_details_url(course_key))
settings_context = get_course_settings(request, course_key, course_block)
return render_to_response('settings.html', settings_context)
elif 'application/json' in request.META.get('HTTP_ACCEPT', ''): # pylint: disable=too-many-nested-blocks
@@ -1191,6 +1229,8 @@ def grading_handler(request, course_key_string, grader_index=None):
raise PermissionDenied()
if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
if use_new_grading_page(course_key):
return redirect(get_grading_url(course_key))
grading_context = get_course_grading(course_key)
return render_to_response('settings_graders.html', grading_context)
elif 'application/json' in request.META.get('HTTP_ACCEPT', ''):
@@ -1286,6 +1326,8 @@ def advanced_settings_handler(request, course_key_string):
advanced_dict.get('mobile_available')['deprecated'] = True
if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
if use_new_advanced_settings_page(course_key):
return redirect(get_advanced_settings_url(course_key))
publisher_enabled = configuration_helpers.get_value_for_org(
course_block.location.org,
'ENABLE_PUBLISHER',
@@ -1792,6 +1834,18 @@ def get_allowed_organizations(user):
return []
def get_allowed_organizations_for_libraries(user):
"""
Helper method for returning the list of organizations for which the user is allowed to create libraries.
"""
if settings.FEATURES.get('ENABLE_ORGANIZATION_STAFF_ACCESS_FOR_CONTENT_LIBRARIES', False):
return get_organizations_for_non_course_creators(user)
elif settings.FEATURES.get('ENABLE_CREATOR_GROUP', False):
return get_organizations(user)
else:
return []
def user_can_create_organizations(user):
"""
Returns True if the user can create organizations.
@@ -1799,6 +1853,18 @@ def user_can_create_organizations(user):
return user.is_staff or not settings.FEATURES.get('ENABLE_CREATOR_GROUP', False)
def get_organizations_for_non_course_creators(user):
"""
Returns the list of organizations which the user is a staff member of, as a list of strings.
"""
orgs_map = set()
orgs = OrgStaffRole().get_orgs_for_user(user)
# deduplicate
for org in orgs:
orgs_map.add(org)
return list(orgs_map)
def get_organizations(user):
"""
Returns the list of organizations for which the user is allowed to create courses.

View File

@@ -24,8 +24,8 @@ from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disa
from xmodule.modulestore.exceptions import ItemNotFoundError # lint-amnesty, pylint: disable=wrong-import-order
from ..helpers import remove_entrance_exam_graders
from ..xblock_services.create_xblock import create_xblock
from cms.djangoapps.contentstore.xblock_services.xblock_service import delete_item
from cms.djangoapps.contentstore.xblock_storage_handlers.create_xblock import create_xblock
from cms.djangoapps.contentstore.xblock_storage_handlers.view_handlers import delete_item
__all__ = ['entrance_exam', ]

View File

@@ -18,7 +18,7 @@ def jsonable_error(status=500, message="The Studio servers encountered an error"
def outer(func):
@functools.wraps(func)
def inner(request, *args, **kwargs):
if request.is_ajax():
if request.headers.get('x-requested-with') == 'XMLHttpRequest':
content = dump_js_escaped_json({"error": message})
return HttpResponse(content, content_type="application/json", # lint-amnesty, pylint: disable=http-response-with-content-type-json
status=status)

View File

@@ -19,6 +19,7 @@ from django.core.files import File
from django.core.files.storage import FileSystemStorage
from django.db import transaction
from django.http import Http404, HttpResponse, HttpResponseNotFound, StreamingHttpResponse
from django.shortcuts import redirect
from django.utils.translation import gettext as _
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import ensure_csrf_cookie
@@ -40,7 +41,8 @@ from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disa
from ..storage import course_import_export_storage
from ..tasks import CourseExportTask, CourseImportTask, export_olx, import_olx
from ..utils import reverse_course_url, reverse_library_url
from ..toggles import use_new_export_page, use_new_import_page
from ..utils import reverse_course_url, reverse_library_url, get_export_url, get_import_url
__all__ = [
'import_handler', 'import_status_handler',
@@ -89,6 +91,8 @@ def import_handler(request, course_key_string):
else:
return _write_chunk(request, courselike_key)
elif request.method == 'GET': # assume html
if use_new_import_page(courselike_key):
return redirect(get_import_url(courselike_key))
status_url = reverse_course_url(
"import_status_handler", courselike_key, kwargs={'filename': "fillerName"}
)
@@ -336,6 +340,8 @@ def export_handler(request, course_key_string):
export_olx.delay(request.user.id, course_key_string, request.LANGUAGE_CODE)
return JsonResponse({'ExportStatus': 1})
elif 'text/html' in requested_format:
if use_new_export_page(course_key):
return redirect(get_export_url(course_key))
return render_to_response('export.html', context)
else:
# Only HTML request format is supported (no JSON).

View File

@@ -30,20 +30,21 @@ from common.djangoapps.student.auth import (
STUDIO_VIEW_USERS,
get_user_permissions,
has_studio_read_access,
has_studio_write_access
has_studio_write_access,
)
from common.djangoapps.student.roles import (
CourseInstructorRole,
CourseStaffRole,
LibraryUserRole
LibraryUserRole,
OrgStaffRole,
UserBasedRole,
)
from common.djangoapps.util.json_request import JsonResponse, JsonResponseBadRequest, expect_json
from ..config.waffle import REDIRECT_TO_LIBRARY_AUTHORING_MICROFRONTEND
from ..utils import add_instructor, reverse_library_url
from .component import CONTAINER_TEMPLATES, get_component_templates
from ..helpers import is_content_creator
from cms.djangoapps.contentstore.xblock_services.xblock_service import create_xblock_info
from cms.djangoapps.contentstore.xblock_storage_handlers.view_handlers import create_xblock_info
from .user import user_with_role
__all__ = ['library_handler', 'manage_library_users']
@@ -79,10 +80,11 @@ def user_can_create_library(user, org=None):
elif user.is_staff:
return True
elif settings.FEATURES.get('ENABLE_CREATOR_GROUP', False):
has_course_creator_role = True
if org:
has_course_creator_role = is_content_creator(user, org)
return get_course_creator_status(user) == 'granted' and has_course_creator_role
is_course_creator = get_course_creator_status(user) == 'granted'
has_org_staff_role = OrgStaffRole().get_orgs_for_user(user).exists()
has_course_staff_role = UserBasedRole(user=user, role=CourseStaffRole.ROLE).courses_with_role().exists()
return is_course_creator or has_org_staff_role or has_course_staff_role
else:
# EDUCATOR-1924: DISABLE_LIBRARY_CREATION overrides DISABLE_COURSE_CREATION, if present.
disable_library_creation = settings.FEATURES.get('DISABLE_LIBRARY_CREATION', None)

View File

@@ -24,10 +24,10 @@ from xmodule.partitions.partitions_service import PartitionService
from xmodule.services import SettingsService, TeamsConfigurationService
from xmodule.studio_editable import has_author_view
from xmodule.util.sandboxing import SandboxService
from xmodule.util.xmodule_django import add_webpack_to_fragment
from xmodule.util.builtin_assets import add_webpack_js_to_fragment
from xmodule.x_module import AUTHOR_VIEW, PREVIEW_VIEWS, STUDENT_VIEW, XModuleMixin
from cms.djangoapps.xblock_config.models import StudioConfig
from cms.djangoapps.contentstore.toggles import individualize_anonymous_user_id, ENABLE_COPY_PASTE_FEATURE
from cms.djangoapps.contentstore.toggles import individualize_anonymous_user_id
from cms.lib.xblock.field_data import CmsFieldData
from common.djangoapps.static_replace.services import ReplaceURLService
from common.djangoapps.static_replace.wrapper import replace_urls_wrapper
@@ -149,14 +149,13 @@ def preview_layout_asides(block, context, frag, view_name, aside_frag_fns, wrap_
return result
def _prepare_runtime_for_preview(request, block, field_data):
def _prepare_runtime_for_preview(request, block):
"""
Sets properties in the runtime of the specified block that is
required for rendering block previews.
request: The active django request
block: An XBlock
field_data: Wrapped field data for previews
"""
course_id = block.location.course_key
@@ -199,7 +198,6 @@ def _prepare_runtime_for_preview(request, block, field_data):
deprecated_anonymous_user_id = anonymous_id_for_user(request.user, None)
services = {
"field-data": field_data,
"i18n": XBlockI18nService,
'mako': mako_service,
"settings": SettingsService(),
@@ -222,7 +220,7 @@ def _prepare_runtime_for_preview(request, block, field_data):
# Set up functions to modify the fragment produced by student_view
block.runtime.wrappers = wrappers
block.runtime.wrappers_asides = wrappers_asides
block.runtime._runtime_services.update(services) # lint-amnesty, pylint: disable=protected-access
block.runtime._services.update(services) # pylint: disable=protected-access
# xmodules can check for this attribute during rendering to determine if
# they are being rendered for preview (i.e. in Studio)
@@ -266,9 +264,7 @@ def _load_preview_block(request: Request, block: XModuleMixin):
else:
wrapper = partial(LmsFieldData, student_data=student_data)
# wrap the _field_data upfront to pass to _prepare_runtime_for_preview
wrapped_field_data = wrapper(block._field_data) # pylint: disable=protected-access
_prepare_runtime_for_preview(request, block, wrapped_field_data)
_prepare_runtime_for_preview(request, block)
block.bind_for_student(
request.user.id,
@@ -305,8 +301,6 @@ def _studio_wrap_xblock(xblock, view, frag, context, display_name_only=False):
can_edit = context.get('can_edit', True)
# Is this a course or a library?
is_course = xblock.scope_ids.usage_id.context_key.is_course
# Copy-paste is a new feature; while we are beta-testing it, only beta users with the Waffle flag enabled see it
enable_copy_paste = can_edit and is_course and ENABLE_COPY_PASTE_FEATURE.is_enabled()
template_context = {
'xblock_context': context,
'xblock': xblock,
@@ -315,7 +309,6 @@ def _studio_wrap_xblock(xblock, view, frag, context, display_name_only=False):
'is_root': is_root,
'is_reorderable': is_reorderable,
'can_edit': can_edit,
'enable_copy_paste': enable_copy_paste,
'can_edit_visibility': context.get('can_edit_visibility', is_course),
'selected_groups_label': selected_groups_label,
'can_add': context.get('can_add', True),
@@ -323,7 +316,7 @@ def _studio_wrap_xblock(xblock, view, frag, context, display_name_only=False):
'language': getattr(course, 'language', None)
}
add_webpack_to_fragment(frag, "js/factories/xblock_validation")
add_webpack_js_to_fragment(frag, "js/factories/xblock_validation")
html = render_to_string('studio_xblock_wrapper.html', template_context)
frag = wrap_fragment(frag, html)

View File

@@ -7,6 +7,7 @@ from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseNotFound
from django.shortcuts import redirect
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_http_methods
from opaque_keys.edx.keys import CourseKey, UsageKey
@@ -19,7 +20,8 @@ from xmodule.tabs import CourseTab, CourseTabList, InvalidTabsException, StaticT
from common.djangoapps.edxmako.shortcuts import render_to_response
from common.djangoapps.student.auth import has_course_author_access
from common.djangoapps.util.json_request import JsonResponse, JsonResponseBadRequest, expect_json
from ..utils import get_lms_link_for_item, get_pages_and_resources_url
from ..toggles import use_new_custom_pages
from ..utils import get_lms_link_for_item, get_pages_and_resources_url, get_custom_pages_url
__all__ = ["tabs_handler", "update_tabs_handler"]
@@ -63,7 +65,8 @@ def tabs_handler(request, course_key_string):
elif request.method == "GET": # assume html
# get all tabs from the tabs list: static tabs (a.k.a. user-created tabs) and built-in tabs
# present in the same order they are displayed in LMS
if use_new_custom_pages(course_key):
return redirect(get_custom_pages_url(course_key))
tabs_to_render = list(get_course_tabs(course_item, request.user))
return render_to_response(

View File

@@ -366,7 +366,7 @@ class UploadTestCase(AssetsTestCase):
(MAX_FILE_SIZE, "justequals.file.test", 200),
(MAX_FILE_SIZE + 90, "large.file.test", 413),
)
@mock.patch('cms.djangoapps.contentstore.views.assets.get_file_size')
@mock.patch('cms.djangoapps.contentstore.asset_storage_handlers.get_file_size')
def test_file_size(self, case, get_file_size):
max_file_size, name, status_code = case

View File

@@ -62,7 +62,7 @@ from cms.djangoapps.contentstore.utils import (
duplicate_block,
update_from_source,
)
from cms.djangoapps.contentstore.xblock_services import xblock_service as item_module
from cms.djangoapps.contentstore.xblock_storage_handlers import view_handlers as item_module
from common.djangoapps.student.tests.factories import StaffFactory, UserFactory
from common.djangoapps.xblock_django.models import (
XBlockConfiguration,
@@ -74,7 +74,7 @@ from lms.djangoapps.lms_xblock.mixin import NONSENSICAL_ACCESS_RESTRICTION
from openedx.core.djangoapps.discussions.models import DiscussionsConfiguration
from ..component import component_handler, get_component_templates
from cms.djangoapps.contentstore.xblock_services.xblock_service import (
from cms.djangoapps.contentstore.xblock_storage_handlers.view_handlers import (
ALWAYS,
VisibilityState,
get_block_info,
@@ -111,13 +111,11 @@ class ItemTest(CourseTestCase):
self.course_key = self.course.id
self.usage_key = self.course.location
def get_item_from_modulestore(self, usage_key, verify_is_draft=False):
def get_item_from_modulestore(self, usage_key):
"""
Get the item referenced by the UsageKey from the modulestore
"""
item = self.store.get_item(usage_key)
if verify_is_draft:
self.assertTrue(getattr(item, "is_draft", False))
return item
def response_usage_key(self, response):
@@ -540,9 +538,8 @@ class GetItemTest(ItemTest):
class DeleteItem(ItemTest):
"""Tests for '/xblock' DELETE url."""
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_delete_static_page(self, store):
course = CourseFactory.create(default_store=store)
def test_delete_static_page(self):
course = CourseFactory.create()
# Add static tab
resp = self.create_xblock(
category="static_tab", parent_usage_key=course.location
@@ -589,7 +586,7 @@ class TestCreateItem(ItemTest):
parent_usage_key=vert_usage_key, category="problem", boilerplate=template_id
)
prob_usage_key = self.response_usage_key(resp)
problem = self.get_item_from_modulestore(prob_usage_key, verify_is_draft=True)
problem = self.get_item_from_modulestore(prob_usage_key)
# check against the template
template = ProblemBlock.get_template(template_id)
self.assertEqual(problem.data, template["data"])
@@ -807,9 +804,7 @@ class TestDuplicateItem(ItemTest, DuplicateHelper, OpenEdxEventsTestMixin):
self.html_usage_key = self.response_usage_key(resp)
# Create a second sequential just (testing children of children)
self.create_xblock(
parent_usage_key=self.chapter_usage_key, category="sequential2"
)
self.create_xblock(parent_usage_key=self.chapter_usage_key, category='sequential')
def test_duplicate_equality(self):
"""
@@ -976,10 +971,10 @@ class TestMoveItem(ItemTest):
if not default_store:
default_store = self.store.default_modulestore.get_modulestore_type()
self.course = CourseFactory.create(default_store=default_store)
course = CourseFactory.create(default_store=default_store)
# Create group configurations
self.course.user_partitions = [
course.user_partitions = [
UserPartition(
0,
"first_partition",
@@ -987,18 +982,18 @@ class TestMoveItem(ItemTest):
[Group("0", "alpha"), Group("1", "beta")],
)
]
self.store.update_item(self.course, self.user.id)
self.store.update_item(course, self.user.id)
# Create a parent chapter
chap1 = self.create_xblock(
parent_usage_key=self.course.location,
parent_usage_key=course.location,
display_name="chapter1",
category="chapter",
)
self.chapter_usage_key = self.response_usage_key(chap1)
chap2 = self.create_xblock(
parent_usage_key=self.course.location,
parent_usage_key=course.location,
display_name="chapter2",
category="chapter",
)
@@ -1053,6 +1048,8 @@ class TestMoveItem(ItemTest):
)
self.split_test_usage_key = self.response_usage_key(resp)
self.course = self.store.get_item(course.location)
def setup_and_verify_content_experiment(self, partition_id):
"""
Helper method to set up group configurations to content experiment.
@@ -1060,9 +1057,7 @@ class TestMoveItem(ItemTest):
Arguments:
partition_id (int): User partition id.
"""
split_test = self.get_item_from_modulestore(
self.split_test_usage_key, verify_is_draft=True
)
split_test = self.get_item_from_modulestore(self.split_test_usage_key)
# Initially, no user_partition_id is set, and the split_test has no children.
self.assertEqual(split_test.user_partition_id, -1)
@@ -1073,9 +1068,7 @@ class TestMoveItem(ItemTest):
reverse_usage_url("xblock_handler", self.split_test_usage_key),
data={"metadata": {"user_partition_id": str(partition_id)}},
)
split_test = self.get_item_from_modulestore(
self.split_test_usage_key, verify_is_draft=True
)
split_test = self.get_item_from_modulestore(self.split_test_usage_key)
self.assertEqual(split_test.user_partition_id, partition_id)
self.assertEqual(
len(split_test.children),
@@ -1141,15 +1134,11 @@ class TestMoveItem(ItemTest):
self.assertIn(source_usage_key, target_parent.children)
self.assertNotIn(source_usage_key, source_parent.children)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_move_component(self, store_type):
def test_move_component(self):
"""
Test move component with different xblock types.
Arguments:
store_type (ModuleStoreEnum.Type): Type of modulestore to create test course in.
"""
self.setup_course(default_store=store_type)
self.setup_course()
for source_usage_key, target_usage_key in [
(self.html_usage_key, self.vert2_usage_key),
(self.vert_usage_key, self.seq2_usage_key),
@@ -1391,9 +1380,7 @@ class TestMoveItem(ItemTest):
reverse_usage_url("xblock_handler", child_split_test_usage_key),
data={"metadata": {"user_partition_id": str(0)}},
)
child_split_test = self.get_item_from_modulestore(
self.split_test_usage_key, verify_is_draft=True
)
child_split_test = self.get_item_from_modulestore(self.split_test_usage_key)
# Try to move content experiment further down the level to a child group A nested inside main group A.
response = self._move_component(
@@ -1469,6 +1456,7 @@ class TestMoveItem(ItemTest):
"""
group1 = self.course.user_partitions[0].groups[0]
group2 = self.course.user_partitions[0].groups[1]
vert1 = self.store.get_item(self.vert_usage_key)
vert2 = self.store.get_item(self.vert2_usage_key)
html = self.store.get_item(self.html_usage_key)
@@ -1481,10 +1469,12 @@ class TestMoveItem(ItemTest):
html.runtime._services["partitions"] = partitions_service # lint-amnesty, pylint: disable=protected-access
# Set access settings so html will contradict vert2 when moved into that unit
vert1.group_access = {self.course.user_partitions[0].id: [group2.id]}
vert2.group_access = {self.course.user_partitions[0].id: [group1.id]}
html.group_access = {self.course.user_partitions[0].id: [group2.id]}
self.store.update_item(html, self.user.id)
self.store.update_item(vert2, self.user.id)
vert1 = self.store.update_item(vert1, self.user.id)
vert2 = self.store.update_item(vert2, self.user.id)
html = self.store.update_item(html, self.user.id)
# Verify that there is no warning when html is in a non contradicting unit
validation = html.validate()
@@ -1493,7 +1483,7 @@ class TestMoveItem(ItemTest):
# Now move it and confirm that the html component has been moved into vertical 2
self.assert_move_item(self.html_usage_key, self.vert2_usage_key)
html.parent = self.vert2_usage_key
self.store.update_item(html, self.user.id)
html = self.store.update_item(html, self.user.id)
validation = html.validate()
self.assertEqual(len(validation.messages), 1)
self._verify_validation_message(
@@ -1505,11 +1495,11 @@ class TestMoveItem(ItemTest):
# Move the html component back and confirm that the warning is gone again
self.assert_move_item(self.html_usage_key, self.vert_usage_key)
html.parent = self.vert_usage_key
self.store.update_item(html, self.user.id)
html = self.store.update_item(html, self.user.id)
validation = html.validate()
self.assertEqual(len(validation.messages), 0)
@patch("cms.djangoapps.contentstore.xblock_services.xblock_service.log")
@patch("cms.djangoapps.contentstore.xblock_storage_handlers.view_handlers.log")
def test_move_logging(self, mock_logger):
"""
Test logging when an item is successfully moved.
@@ -1527,16 +1517,12 @@ class TestMoveItem(ItemTest):
insert_at,
)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_move_and_discard_changes(self, store_type):
def test_move_and_discard_changes(self):
"""
Verifies that discard changes operation brings moved component back to source location and removes the component
from target location.
Arguments:
store_type (ModuleStoreEnum.Type): Type of modulestore to create test course in.
"""
self.setup_course(default_store=store_type)
self.setup_course()
old_parent_loc = self.store.get_parent_location(self.html_usage_key)
@@ -1594,15 +1580,11 @@ class TestMoveItem(ItemTest):
self.assertIn(self.html_usage_key, source_parent.children)
self.assertNotIn(self.html_usage_key, target_parent.children)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_move_item_not_found(self, store_type=ModuleStoreEnum.Type.mongo):
def test_move_item_not_found(self):
"""
Test that an item not found exception raised when an item is not found when getting the item.
Arguments:
store_type (ModuleStoreEnum.Type): Type of modulestore to create test course in.
"""
self.setup_course(default_store=store_type)
self.setup_course()
data = {
"move_source_locator": str(
@@ -1752,30 +1734,25 @@ class TestEditItem(TestEditItemSetup):
self.client.ajax_post(
self.problem_update_url, data={"metadata": {"rerandomize": "onreset"}}
)
problem = self.get_item_from_modulestore(
self.problem_usage_key, verify_is_draft=True
)
self.assertEqual(problem.rerandomize, "onreset")
problem = self.get_item_from_modulestore(self.problem_usage_key)
self.assertEqual(problem.rerandomize, 'onreset')
self.client.ajax_post(
self.problem_update_url, data={"metadata": {"rerandomize": None}}
)
problem = self.get_item_from_modulestore(
self.problem_usage_key, verify_is_draft=True
)
self.assertEqual(problem.rerandomize, "never")
problem = self.get_item_from_modulestore(self.problem_usage_key)
self.assertEqual(problem.rerandomize, 'never')
def test_null_field(self):
"""
Sending null in for a field 'deletes' it
"""
problem = self.get_item_from_modulestore(
self.problem_usage_key, verify_is_draft=True
)
problem = self.get_item_from_modulestore(self.problem_usage_key)
self.assertIsNotNone(problem.markdown)
self.client.ajax_post(self.problem_update_url, data={"nullout": ["markdown"]})
problem = self.get_item_from_modulestore(
self.problem_usage_key, verify_is_draft=True
self.client.ajax_post(
self.problem_update_url,
data={'nullout': ['markdown']}
)
problem = self.get_item_from_modulestore(self.problem_usage_key)
self.assertIsNone(problem.markdown)
def test_date_fields(self):
@@ -1831,9 +1808,7 @@ class TestEditItem(TestEditItemSetup):
}
},
)
problem = self.get_item_from_modulestore(
self.problem_usage_key, verify_is_draft=True
)
problem = self.get_item_from_modulestore(self.problem_usage_key)
self.assertEqual(problem.display_name, new_display_name)
self.assertEqual(problem.max_attempts, new_max_attempts)
@@ -2052,9 +2027,7 @@ class TestEditItem(TestEditItemSetup):
},
)
self.assertFalse(self._is_location_published(self.problem_usage_key))
draft = self.get_item_from_modulestore(
self.problem_usage_key, verify_is_draft=True
)
draft = self.get_item_from_modulestore(self.problem_usage_key)
self.assertEqual(draft.display_name, new_display_name)
# Publish the item
@@ -2112,9 +2085,7 @@ class TestEditItem(TestEditItemSetup):
self.client.ajax_post(
self.problem_update_url, data={"metadata": {"due": "2077-10-10T04:00Z"}}
)
updated_draft = self.get_item_from_modulestore(
self.problem_usage_key, verify_is_draft=True
)
updated_draft = self.get_item_from_modulestore(self.problem_usage_key)
self.assertEqual(updated_draft.due, datetime(2077, 10, 10, 4, 0, tzinfo=UTC))
self.assertIsNone(published.due)
# Fetch the published version again to make sure the due date is still unset.
@@ -2154,9 +2125,7 @@ class TestEditItem(TestEditItemSetup):
)
# Both published and draft content should be different
draft = self.get_item_from_modulestore(
self.problem_usage_key, verify_is_draft=True
)
draft = self.get_item_from_modulestore(self.problem_usage_key)
self.assertNotEqual(draft.data, published.data)
# Get problem by 'xblock_handler'
@@ -2174,9 +2143,7 @@ class TestEditItem(TestEditItemSetup):
self.assertEqual(resp.status_code, 200)
# Both published and draft content should still be different
draft = self.get_item_from_modulestore(
self.problem_usage_key, verify_is_draft=True
)
draft = self.get_item_from_modulestore(self.problem_usage_key)
self.assertNotEqual(draft.data, published.data)
# Fetch the published version again to make sure the data is correct.
published = modulestore().get_item(
@@ -2209,18 +2176,6 @@ class TestEditItem(TestEditItemSetup):
self._verify_published_with_no_draft(unit_usage_key)
self._verify_published_with_no_draft(html_usage_key)
# Make a draft for the unit and verify that the problem also has a draft
resp = self.client.ajax_post(
unit_update_url,
data={
"id": str(unit_usage_key),
"metadata": {},
},
)
self.assertEqual(resp.status_code, 200)
self._verify_published_with_draft(unit_usage_key)
self._verify_published_with_draft(html_usage_key)
def test_field_value_errors(self):
"""
Test that if the user's input causes a ValueError on an XBlock field,
@@ -2346,9 +2301,7 @@ class TestEditSplitModule(ItemTest):
)
# Verify the partition_id was saved.
split_test = self.get_item_from_modulestore(
self.split_test_usage_key, verify_is_draft=True
)
split_test = self.get_item_from_modulestore(self.split_test_usage_key)
self.assertEqual(partition_id, split_test.user_partition_id)
return split_test
@@ -2356,7 +2309,7 @@ class TestEditSplitModule(ItemTest):
"""
Verifies the number of children of the split_test instance.
"""
split_test = self.get_item_from_modulestore(self.split_test_usage_key, True)
split_test = self.get_item_from_modulestore(self.split_test_usage_key)
self.assertEqual(expected_number, len(split_test.children))
return split_test
@@ -2365,9 +2318,7 @@ class TestEditSplitModule(ItemTest):
Test that verticals are created for the configuration groups when
a spit test block is edited.
"""
split_test = self.get_item_from_modulestore(
self.split_test_usage_key, verify_is_draft=True
)
split_test = self.get_item_from_modulestore(self.split_test_usage_key)
# Initially, no user_partition_id is set, and the split_test has no children.
self.assertEqual(-1, split_test.user_partition_id)
self.assertEqual(0, len(split_test.children))
@@ -2377,12 +2328,8 @@ class TestEditSplitModule(ItemTest):
# Verify that child verticals have been set to match the groups
self.assertEqual(2, len(split_test.children))
vertical_0 = self.get_item_from_modulestore(
split_test.children[0], verify_is_draft=True
)
vertical_1 = self.get_item_from_modulestore(
split_test.children[1], verify_is_draft=True
)
vertical_0 = self.get_item_from_modulestore(split_test.children[0])
vertical_1 = self.get_item_from_modulestore(split_test.children[1])
self.assertEqual("vertical", vertical_0.category)
self.assertEqual("vertical", vertical_1.category)
self.assertEqual(
@@ -2407,9 +2354,7 @@ class TestEditSplitModule(ItemTest):
"""
Test that concise outline for split test component gives display name as group name.
"""
split_test = self.get_item_from_modulestore(
self.split_test_usage_key, verify_is_draft=True
)
split_test = self.get_item_from_modulestore(self.split_test_usage_key)
# Initially, no user_partition_id is set, and the split_test has no children.
self.assertEqual(split_test.user_partition_id, -1)
self.assertEqual(len(split_test.children), 0)
@@ -2451,15 +2396,9 @@ class TestEditSplitModule(ItemTest):
self.assertEqual(5, len(split_test.children))
self.assertEqual(initial_vertical_0_location, split_test.children[0])
self.assertEqual(initial_vertical_1_location, split_test.children[1])
vertical_0 = self.get_item_from_modulestore(
split_test.children[2], verify_is_draft=True
)
vertical_1 = self.get_item_from_modulestore(
split_test.children[3], verify_is_draft=True
)
vertical_2 = self.get_item_from_modulestore(
split_test.children[4], verify_is_draft=True
)
vertical_0 = self.get_item_from_modulestore(split_test.children[2])
vertical_1 = self.get_item_from_modulestore(split_test.children[3])
vertical_2 = self.get_item_from_modulestore(split_test.children[4])
# Verify that the group_id_to child mapping is correct.
self.assertEqual(3, len(split_test.group_id_to_child))
@@ -3106,39 +3045,25 @@ class TestXBlockInfo(ItemTest):
json_response = json.loads(resp.content.decode("utf-8"))
self.validate_course_xblock_info(json_response, course_outline=True)
@ddt.data(
(ModuleStoreEnum.Type.split, 3, 3),
(ModuleStoreEnum.Type.mongo, 8, 12),
)
@ddt.unpack
def test_xblock_outline_handler_mongo_calls(
self, store_type, chapter_queries, chapter_queries_1
):
with self.store.default_store(store_type):
course = CourseFactory.create()
chapter = BlockFactory.create(
parent_location=course.location,
category="chapter",
display_name="Week 1",
)
outline_url = reverse_usage_url("xblock_outline_handler", chapter.location)
with check_mongo_calls(chapter_queries):
self.client.get(outline_url, HTTP_ACCEPT="application/json")
def test_xblock_outline_handler_mongo_calls(self):
course = CourseFactory.create()
chapter = BlockFactory.create(
parent_location=course.location, category='chapter', display_name='Week 1'
)
outline_url = reverse_usage_url('xblock_outline_handler', chapter.location)
with check_mongo_calls(3):
self.client.get(outline_url, HTTP_ACCEPT='application/json')
sequential = BlockFactory.create(
parent_location=chapter.location,
category="sequential",
display_name="Sequential 1",
)
sequential = BlockFactory.create(
parent_location=chapter.location, category='sequential', display_name='Sequential 1'
)
BlockFactory.create(
parent_location=sequential.location,
category="vertical",
display_name="Vertical 1",
)
# calls should be same after adding two new children for split only.
with check_mongo_calls(chapter_queries_1):
self.client.get(outline_url, HTTP_ACCEPT="application/json")
BlockFactory.create(
parent_location=sequential.location, category='vertical', display_name='Vertical 1'
)
# calls should be same after adding two new children for split only.
with check_mongo_calls(3):
self.client.get(outline_url, HTTP_ACCEPT='application/json')
def test_entrance_exam_chapter_xblock_info(self):
chapter = BlockFactory.create(
@@ -3264,32 +3189,26 @@ class TestXBlockInfo(ItemTest):
)
self.validate_component_xblock_info(xblock_info)
@ddt.data(ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.mongo)
def test_validate_start_date(self, store_type):
def test_validate_start_date(self):
"""
Validate if start-date year is less than 1900 reset the date to DEFAULT_START_DATE.
"""
with self.store.default_store(store_type):
course = CourseFactory.create()
chapter = BlockFactory.create(
parent_location=course.location,
category="chapter",
display_name="Week 1",
)
course = CourseFactory.create()
chapter = BlockFactory.create(
parent_location=course.location, category='chapter', display_name='Week 1'
)
chapter.start = datetime(year=1899, month=1, day=1, tzinfo=UTC)
chapter.start = datetime(year=1899, month=1, day=1, tzinfo=UTC)
xblock_info = create_xblock_info(
chapter,
include_child_info=True,
include_children_predicate=ALWAYS,
include_ancestor_info=True,
user=self.user,
)
xblock_info = create_xblock_info(
chapter,
include_child_info=True,
include_children_predicate=ALWAYS,
include_ancestor_info=True,
user=self.user
)
self.assertEqual(
xblock_info["start"], DEFAULT_START_DATE.strftime("%Y-%m-%dT%H:%M:%SZ")
)
self.assertEqual(xblock_info['start'], DEFAULT_START_DATE.strftime('%Y-%m-%dT%H:%M:%SZ'))
def test_highlights_enabled(self):
self.course.highlights_enabled_for_messaging = True
@@ -3489,9 +3408,11 @@ class TestSpecialExamXBlockInfo(ItemTest):
user_id=user_id,
highlights=["highlight"],
)
# get updated course
self.course = self.store.get_item(self.course.location)
self.course.enable_proctored_exams = True
self.course.save()
self.store.update_item(self.course, self.user.id)
self.course = self.store.update_item(self.course, self.user.id)
def test_proctoring_is_enabled_for_course(self):
course = modulestore().get_item(self.course.location)
@@ -3517,7 +3438,7 @@ class TestSpecialExamXBlockInfo(ItemTest):
category="sequential",
display_name="Test Lesson 1",
user_id=self.user.id,
is_proctored_exam=True,
is_proctored_enabled=True,
is_time_limited=True,
default_time_limit_minutes=100,
is_onboarding_exam=False,
@@ -3561,7 +3482,7 @@ class TestSpecialExamXBlockInfo(ItemTest):
category="sequential",
display_name="Test Lesson 1",
user_id=self.user.id,
is_proctored_exam=False,
is_proctored_enabled=False,
is_time_limited=False,
is_onboarding_exam=False,
)
@@ -3589,7 +3510,7 @@ class TestSpecialExamXBlockInfo(ItemTest):
category="sequential",
display_name="Test Lesson 1",
user_id=self.user.id,
is_proctored_exam=False,
is_proctored_enabled=False,
is_time_limited=False,
is_onboarding_exam=False,
)
@@ -3849,9 +3770,8 @@ class TestXBlockPublishingInfo(ItemTest):
xblock_info = self._get_xblock_info(empty_chapter.location)
self._verify_visibility_state(xblock_info, VisibilityState.unscheduled)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_chapter_self_paced_default_start_date(self, store_type):
course = CourseFactory.create(default_store=store_type)
def test_chapter_self_paced_default_start_date(self):
course = CourseFactory.create()
course.self_paced = True
self.store.update_item(course, self.user.id)
chapter = self._create_child(course, "chapter", "Test Chapter")
@@ -3939,29 +3859,15 @@ class TestXBlockPublishingInfo(ItemTest):
)
def test_partially_released_section(self):
chapter = self._create_child(self.course, "chapter", "Test Chapter")
released_sequential = self._create_child(
chapter, "sequential", "Released Sequential"
)
self._create_child(
released_sequential, "vertical", "Released Unit", publish_item=True
)
self._create_child(
released_sequential, "vertical", "Staff Only Unit", staff_only=True
)
chapter = self._create_child(self.course, 'chapter', "Test Chapter")
released_sequential = self._create_child(chapter, 'sequential', "Released Sequential")
self._create_child(released_sequential, 'vertical', "Released Unit", publish_item=True)
self._create_child(released_sequential, 'vertical', "Staff Only Unit 1", staff_only=True)
self._set_release_date(chapter.location, datetime.now(UTC) - timedelta(days=1))
published_sequential = self._create_child(
chapter, "sequential", "Published Sequential"
)
self._create_child(
published_sequential, "vertical", "Published Unit", publish_item=True
)
self._create_child(
published_sequential, "vertical", "Staff Only Unit", staff_only=True
)
self._set_release_date(
published_sequential.location, datetime.now(UTC) + timedelta(days=1)
)
published_sequential = self._create_child(chapter, 'sequential', "Published Sequential")
self._create_child(published_sequential, 'vertical', "Published Unit", publish_item=True)
self._create_child(published_sequential, 'vertical', "Staff Only Unit 2", staff_only=True)
self._set_release_date(published_sequential.location, datetime.now(UTC) + timedelta(days=1))
xblock_info = self._get_xblock_info(chapter.location)
# Verify the state of the released sequential
@@ -4191,8 +4097,7 @@ class TestXBlockPublishingInfo(ItemTest):
xblock_info, True, path=self.FIRST_UNIT_PATH
)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_self_paced_item_visibility_state(self, store_type):
def test_self_paced_item_visibility_state(self):
"""
Test that in self-paced course, item has `live` visibility state.
Test that when item was initially in `scheduled` state in instructor mode, change course pacing to self-paced,
@@ -4200,7 +4105,7 @@ class TestXBlockPublishingInfo(ItemTest):
"""
# Create course, chapter and setup future release date to make chapter in scheduled state
course = CourseFactory.create(default_store=store_type)
course = CourseFactory.create()
chapter = self._create_child(course, "chapter", "Test Chapter")
self._set_release_date(chapter.location, datetime.now(UTC) + timedelta(days=1))

View File

@@ -5,7 +5,7 @@ APIs.
"""
from opaque_keys.edx.keys import UsageKey
from rest_framework.test import APIClient
from xmodule.modulestore.django import contentstore, modulestore
from xmodule.modulestore.django import contentstore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, upload_file_to_course
from xmodule.modulestore.tests.factories import BlockFactory, CourseFactory, ToyCourseFactory
@@ -34,7 +34,7 @@ class ClipboardPasteTestCase(ModuleStoreTestCase):
# Check how many blocks are in the vertical currently
parent_key = course_key.make_usage_key("vertical", "vertical_test") # This is the vertical that holds the video
orig_vertical = modulestore().get_item(parent_key)
orig_vertical = self.store.get_item(parent_key)
assert len(orig_vertical.children) == 4
# Copy the video
@@ -51,16 +51,54 @@ class ClipboardPasteTestCase(ModuleStoreTestCase):
new_block_key = UsageKey.from_string(paste_response.json()["locator"])
# Now there should be an extra block in the vertical:
updated_vertical = modulestore().get_item(parent_key)
updated_vertical = self.store.get_item(parent_key)
assert len(updated_vertical.children) == 5
assert updated_vertical.children[-1] == new_block_key
# And it should match the original:
orig_video = modulestore().get_item(video_key)
new_video = modulestore().get_item(new_block_key)
orig_video = self.store.get_item(video_key)
new_video = self.store.get_item(new_block_key)
assert new_video.youtube_id_1_0 == orig_video.youtube_id_1_0
# The new block should store a reference to where it was copied from
assert new_video.copied_from_block == str(video_key)
def test_copy_and_paste_unit(self):
"""
Test copying a unit (vertical) from one course into another
"""
course_key, client = self._setup_course()
dest_course = CourseFactory.create(display_name='Destination Course')
with self.store.bulk_operations(dest_course.id):
dest_chapter = BlockFactory.create(parent=dest_course, category='chapter', display_name='Section')
dest_sequential = BlockFactory.create(parent=dest_chapter, category='sequential', display_name='Subsection')
# Copy the unit
unit_key = course_key.make_usage_key("vertical", "vertical_test")
copy_response = client.post(CLIPBOARD_ENDPOINT, {"usage_key": str(unit_key)}, format="json")
assert copy_response.status_code == 200
# Paste the unit
paste_response = client.post(XBLOCK_ENDPOINT, {
"parent_locator": str(dest_sequential.location),
"staged_content": "clipboard",
}, format="json")
assert paste_response.status_code == 200
dest_unit_key = UsageKey.from_string(paste_response.json()["locator"])
# Now there should be a one unit/vertical as a child of the destination sequential/subsection:
updated_sequential = self.store.get_item(dest_sequential.location)
assert updated_sequential.children == [dest_unit_key]
# And it should match the original:
orig_unit = self.store.get_item(unit_key)
dest_unit = self.store.get_item(dest_unit_key)
assert len(orig_unit.children) == len(dest_unit.children)
# Check details of the fourth child (a poll)
orig_poll = self.store.get_item(orig_unit.children[3])
dest_poll = self.store.get_item(dest_unit.children[3])
assert dest_poll.display_name == orig_poll.display_name
assert dest_poll.question == orig_poll.question
# The new block should store a reference to where it was copied from
assert dest_unit.copied_from_block == str(unit_key)
def test_paste_with_assets(self):
"""
When pasting into a different course, any required static assets should

View File

@@ -9,8 +9,8 @@ from unittest.mock import Mock, patch
from django.http import Http404
from django.test.client import RequestFactory
from django.utils import http
from pytz import UTC
from urllib.parse import quote
import cms.djangoapps.contentstore.views.component as views
from cms.djangoapps.contentstore.tests.test_libraries import LibraryTestCase
@@ -31,29 +31,30 @@ class ContainerPageTestCase(StudioPageTestCase, LibraryTestCase):
def setUp(self):
super().setUp()
self.vertical = self._create_block(self.sequential.location, 'vertical', 'Unit')
self.html = self._create_block(self.vertical.location, "html", "HTML")
self.child_container = self._create_block(self.vertical.location, 'split_test', 'Split Test')
self.child_vertical = self._create_block(self.child_container.location, 'vertical', 'Child Vertical')
self.video = self._create_block(self.child_vertical.location, "video", "My Video")
self.vertical = self._create_block(self.sequential, 'vertical', 'Unit')
self.html = self._create_block(self.vertical, "html", "HTML")
self.child_container = self._create_block(self.vertical, 'split_test', 'Split Test')
self.child_vertical = self._create_block(self.child_container, 'vertical', 'Child Vertical')
self.video = self._create_block(self.child_vertical, "video", "My Video")
self.store = modulestore()
past = datetime.datetime(1970, 1, 1, tzinfo=UTC)
future = datetime.datetime.now(UTC) + datetime.timedelta(days=1)
self.released_private_vertical = self._create_block(
parent_location=self.sequential.location, category='vertical', display_name='Released Private Unit',
parent=self.sequential, category='vertical', display_name='Released Private Unit',
start=past)
self.unreleased_private_vertical = self._create_block(
parent_location=self.sequential.location, category='vertical', display_name='Unreleased Private Unit',
parent=self.sequential, category='vertical', display_name='Unreleased Private Unit',
start=future)
self.released_public_vertical = self._create_block(
parent_location=self.sequential.location, category='vertical', display_name='Released Public Unit',
parent=self.sequential, category='vertical', display_name='Released Public Unit',
start=past)
self.unreleased_public_vertical = self._create_block(
parent_location=self.sequential.location, category='vertical', display_name='Unreleased Public Unit',
parent=self.sequential, category='vertical', display_name='Unreleased Public Unit',
start=future)
self.store.publish(self.unreleased_public_vertical.location, self.user.id)
self.store.publish(self.released_public_vertical.location, self.user.id)
self.store.publish(self.vertical.location, self.user.id)
def test_container_html(self):
self._test_html_content(
@@ -67,10 +68,10 @@ class ContainerPageTestCase(StudioPageTestCase, LibraryTestCase):
'<a href="/course/{course}{subsection_parameters}">Lesson 1</a>'
).format(
course=re.escape(str(self.course.id)),
section_parameters=re.escape('?show={}'.format(http.urlquote(
section_parameters=re.escape('?show={}'.format(quote(
str(self.chapter.location).encode()
))),
subsection_parameters=re.escape('?show={}'.format(http.urlquote(
subsection_parameters=re.escape('?show={}'.format(quote(
str(self.sequential.location).encode()
))),
),
@@ -81,8 +82,8 @@ class ContainerPageTestCase(StudioPageTestCase, LibraryTestCase):
Create the scenario of an xblock with children (non-vertical) on the container page.
This should create a container page that is a child of another container page.
"""
draft_container = self._create_block(self.child_container.location, "wrapper", "Wrapper")
self._create_block(draft_container.location, "html", "Child HTML")
draft_container = self._create_block(self.child_container, "wrapper", "Wrapper")
self._create_block(draft_container, "html", "Child HTML")
def test_container_html(xblock):
self._test_html_content(
@@ -97,7 +98,7 @@ class ContainerPageTestCase(StudioPageTestCase, LibraryTestCase):
).format(
course=re.escape(str(self.course.id)),
unit_parameters=re.escape(str(self.vertical.location)),
subsection_parameters=re.escape('?show={}'.format(http.urlquote(
subsection_parameters=re.escape('?show={}'.format(quote(
str(self.sequential.location).encode()
))),
),
@@ -177,12 +178,12 @@ class ContainerPageTestCase(StudioPageTestCase, LibraryTestCase):
self.validate_preview_html(self.child_container, self.container_view)
self.validate_preview_html(self.child_vertical, self.reorderable_child_view)
def _create_block(self, parent_location, category, display_name, **kwargs):
def _create_block(self, parent, category, display_name, **kwargs):
"""
creates a block in the module store, without publishing it.
"""
return BlockFactory.create(
parent_location=parent_location,
parent=parent,
category=category,
display_name=display_name,
publish_item=False,
@@ -194,7 +195,7 @@ class ContainerPageTestCase(StudioPageTestCase, LibraryTestCase):
"""
Verify that a public container rendered as a child of the container page returns the expected HTML.
"""
empty_child_container = self._create_block(self.vertical.location, 'split_test', 'Split Test')
empty_child_container = self._create_block(self.vertical, 'split_test', 'Split Test 1')
published_empty_child_container = self.store.publish(empty_child_container.location, self.user.id)
self.validate_preview_html(published_empty_child_container, self.reorderable_child_view, can_add=False)
@@ -202,7 +203,7 @@ class ContainerPageTestCase(StudioPageTestCase, LibraryTestCase):
"""
Verify that a draft container rendered as a child of the container page returns the expected HTML.
"""
empty_child_container = self._create_block(self.vertical.location, 'split_test', 'Split Test')
empty_child_container = self._create_block(self.vertical, 'split_test', 'Split Test 1')
self.validate_preview_html(empty_child_container, self.reorderable_child_view, can_add=False)
@patch(

View File

@@ -39,7 +39,7 @@ from xmodule.modulestore.tests.django_utils import TEST_DATA_SPLIT_MODULESTORE
from xmodule.modulestore.tests.factories import CourseFactory, BlockFactory, LibraryFactory, check_mongo_calls # lint-amnesty, pylint: disable=wrong-import-order
from ..course import _deprecated_blocks_info, course_outline_initial_state, reindex_course_and_check_access
from cms.djangoapps.contentstore.xblock_services.xblock_service import VisibilityState, create_xblock_info
from cms.djangoapps.contentstore.xblock_storage_handlers.view_handlers import VisibilityState, create_xblock_info
class TestCourseIndex(CourseTestCase):
@@ -423,13 +423,13 @@ class TestCourseIndexArchived(CourseTestCase):
@ddt.data(
# Staff user has course staff access
(True, 'staff', None, 0, 20),
(False, 'staff', None, 0, 20),
(True, 'staff', None, 0, 21),
(False, 'staff', None, 0, 21),
# Base user has global staff access
(True, 'user', ORG, 2, 20),
(False, 'user', ORG, 2, 20),
(True, 'user', None, 2, 20),
(False, 'user', None, 2, 20),
(True, 'user', ORG, 2, 21),
(False, 'user', ORG, 2, 21),
(True, 'user', None, 2, 21),
(False, 'user', None, 2, 21),
)
@ddt.unpack
def test_separate_archived_courses(self, separate_archived_courses, username, org, mongo_queries, sql_queries):

View File

@@ -27,7 +27,7 @@ from ..entrance_exam import (
update_entrance_exam
)
from cms.djangoapps.contentstore.helpers import GRADER_TYPES
from cms.djangoapps.contentstore.xblock_services.create_xblock import create_xblock
from cms.djangoapps.contentstore.xblock_storage_handlers.create_xblock import create_xblock
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})

View File

@@ -14,7 +14,7 @@ from cms.djangoapps.contentstore.tests.utils import CourseTestCase
from cms.djangoapps.contentstore.utils import reverse_usage_url
from openedx.core.lib.gating.api import GATING_NAMESPACE_QUALIFIER
from cms.djangoapps.contentstore.xblock_services.xblock_service import VisibilityState
from cms.djangoapps.contentstore.xblock_storage_handlers.view_handlers import VisibilityState
@ddt.ddt
@@ -57,7 +57,7 @@ class TestSubsectionGating(CourseTestCase):
)
self.seq2_url = reverse_usage_url('xblock_handler', self.seq2.location)
@patch('cms.djangoapps.contentstore.xblock_services.xblock_service.gating_api.add_prerequisite')
@patch('cms.djangoapps.contentstore.xblock_storage_handlers.view_handlers.gating_api.add_prerequisite')
def test_add_prerequisite(self, mock_add_prereq):
"""
Test adding a subsection as a prerequisite
@@ -69,7 +69,7 @@ class TestSubsectionGating(CourseTestCase):
)
mock_add_prereq.assert_called_with(self.course.id, self.seq1.location)
@patch('cms.djangoapps.contentstore.xblock_services.xblock_service.gating_api.remove_prerequisite')
@patch('cms.djangoapps.contentstore.xblock_storage_handlers.view_handlers.gating_api.remove_prerequisite')
def test_remove_prerequisite(self, mock_remove_prereq):
"""
Test removing a subsection as a prerequisite
@@ -81,7 +81,7 @@ class TestSubsectionGating(CourseTestCase):
)
mock_remove_prereq.assert_called_with(self.seq1.location)
@patch('cms.djangoapps.contentstore.xblock_services.xblock_service.gating_api.set_required_content')
@patch('cms.djangoapps.contentstore.xblock_storage_handlers.view_handlers.gating_api.set_required_content')
def test_add_gate(self, mock_set_required_content):
"""
Test adding a gated subsection
@@ -100,7 +100,7 @@ class TestSubsectionGating(CourseTestCase):
'100'
)
@patch('cms.djangoapps.contentstore.xblock_services.xblock_service.gating_api.set_required_content')
@patch('cms.djangoapps.contentstore.xblock_storage_handlers.view_handlers.gating_api.set_required_content')
def test_remove_gate(self, mock_set_required_content):
"""
Test removing a gated subsection
@@ -118,9 +118,9 @@ class TestSubsectionGating(CourseTestCase):
''
)
@patch('cms.djangoapps.contentstore.xblock_services.xblock_service.gating_api.get_prerequisites')
@patch('cms.djangoapps.contentstore.xblock_services.xblock_service.gating_api.get_required_content')
@patch('cms.djangoapps.contentstore.xblock_services.xblock_service.gating_api.is_prerequisite')
@patch('cms.djangoapps.contentstore.xblock_storage_handlers.view_handlers.gating_api.get_prerequisites')
@patch('cms.djangoapps.contentstore.xblock_storage_handlers.view_handlers.gating_api.get_required_content')
@patch('cms.djangoapps.contentstore.xblock_storage_handlers.view_handlers.gating_api.is_prerequisite')
@ddt.data(
(90, None),
(None, 90),

View File

@@ -3,7 +3,7 @@ Unit tests for helpers.py.
"""
from django.utils import http
from urllib.parse import quote
from cms.djangoapps.contentstore.tests.utils import CourseTestCase
from xmodule.modulestore.tests.factories import BlockFactory, LibraryFactory # lint-amnesty, pylint: disable=wrong-import-order
@@ -27,7 +27,7 @@ class HelpersTestCase(CourseTestCase):
display_name="Week 1")
self.assertEqual(
xblock_studio_url(chapter),
f'{course_url}?show={http.urlquote(str(chapter.location).encode())}'
f'{course_url}?show={quote(str(chapter.location).encode())}'
)
# Verify sequential URL
@@ -35,7 +35,7 @@ class HelpersTestCase(CourseTestCase):
display_name="Lesson 1")
self.assertEqual(
xblock_studio_url(sequential),
f'{course_url}?show={http.urlquote(str(sequential.location).encode())}'
f'{course_url}?show={quote(str(sequential.location).encode())}'
)
# Verify unit URL

View File

@@ -19,11 +19,15 @@ from organizations.exceptions import InvalidOrganizationException
from cms.djangoapps.contentstore.tests.utils import AjaxEnabledTestClient, CourseTestCase, parse_json
from cms.djangoapps.contentstore.utils import reverse_course_url, reverse_library_url
from cms.djangoapps.course_creators.views import add_user_with_status_granted as grant_course_creator_status
from common.djangoapps.student.roles import LibraryUserRole
from common.djangoapps.student.roles import LibraryUserRole, CourseStaffRole
from xmodule.modulestore.tests.factories import LibraryFactory # lint-amnesty, pylint: disable=wrong-import-order
from cms.djangoapps.course_creators.models import CourseCreator
from common.djangoapps.student import auth
from ..component import get_component_templates
from ..library import user_can_create_library
from ..course import get_allowed_organizations_for_libraries
LIBRARY_REST_URL = '/library/' # URL for GET/POST requests involving libraries
@@ -51,26 +55,51 @@ class UnitTestLibraries(CourseTestCase):
######################################################
# Tests for /library/ - list and create libraries:
# When libraries are disabled, nobody can create libraries
@mock.patch("cms.djangoapps.contentstore.views.library.LIBRARIES_ENABLED", False)
def test_library_creator_status_libraries_not_enabled(self):
_, nostaff_user = self.create_non_staff_authed_user_client()
self.assertEqual(user_can_create_library(nostaff_user), False)
# When creator group is disabled, non-staff users can create libraries
@mock.patch("cms.djangoapps.contentstore.views.library.LIBRARIES_ENABLED", True)
def test_library_creator_status_with_no_course_creator_role(self):
_, nostaff_user = self.create_non_staff_authed_user_client()
self.assertEqual(user_can_create_library(nostaff_user), True)
# When creator group is enabled, Non staff users cannot create libraries
@mock.patch("cms.djangoapps.contentstore.views.library.LIBRARIES_ENABLED", True)
def test_library_creator_status_for_enabled_creator_group_setting_for_non_staff_users(self):
_, nostaff_user = self.create_non_staff_authed_user_client()
with mock.patch.dict('django.conf.settings.FEATURES', {"ENABLE_CREATOR_GROUP": True}):
self.assertEqual(user_can_create_library(nostaff_user), False)
# Global staff can create libraries
@mock.patch("cms.djangoapps.contentstore.views.library.LIBRARIES_ENABLED", True)
def test_library_creator_status_with_is_staff_user(self):
self.assertEqual(user_can_create_library(self.user), True)
# When creator groups are enabled, global staff can create libraries
@mock.patch("cms.djangoapps.contentstore.views.library.LIBRARIES_ENABLED", True)
def test_library_creator_status_with_course_creator_role(self):
def test_library_creator_status_for_enabled_creator_group_setting_with_is_staff_user(self):
with mock.patch.dict('django.conf.settings.FEATURES', {"ENABLE_CREATOR_GROUP": True}):
self.assertEqual(user_can_create_library(self.user), True)
# When creator groups are enabled, course creators can create libraries
@mock.patch("cms.djangoapps.contentstore.views.library.LIBRARIES_ENABLED", True)
def test_library_creator_status_with_course_creator_role_for_enabled_creator_group_setting(self):
_, nostaff_user = self.create_non_staff_authed_user_client()
with mock.patch.dict('django.conf.settings.FEATURES', {"ENABLE_CREATOR_GROUP": True}):
grant_course_creator_status(self.user, nostaff_user)
self.assertEqual(user_can_create_library(nostaff_user), True)
# When creator groups are enabled, course staff members can create libraries
@mock.patch("cms.djangoapps.contentstore.views.library.LIBRARIES_ENABLED", True)
def test_library_creator_status_with_no_course_creator_role(self):
def test_library_creator_status_with_course_staff_role_for_enabled_creator_group_setting(self):
_, nostaff_user = self.create_non_staff_authed_user_client()
self.assertEqual(user_can_create_library(nostaff_user), True)
with mock.patch.dict('django.conf.settings.FEATURES', {"ENABLE_CREATOR_GROUP": True}):
auth.add_users(self.user, CourseStaffRole(self.course.id), nostaff_user)
self.assertEqual(user_can_create_library(nostaff_user), True)
@ddt.data(
(False, False, True),
@@ -188,9 +217,9 @@ class UnitTestLibraries(CourseTestCase):
self.assertEqual(response.status_code, 200)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_CREATOR_GROUP': True})
def test_lib_create_permission_no_course_creator_role_and_course_creator_group(self):
def test_lib_create_permission_no_course_creator_role_and_no_course_creator_group_and_no_course_staff_role(self):
"""
Users who are not given course creator roles should not be able to create libraries
Users who are not given course creator roles or course staff role should not be able to create libraries
if ENABLE_CREATOR_GROUP is enabled.
"""
self.client.logout()
@@ -201,6 +230,23 @@ class UnitTestLibraries(CourseTestCase):
})
self.assertEqual(response.status_code, 403)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_CREATOR_GROUP': True})
def test_lib_create_permission_course_staff_role(self):
"""
Users who are staff on any existing course should able to create libraries
if ENABLE_CREATOR_GROUP is enabled.
"""
self.client.logout()
ns_user, password = self.create_non_staff_user()
self.client.login(username=ns_user.username, password=password)
auth.add_users(self.user, CourseStaffRole(self.course.id), ns_user)
self.assertTrue(auth.user_has_role(ns_user, CourseStaffRole(self.course.id)))
response = self.client.ajax_post(LIBRARY_REST_URL, {
'org': 'org', 'library': 'lib', 'display_name': "New Library",
})
self.assertEqual(response.status_code, 200)
@ddt.data(
{},
{'org': 'org'},
@@ -405,3 +451,41 @@ class UnitTestLibraries(CourseTestCase):
response = self.client.ajax_post(reverse('xblock_handler'), data)
self.assertEqual(response.status_code, 400)
self.assertIn('cannot have more than 1 component', parse_json(response)['error'])
def test_allowed_organizations_for_library(self):
"""
Test the different organizations that a user can select for creating a library, depending
on Feature Flags and on user role.
With organization staff access enabled, a user should be able to select organizations they
are a staff member of. Else, with creator groups enabled, the user should be able to select
organizations they are course creator for.
"""
course_creator = CourseCreator.objects.create(user=self.user, all_organizations=True)
with patch('cms.djangoapps.course_creators.models.CourseCreator.objects.filter') as mock_filter:
mock_filter.return_value.first.return_value = course_creator
with patch('organizations.models.Organization.objects.all') as mock_all:
mock_all.return_value.values_list.return_value = ['org1', 'org2']
with patch('common.djangoapps.student.roles.OrgStaffRole.get_orgs_for_user') as get_user_orgs:
get_user_orgs.return_value = ['org3']
# Call the method under test
with mock.patch.dict(
'django.conf.settings.FEATURES',
{"ENABLE_ORGANIZATION_STAFF_ACCESS_FOR_CONTENT_LIBRARIES": False}
):
with mock.patch.dict(
'django.conf.settings.FEATURES',
{"ENABLE_CREATOR_GROUP": False}
):
organizations = get_allowed_organizations_for_libraries(self.user)
# Assert that the method returned the expected value
self.assertEqual(organizations, [])
with mock.patch.dict('django.conf.settings.FEATURES', {"ENABLE_CREATOR_GROUP": True}):
organizations = get_allowed_organizations_for_libraries(self.user)
# Assert that the method returned the expected value
self.assertEqual(organizations, ['org1', 'org2'])
with mock.patch.dict(
'django.conf.settings.FEATURES',
{"ENABLE_ORGANIZATION_STAFF_ACCESS_FOR_CONTENT_LIBRARIES": True}
):
organizations = get_allowed_organizations_for_libraries(self.user)
self.assertEqual(organizations, ['org3'])

View File

@@ -172,7 +172,6 @@ class GetPreviewHtmlTestCase(ModuleStoreTestCase):
self.assertFalse(modulestore().has_changes(modulestore().get_item(block.location)))
@XBlock.needs("field-data")
@XBlock.needs("i18n")
@XBlock.needs("mako")
@XBlock.needs("replace_urls")
@@ -187,7 +186,7 @@ class PureXBlock(XBlock):
Renders the output that a student will see.
"""
fragment = Fragment()
fragment.add_content(self.runtime.service(self, 'mako').render_template('edxmako.html', context))
fragment.add_content(self.runtime.service(self, 'mako').render_lms_template('edxmako.html', context))
return fragment
@@ -204,7 +203,6 @@ class StudioXBlockServiceBindingTest(ModuleStoreTestCase):
self.user = UserFactory()
self.course = CourseFactory.create()
self.request = mock.Mock()
self.field_data = mock.Mock()
@XBlock.register_temp_plugin(PureXBlock, identifier='pure')
@ddt.data("user", "i18n", "field-data", "teams_configuration", "replace_urls")
@@ -213,11 +211,7 @@ class StudioXBlockServiceBindingTest(ModuleStoreTestCase):
Tests that the 'user' and 'i18n' services are provided by the Studio runtime.
"""
block = BlockFactory(category="pure", parent=self.course)
_prepare_runtime_for_preview(
self.request,
block,
self.field_data,
)
_prepare_runtime_for_preview(self.request, block)
service = block.runtime.service(block, expected_service)
self.assertIsNotNone(service)
@@ -241,14 +235,9 @@ class CmsModuleSystemShimTest(ModuleStoreTestCase):
self.request = RequestFactory().get('/dummy-url')
self.request.user = self.user
self.request.session = {}
self.field_data = mock.Mock()
self.contentstore = contentstore()
self.block = BlockFactory(category="problem", parent=course)
_prepare_runtime_for_preview(
self.request,
block=self.block,
field_data=mock.Mock(),
)
_prepare_runtime_for_preview(self.request, block=self.block)
self.course = self.store.get_item(course.location)
def test_get_user_role(self):
@@ -303,11 +292,7 @@ class CmsModuleSystemShimTest(ModuleStoreTestCase):
"""Test anonymous_user_id on a block which uses per-student anonymous IDs"""
# Create the runtime with the flag turned on.
block = BlockFactory(category="problem", parent=self.course)
_prepare_runtime_for_preview(
self.request,
block=block,
field_data=mock.Mock(),
)
_prepare_runtime_for_preview(self.request, block=block)
deprecated_anonymous_user_id = (
block.runtime.service(block, 'user').get_current_user().opt_attrs.get(ATTR_KEY_DEPRECATED_ANONYMOUS_USER_ID)
)
@@ -318,11 +303,7 @@ class CmsModuleSystemShimTest(ModuleStoreTestCase):
"""Test anonymous_user_id on a block which uses per-course anonymous IDs"""
# Create the runtime with the flag turned on.
block = BlockFactory(category="lti", parent=self.course)
_prepare_runtime_for_preview(
self.request,
block=block,
field_data=mock.Mock(),
)
_prepare_runtime_for_preview(self.request, block=block)
anonymous_user_id = (
block.runtime.service(block, 'user').get_current_user().opt_attrs.get(ATTR_KEY_ANONYMOUS_USER_ID)

View File

@@ -12,13 +12,15 @@ from django.urls import reverse
from edxval import api
from cms.djangoapps.contentstore.tests.utils import CourseTestCase
from cms.djangoapps.contentstore.transcript_storage_handlers import (
TranscriptionProviderErrorType,
validate_transcript_credentials
)
from cms.djangoapps.contentstore.utils import reverse_course_url
from common.djangoapps.student.roles import CourseStaffRole
from openedx.core.djangoapps.profile_images.tests.helpers import make_image_file
from openedx.core.djangoapps.oauth_dispatch.jwt import create_jwt_for_user
from ..transcript_settings import TranscriptionProviderErrorType, validate_transcript_credentials
@ddt.ddt
@patch(
@@ -94,7 +96,7 @@ class TranscriptCredentialsTest(CourseTestCase):
)
)
@ddt.unpack
@patch('cms.djangoapps.contentstore.views.transcript_settings.update_3rd_party_transcription_service_credentials')
@patch('cms.djangoapps.contentstore.transcript_storage_handlers.update_3rd_party_transcription_service_credentials')
def test_transcript_credentials_handler(self, request_payload, update_credentials_response, expected_status_code,
expected_response, mock_update_credentials):
"""
@@ -211,7 +213,7 @@ class TranscriptDownloadTest(CourseTestCase):
response = self.client.post(self.view_url, content_type='application/json')
self.assertEqual(response.status_code, 405)
@patch('cms.djangoapps.contentstore.views.transcript_settings.get_video_transcript_data')
@patch('cms.djangoapps.contentstore.transcript_storage_handlers.get_video_transcript_data')
def test_transcript_download_handler(self, mock_get_video_transcript_data):
"""
Tests that transcript download handler works as expected.
@@ -303,9 +305,9 @@ class TranscriptUploadTest(CourseTestCase):
response = self.client.get(self.view_url, content_type='application/json')
self.assertEqual(response.status_code, 405)
@patch('cms.djangoapps.contentstore.views.transcript_settings.create_or_update_video_transcript')
@patch('cms.djangoapps.contentstore.transcript_storage_handlers.create_or_update_video_transcript')
@patch(
'cms.djangoapps.contentstore.views.transcript_settings.get_available_transcript_languages',
'cms.djangoapps.contentstore.transcript_storage_handlers.get_available_transcript_languages',
Mock(return_value=['en']),
)
def test_transcript_upload_handler(self, mock_create_or_update_video_transcript):
@@ -370,7 +372,7 @@ class TranscriptUploadTest(CourseTestCase):
)
@ddt.unpack
@patch(
'cms.djangoapps.contentstore.views.transcript_settings.get_available_transcript_languages',
'cms.djangoapps.contentstore.transcript_storage_handlers.get_available_transcript_languages',
Mock(return_value=['en']),
)
def test_transcript_upload_handler_missing_attrs(self, request_payload, expected_error_message):
@@ -383,7 +385,7 @@ class TranscriptUploadTest(CourseTestCase):
self.assertEqual(json.loads(response.content.decode('utf-8'))['error'], expected_error_message)
@patch(
'cms.djangoapps.contentstore.views.transcript_settings.get_available_transcript_languages',
'cms.djangoapps.contentstore.transcript_storage_handlers.get_available_transcript_languages',
Mock(return_value=['en', 'es'])
)
def test_transcript_upload_handler_existing_transcript(self):
@@ -405,7 +407,7 @@ class TranscriptUploadTest(CourseTestCase):
)
@patch(
'cms.djangoapps.contentstore.views.transcript_settings.get_available_transcript_languages',
'cms.djangoapps.contentstore.transcript_storage_handlers.get_available_transcript_languages',
Mock(return_value=['en']),
)
def test_transcript_upload_handler_with_image(self):
@@ -432,7 +434,7 @@ class TranscriptUploadTest(CourseTestCase):
)
@patch(
'cms.djangoapps.contentstore.views.transcript_settings.get_available_transcript_languages',
'cms.djangoapps.contentstore.transcript_storage_handlers.get_available_transcript_languages',
Mock(return_value=['en']),
)
def test_transcript_upload_handler_with_invalid_transcript(self):
@@ -588,9 +590,9 @@ class TranscriptUploadApiTest(CourseTestCase):
response = self.client.get(self.view_url, content_type='application/json')
self.assertEqual(response.status_code, 405)
@patch('cms.djangoapps.contentstore.views.transcript_settings.create_or_update_video_transcript')
@patch('cms.djangoapps.contentstore.transcript_storage_handlers.create_or_update_video_transcript')
@patch(
'cms.djangoapps.contentstore.views.transcript_settings.get_available_transcript_languages',
'cms.djangoapps.contentstore.transcript_storage_handlers.get_available_transcript_languages',
Mock(return_value=['en']),
)
def test_transcript_upload_handler(self, mock_create_or_update_video_transcript):
@@ -655,7 +657,7 @@ class TranscriptUploadApiTest(CourseTestCase):
)
@ddt.unpack
@patch(
'cms.djangoapps.contentstore.views.transcript_settings.get_available_transcript_languages',
'cms.djangoapps.contentstore.transcript_storage_handlers.get_available_transcript_languages',
Mock(return_value=['en']),
)
def test_transcript_upload_handler_missing_attrs(self, request_payload, expected_error_message):
@@ -668,7 +670,7 @@ class TranscriptUploadApiTest(CourseTestCase):
self.assertEqual(json.loads(response.content.decode('utf-8'))['error'], expected_error_message)
@patch(
'cms.djangoapps.contentstore.views.transcript_settings.get_available_transcript_languages',
'cms.djangoapps.contentstore.transcript_storage_handlers.get_available_transcript_languages',
Mock(return_value=['en', 'es'])
)
def test_transcript_upload_handler_existing_transcript(self):
@@ -690,7 +692,7 @@ class TranscriptUploadApiTest(CourseTestCase):
)
@patch(
'cms.djangoapps.contentstore.views.transcript_settings.get_available_transcript_languages',
'cms.djangoapps.contentstore.transcript_storage_handlers.get_available_transcript_languages',
Mock(return_value=['en']),
)
def test_transcript_upload_handler_with_image(self):
@@ -717,7 +719,7 @@ class TranscriptUploadApiTest(CourseTestCase):
)
@patch(
'cms.djangoapps.contentstore.views.transcript_settings.get_available_transcript_languages',
'cms.djangoapps.contentstore.transcript_storage_handlers.get_available_transcript_languages',
Mock(return_value=['en']),
)
def test_transcript_upload_handler_with_invalid_transcript(self):

View File

@@ -28,13 +28,13 @@ class UnitPageTestCase(StudioPageTestCase):
Verify that a public xblock's preview returns the expected HTML.
"""
published_video = self.store.publish(self.video.location, self.user.id) # lint-amnesty, pylint: disable=unused-variable
self.validate_preview_html(self.video, STUDENT_VIEW, can_add=False)
self.validate_preview_html(self.video, STUDENT_VIEW, in_unit=True, can_add=False)
def test_draft_component_preview_html(self):
"""
Verify that a draft xblock's preview returns the expected HTML.
"""
self.validate_preview_html(self.video, STUDENT_VIEW, can_add=False)
self.validate_preview_html(self.video, STUDENT_VIEW, in_unit=True, can_add=False)
def test_public_child_container_preview_html(self):
"""
@@ -46,7 +46,7 @@ class UnitPageTestCase(StudioPageTestCase):
BlockFactory.create(parent_location=child_container.location,
category='html', display_name='grandchild')
published_child_container = self.store.publish(child_container.location, self.user.id)
self.validate_preview_html(published_child_container, STUDENT_VIEW, can_add=False)
self.validate_preview_html(published_child_container, STUDENT_VIEW, in_unit=True, can_add=False)
def test_draft_child_container_preview_html(self):
"""
@@ -58,4 +58,4 @@ class UnitPageTestCase(StudioPageTestCase):
BlockFactory.create(parent_location=child_container.location,
category='html', display_name='grandchild')
draft_child_container = self.store.get_item(child_container.location)
self.validate_preview_html(draft_child_container, STUDENT_VIEW, can_add=False)
self.validate_preview_html(draft_child_container, STUDENT_VIEW, in_unit=True, can_add=False)

View File

@@ -43,11 +43,15 @@ from ..videos import (
ENABLE_VIDEO_UPLOAD_PAGINATION,
KEY_EXPIRATION_IN_SECONDS,
VIDEO_IMAGE_UPLOAD_ENABLED,
PUBLIC_VIDEO_SHARE,
StatusDisplayStrings,
TranscriptProvider,
)
from cms.djangoapps.contentstore.video_storage_handlers import (
_get_default_video_image_url,
convert_video_status, storage_service_bucket, storage_service_key
TranscriptProvider,
StatusDisplayStrings,
convert_video_status,
storage_service_bucket,
storage_service_key,
PUBLIC_VIDEO_SHARE
)
@@ -210,7 +214,7 @@ class VideoUploadPostTestsMixin:
"""
@override_settings(AWS_ACCESS_KEY_ID='test_key_id', AWS_SECRET_ACCESS_KEY='test_secret')
@patch('boto.s3.key.Key')
@patch('cms.djangoapps.contentstore.views.videos.S3Connection')
@patch('cms.djangoapps.contentstore.video_storage_handlers.S3Connection')
def test_post_success(self, mock_conn, mock_key):
files = [
{
@@ -467,7 +471,7 @@ class VideosHandlerTestCase(
@override_settings(AWS_ACCESS_KEY_ID="test_key_id", AWS_SECRET_ACCESS_KEY="test_secret")
@patch("boto.s3.key.Key")
@patch("cms.djangoapps.contentstore.views.videos.S3Connection")
@patch("cms.djangoapps.contentstore.video_storage_handlers.S3Connection")
@ddt.data(
(
[
@@ -529,7 +533,7 @@ class VideosHandlerTestCase(
self.assertEqual(response['error'], "Request 'files' entry contain unsupported content_type")
@override_settings(AWS_ACCESS_KEY_ID='test_key_id', AWS_SECRET_ACCESS_KEY='test_secret')
@patch('cms.djangoapps.contentstore.views.videos.S3Connection')
@patch('cms.djangoapps.contentstore.video_storage_handlers.S3Connection')
def test_upload_with_non_ascii_charaters(self, mock_conn):
"""
Test that video uploads throws error message when file name contains special characters.
@@ -552,7 +556,7 @@ class VideosHandlerTestCase(
@override_settings(AWS_ACCESS_KEY_ID='test_key_id', AWS_SECRET_ACCESS_KEY='test_secret', AWS_SECURITY_TOKEN='token')
@patch('boto.s3.key.Key')
@patch('cms.djangoapps.contentstore.views.videos.S3Connection')
@patch('cms.djangoapps.contentstore.video_storage_handlers.S3Connection')
@override_waffle_flag(ENABLE_DEVSTACK_VIDEO_UPLOADS, active=True)
def test_devstack_upload_connection(self, mock_conn, mock_key):
files = [{'file_name': 'first.mp4', 'content_type': 'video/mp4'}]
@@ -580,7 +584,7 @@ class VideosHandlerTestCase(
)
@patch('boto.s3.key.Key')
@patch('cms.djangoapps.contentstore.views.videos.S3Connection')
@patch('cms.djangoapps.contentstore.video_storage_handlers.S3Connection')
def test_send_course_to_vem_pipeline(self, mock_conn, mock_key):
"""
Test that uploads always go to VEM S3 bucket by default.
@@ -610,7 +614,7 @@ class VideosHandlerTestCase(
@override_settings(AWS_ACCESS_KEY_ID='test_key_id', AWS_SECRET_ACCESS_KEY='test_secret')
@patch('boto.s3.key.Key')
@patch('cms.djangoapps.contentstore.views.videos.S3Connection')
@patch('cms.djangoapps.contentstore.video_storage_handlers.S3Connection')
@ddt.data(
{
'global_waffle': True,
@@ -770,7 +774,7 @@ class VideosHandlerTestCase(
# Test should fail if video not found
self.assertEqual(True, False, 'Invalid edx_video_id')
@patch('cms.djangoapps.contentstore.views.videos.LOGGER')
@patch('cms.djangoapps.contentstore.video_storage_handlers.LOGGER')
def test_video_status_update_request(self, mock_logger):
"""
Verifies that video status update request works as expected.
@@ -1447,8 +1451,8 @@ class TranscriptPreferencesTestCase(VideoUploadTestBase, CourseTestCase):
@ddt.unpack
@override_settings(AWS_ACCESS_KEY_ID='test_key_id', AWS_SECRET_ACCESS_KEY='test_secret')
@patch('boto.s3.key.Key')
@patch('cms.djangoapps.contentstore.views.videos.S3Connection')
@patch('cms.djangoapps.contentstore.views.videos.get_transcript_preferences')
@patch('cms.djangoapps.contentstore.video_storage_handlers.S3Connection')
@patch('cms.djangoapps.contentstore.video_storage_handlers.get_transcript_preferences')
def test_transcript_preferences_metadata(self, transcript_preferences, is_video_transcript_enabled,
mock_transcript_preferences, mock_conn, mock_key):
"""

View File

@@ -18,9 +18,9 @@ class StudioPageTestCase(CourseTestCase):
def setUp(self):
super().setUp()
self.chapter = BlockFactory.create(parent_location=self.course.location,
self.chapter = BlockFactory.create(parent=self.course,
category='chapter', display_name="Week 1")
self.sequential = BlockFactory.create(parent_location=self.chapter.location,
self.sequential = BlockFactory.create(parent=self.chapter,
category='sequential', display_name="Lesson 1")
def get_page_html(self, xblock):
@@ -43,7 +43,7 @@ class StudioPageTestCase(CourseTestCase):
resp_content = json.loads(resp.content.decode('utf-8'))
return resp_content['html']
def validate_preview_html(self, xblock, view_name, can_add=True, can_reorder=True, can_move=True,
def validate_preview_html(self, xblock, view_name, in_unit=False, can_add=True, can_reorder=True, can_move=True,
can_edit=True, can_duplicate=True, can_delete=True):
"""
Verify that the specified xblock's preview has the expected HTML elements.
@@ -59,9 +59,20 @@ class StudioPageTestCase(CourseTestCase):
'<span data-tooltip="Drag to reorder" class="drag-handle action"></span>',
can_reorder
)
if in_unit:
move_action_html = '<button data-tooltip="Move" class="btn-default move-button action-button">'
delete_action_html = '<button data-tooltip="Delete" class="btn-default delete-button action-button">'
duplicate_action_html = \
'<button data-tooltip="Duplicate" class="btn-default duplicate-button action-button">'
else:
move_action_html = '<a class="move-button" href="#" role="button">Move</a>'
delete_action_html = '<a class="delete-button" href="#" role="button">Delete</a>'
duplicate_action_html = '<a class="duplicate-button" href="#" role="button">Duplicate</a>'
self.validate_html_for_action_button(
html,
'<button data-tooltip="Move" class="btn-default move-button action-button">',
move_action_html,
can_move
)
self.validate_html_for_action_button(
@@ -69,17 +80,19 @@ class StudioPageTestCase(CourseTestCase):
'button class="btn-default edit-button action-button"',
can_edit
)
self.validate_html_for_action_button(
html,
'<button data-tooltip="Delete" class="btn-default delete-button action-button">',
can_duplicate
)
self.validate_html_for_action_button(
html,
'<button data-tooltip="Duplicate" class="btn-default duplicate-button action-button">',
delete_action_html,
can_delete
)
self.validate_html_for_action_button(
html,
duplicate_action_html,
can_duplicate
)
def validate_html_for_action_button(self, html, expected_html, can_action=True):
"""
Validate that the specified HTML has specific action..

View File

@@ -4,32 +4,23 @@ Views related to the transcript preferences feature
import logging
import os
from django.contrib.auth.decorators import login_required
from django.core.files.base import ContentFile
from django.http import HttpResponse, HttpResponseNotFound
from django.utils.translation import gettext as _
from django.http import HttpResponseNotFound
from django.views.decorators.http import require_GET, require_http_methods, require_POST
from edxval.api import (
create_or_update_video_transcript,
delete_video_transcript,
get_3rd_party_transcription_plans,
get_available_transcript_languages,
get_video_transcript_data,
update_transcript_credentials_state_for_org
)
from opaque_keys.edx.keys import CourseKey
from rest_framework.decorators import api_view
from cms.djangoapps.contentstore.transcript_storage_handlers import (
validate_transcript_upload_data,
upload_transcript,
delete_video_transcript,
handle_transcript_credentials,
handle_transcript_download,
)
from common.djangoapps.student.auth import has_studio_write_access
from common.djangoapps.util.json_request import JsonResponse, expect_json
from openedx.core.djangoapps.video_config.models import VideoTranscriptEnabledFlag
from openedx.core.djangoapps.video_pipeline.api import update_3rd_party_transcription_service_credentials
from openedx.core.lib.api.view_utils import view_auth_classes
from xmodule.video_block.transcripts_utils import Transcript, TranscriptsGenerationException # lint-amnesty, pylint: disable=wrong-import-order
from .videos import TranscriptProvider
__all__ = [
'transcript_credentials_handler',
@@ -42,51 +33,6 @@ __all__ = [
LOGGER = logging.getLogger(__name__)
class TranscriptionProviderErrorType:
"""
Transcription provider's error types enumeration.
"""
INVALID_CREDENTIALS = 1
def validate_transcript_credentials(provider, **credentials):
"""
Validates transcript credentials.
Validations:
Providers must be either 3PlayMedia or Cielo24.
In case of:
3PlayMedia - 'api_key' and 'api_secret_key' are required.
Cielo24 - 'api_key' and 'username' are required.
It ignores any extra/unrelated parameters passed in credentials and
only returns the validated ones.
"""
error_message, validated_credentials = '', {}
valid_providers = list(get_3rd_party_transcription_plans().keys())
if provider in valid_providers:
must_have_props = []
if provider == TranscriptProvider.THREE_PLAY_MEDIA:
must_have_props = ['api_key', 'api_secret_key']
elif provider == TranscriptProvider.CIELO24:
must_have_props = ['api_key', 'username']
missing = [
must_have_prop for must_have_prop in must_have_props if must_have_prop not in list(credentials.keys()) # lint-amnesty, pylint: disable=consider-iterating-dictionary
]
if missing:
error_message = '{missing} must be specified.'.format(missing=' and '.join(missing))
return error_message, validated_credentials
validated_credentials.update({
prop: credentials[prop] for prop in must_have_props
})
else:
error_message = f'Invalid Provider {provider}.'
return error_message, validated_credentials
@expect_json
@login_required
@require_POST
@@ -103,35 +49,7 @@ def transcript_credentials_handler(request, course_key_string):
- A 404 response if transcript feature is not enabled for this course.
- A 400 if credentials do not pass validations, hence not updated in edx-video-pipeline.
"""
course_key = CourseKey.from_string(course_key_string)
if not VideoTranscriptEnabledFlag.feature_enabled(course_key):
return HttpResponseNotFound()
provider = request.json.pop('provider')
error_message, validated_credentials = validate_transcript_credentials(provider=provider, **request.json)
if error_message:
response = JsonResponse({'error': error_message}, status=400)
else:
# Send the validated credentials to edx-video-pipeline and video-encode-manager
credentials_payload = dict(validated_credentials, org=course_key.org, provider=provider)
error_response, is_updated = update_3rd_party_transcription_service_credentials(**credentials_payload)
# Send appropriate response based on whether credentials were updated or not.
if is_updated:
# Cache credentials state in edx-val.
update_transcript_credentials_state_for_org(org=course_key.org, provider=provider, exists=is_updated)
response = JsonResponse(status=200)
else:
# Error response would contain error types and the following
# error type is received from edx-video-pipeline whenever we've
# got invalid credentials for a provider. Its kept this way because
# edx-video-pipeline doesn't support i18n translations yet.
error_type = error_response.get('error_type')
if error_type == TranscriptionProviderErrorType.INVALID_CREDENTIALS:
error_message = _('The information you entered is incorrect.')
response = JsonResponse({'error': error_message}, status=400)
return response
return handle_transcript_credentials(request, course_key_string)
@login_required
@@ -148,112 +66,17 @@ def transcript_download_handler(request):
- A 400 if there is a validation error.
- A 404 if there is no such transcript.
"""
missing = [attr for attr in ['edx_video_id', 'language_code'] if attr not in request.GET]
if missing:
return JsonResponse(
{'error': _('The following parameters are required: {missing}.').format(missing=', '.join(missing))},
status=400
)
edx_video_id = request.GET['edx_video_id']
language_code = request.GET['language_code']
transcript = get_video_transcript_data(video_id=edx_video_id, language_code=language_code)
if transcript:
name_and_extension = os.path.splitext(transcript['file_name'])
basename, file_format = name_and_extension[0], name_and_extension[1][1:]
transcript_filename = f'{basename}.{Transcript.SRT}'
transcript_content = Transcript.convert(
content=transcript['content'],
input_format=file_format,
output_format=Transcript.SRT
)
# Construct an HTTP response
response = HttpResponse(transcript_content, content_type=Transcript.mime_types[Transcript.SRT])
response['Content-Disposition'] = f'attachment; filename="{transcript_filename}"'
else:
response = HttpResponseNotFound()
return response
def upload_transcript(request):
"""
Upload a transcript file
Arguments:
request: A WSGI request object
Transcript file in SRT format
"""
edx_video_id = request.POST['edx_video_id']
language_code = request.POST['language_code']
new_language_code = request.POST['new_language_code']
transcript_file = request.FILES['file']
try:
# Convert SRT transcript into an SJSON format
# and upload it to S3.
sjson_subs = Transcript.convert(
content=transcript_file.read().decode('utf-8'),
input_format=Transcript.SRT,
output_format=Transcript.SJSON
).encode()
create_or_update_video_transcript(
video_id=edx_video_id,
language_code=language_code,
metadata={
'provider': TranscriptProvider.CUSTOM,
'file_format': Transcript.SJSON,
'language_code': new_language_code
},
file_data=ContentFile(sjson_subs),
)
response = JsonResponse(status=201)
except (TranscriptsGenerationException, UnicodeDecodeError):
LOGGER.error("Unable to update transcript on edX video %s for language %s", edx_video_id, new_language_code)
response = JsonResponse(
{'error': _('There is a problem with this transcript file. Try to upload a different file.')},
status=400
)
finally:
LOGGER.info("Updated transcript on edX video %s for language %s", edx_video_id, new_language_code)
return response
def validate_transcript_upload_data(data, files):
"""
Validates video transcript file.
Arguments:
data: A request's data part.
files: A request's files part.
Returns:
None or String
If there is error returns error message otherwise None.
"""
error = None
# Validate the must have attributes - this error is unlikely to be faced by common users.
must_have_attrs = ['edx_video_id', 'language_code', 'new_language_code']
missing = [attr for attr in must_have_attrs if attr not in data]
if missing:
error = _('The following parameters are required: {missing}.').format(missing=', '.join(missing))
elif (
data['language_code'] != data['new_language_code'] and
data['new_language_code'] in get_available_transcript_languages(video_id=data['edx_video_id'])
):
error = _('A transcript with the "{language_code}" language code already exists.'.format( # lint-amnesty, pylint: disable=translation-of-non-string
language_code=data['new_language_code']
))
elif 'file' not in files:
error = _('A transcript file is required.')
return error
return handle_transcript_download(request)
# New version of this transcript upload API in contentstore/rest_api/transcripts.py
# Keeping the old API for backward compatibility
@api_view(['POST'])
@view_auth_classes()
@expect_json
def transcript_upload_api(request):
"""
API View for uploading transcript files.
(Old) API View for uploading transcript files.
Arguments:
request: A WSGI request object

View File

@@ -21,7 +21,7 @@ from edxval.api import create_external_video, create_or_update_video_transcript
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import UsageKey
from cms.djangoapps.contentstore.views.videos import TranscriptProvider
from cms.djangoapps.contentstore.video_storage_handlers import TranscriptProvider
from common.djangoapps.student.auth import has_course_author_access
from common.djangoapps.util.json_request import JsonResponse
from xmodule.contentstore.content import StaticContent # lint-amnesty, pylint: disable=wrong-import-order

View File

@@ -5,6 +5,7 @@ from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseNotFound
from django.shortcuts import redirect
from django.utils.translation import gettext as _
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_http_methods, require_POST
@@ -18,7 +19,9 @@ from common.djangoapps.student.auth import STUDIO_EDIT_ROLES, STUDIO_VIEW_USERS,
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.student.roles import CourseInstructorRole, CourseStaffRole, LibraryUserRole
from common.djangoapps.util.json_request import JsonResponse, expect_json
from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disable=wrong-import-order
from ..toggles import use_new_course_team_page
from ..utils import get_course_team_url, get_course_team
__all__ = ['request_course_creator', 'course_team_handler']
@@ -55,6 +58,8 @@ def course_team_handler(request, course_key_string=None, email=None):
if 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
return _course_team_user(request, course_key, email)
elif request.method == 'GET': # assume html
if use_new_course_team_page(course_key):
return redirect(get_course_team_url(course_key))
return _manage_users(request, course_key)
else:
return HttpResponseNotFound()
@@ -79,23 +84,8 @@ def _manage_users(request, course_key):
if not user_perms & STUDIO_VIEW_USERS:
raise PermissionDenied()
course_block = modulestore().get_course(course_key)
instructors = set(CourseInstructorRole(course_key).users_with_role())
# the page only lists staff and assumes they're a superset of instructors. Do a union to ensure.
staff = set(CourseStaffRole(course_key).users_with_role()).union(instructors)
formatted_users = []
for user in instructors:
formatted_users.append(user_with_role(user, 'instructor'))
for user in staff - instructors:
formatted_users.append(user_with_role(user, 'staff'))
return render_to_response('manage_users.html', {
'context_course': course_block,
'show_transfer_ownership_hint': request.user in instructors and len(instructors) == 1,
'users': formatted_users,
'allow_actions': bool(user_perms & STUDIO_EDIT_ROLES),
})
manage_users_context = get_course_team(request.user, course_key, user_perms)
return render_to_response('manage_users.html', manage_users_context)
@expect_json

View File

@@ -3,63 +3,35 @@ Views related to the video upload feature
"""
import codecs
import csv
import io
import json
import logging
from contextlib import closing
from datetime import datetime, timedelta
from uuid import uuid4
from boto.s3.connection import S3Connection
from boto import s3
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.staticfiles.storage import staticfiles_storage
from django.http import FileResponse, HttpResponseNotFound
from django.urls import reverse
from django.utils.translation import gettext as _
from django.utils.translation import gettext_noop
from django.views.decorators.http import require_GET, require_http_methods, require_POST
from edx_toggles.toggles import WaffleSwitch
from edxval.api import (
SortDirection,
VideoSortField,
create_or_update_transcript_preferences,
create_video,
get_3rd_party_transcription_plans,
get_available_transcript_languages,
get_video_transcript_url,
get_transcript_credentials_state_for_org,
get_transcript_preferences,
get_videos_for_course,
remove_transcript_preferences,
remove_video_for_course,
update_video_image,
update_video_status
)
from opaque_keys.edx.keys import CourseKey
from pytz import UTC
from rest_framework import status as rest_status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from common.djangoapps.edxmako.shortcuts import render_to_response
from common.djangoapps.util.json_request import JsonResponse, expect_json
from openedx.core.djangoapps.video_config.models import VideoTranscriptEnabledFlag
from openedx.core.djangoapps.video_config.toggles import PUBLIC_VIDEO_SHARE
from openedx.core.djangoapps.video_pipeline.config.waffle import (
DEPRECATE_YOUTUBE,
ENABLE_DEVSTACK_VIDEO_UPLOADS,
from cms.djangoapps.contentstore.video_storage_handlers import (
handle_videos,
handle_generate_video_upload_link,
handle_video_images,
check_video_images_upload_enabled,
enabled_video_features,
handle_transcript_preferences,
get_video_encodings_download,
validate_transcript_preferences as validate_transcript_preferences_source_function,
convert_video_status as convert_video_status_source_function,
get_all_transcript_languages as get_all_transcript_languages_source_function,
videos_index_html as videos_index_html_source_function,
videos_index_json as videos_index_json_source_function,
videos_post as videos_post_source_function,
storage_service_bucket as storage_service_bucket_source_function,
storage_service_key as storage_service_key_source_function,
send_video_status_update as send_video_status_update_source_function,
is_status_update_request as is_status_update_request_source_function,
)
from common.djangoapps.util.json_request import expect_json
from openedx.core.djangoapps.waffle_utils import CourseWaffleFlag
from openedx.core.lib.api.view_utils import view_auth_classes
from xmodule.video_block.transcripts_utils import Transcript # lint-amnesty, pylint: disable=wrong-import-order
from ..models import VideoUploadConfig
from ..utils import reverse_course_url
from ..video_utils import validate_video_image
from .course import get_course_and_check_access
__all__ = [
'videos_handler',
@@ -103,85 +75,6 @@ MAX_UPLOAD_HOURS = 24
VIDEOS_PER_PAGE = 100
class TranscriptProvider:
"""
Transcription Provider Enumeration
"""
CIELO24 = 'Cielo24'
THREE_PLAY_MEDIA = '3PlayMedia'
CUSTOM = 'Custom'
class StatusDisplayStrings:
"""
A class to map status strings as stored in VAL to display strings for the
video upload page
"""
# Translators: This is the status of an active video upload
_UPLOADING = gettext_noop("Uploading")
# Translators: This is the status for a video that the servers are currently processing
_IN_PROGRESS = gettext_noop("In Progress")
# Translators: This is the status for a video that the servers have successfully processed
_COMPLETE = gettext_noop("Ready")
# Translators: This is the status for a video that is uploaded completely
_UPLOAD_COMPLETED = gettext_noop("Uploaded")
# Translators: This is the status for a video that the servers have failed to process
_FAILED = gettext_noop("Failed")
# Translators: This is the status for a video that is cancelled during upload by user
_CANCELLED = gettext_noop("Cancelled")
# Translators: This is the status for a video which has failed
# due to being flagged as a duplicate by an external or internal CMS
_DUPLICATE = gettext_noop("Failed Duplicate")
# Translators: This is the status for a video which has duplicate token for youtube
_YOUTUBE_DUPLICATE = gettext_noop("YouTube Duplicate")
# Translators: This is the status for a video for which an invalid
# processing token was provided in the course settings
_INVALID_TOKEN = gettext_noop("Invalid Token")
# Translators: This is the status for a video that was included in a course import
_IMPORTED = gettext_noop("Imported")
# Translators: This is the status for a video that is in an unknown state
_UNKNOWN = gettext_noop("Unknown")
# Translators: This is the status for a video that is having its transcription in progress on servers
_TRANSCRIPTION_IN_PROGRESS = gettext_noop("Transcription in Progress")
# Translators: This is the status for a video whose transcription is complete
_TRANSCRIPT_READY = gettext_noop("Transcript Ready")
# Translators: This is the status for a video whose transcription job was failed for some languages
_PARTIAL_FAILURE = gettext_noop("Partial Failure")
# Translators: This is the status for a video whose transcription job has failed altogether
_TRANSCRIPT_FAILED = gettext_noop("Transcript Failed")
_STATUS_MAP = {
"upload": _UPLOADING,
"ingest": _IN_PROGRESS,
"transcode_queue": _IN_PROGRESS,
"transcode_active": _IN_PROGRESS,
"file_delivered": _COMPLETE,
"file_complete": _COMPLETE,
"upload_completed": _UPLOAD_COMPLETED,
"file_corrupt": _FAILED,
"pipeline_error": _FAILED,
"upload_failed": _FAILED,
"s3_upload_failed": _FAILED,
"upload_cancelled": _CANCELLED,
"duplicate": _DUPLICATE,
"youtube_duplicate": _YOUTUBE_DUPLICATE,
"invalid_token": _INVALID_TOKEN,
"imported": _IMPORTED,
"transcription_in_progress": _TRANSCRIPTION_IN_PROGRESS,
"transcript_ready": _TRANSCRIPT_READY,
"partial_failure": _PARTIAL_FAILURE,
# TODO: Add a related unit tests when the VAL update is part of platform
"transcript_failed": _TRANSCRIPT_FAILED,
}
@staticmethod
def get(val_status):
"""Map a VAL status string to a localized display string"""
# pylint: disable=translation-of-non-string
return _(StatusDisplayStrings._STATUS_MAP.get(val_status, StatusDisplayStrings._UNKNOWN))
@expect_json
@login_required
@require_http_methods(("GET", "POST", "DELETE"))
@@ -197,31 +90,17 @@ def videos_handler(request, course_key_string, edx_video_id=None):
POST
json: create a new video upload; the actual files should not be provided
to this endpoint but rather PUT to the respective upload_url values
contained in the response
contained in the response. Example payload:
{
"files": [{
"file_name": "video.mp4",
"content_type": "video/mp4"
}]
}
DELETE
soft deletes a video for particular course
"""
course = _get_and_validate_course(course_key_string, request.user)
if not course:
return HttpResponseNotFound()
if request.method == "GET":
if "application/json" in request.META.get("HTTP_ACCEPT", ""):
return videos_index_json(course)
pagination_conf = _generate_pagination_configuration(course_key_string, request)
return videos_index_html(course, pagination_conf)
elif request.method == "DELETE":
remove_video_for_course(course_key_string, edx_video_id)
return JsonResponse()
else:
if is_status_update_request(request.json):
return send_video_status_update(request.json)
elif _is_pagination_context_update_request(request):
return _update_pagination_context(request)
data, status = videos_post(course, request)
return JsonResponse(data, status=status)
return handle_videos(request, course_key_string, edx_video_id)
@api_view(['POST'])
@@ -232,12 +111,7 @@ def generate_video_upload_link_handler(request, course_key_string):
API for creating a video upload. Returns an edx_video_id and a presigned URL that can be used
to upload the video to AWS S3.
"""
course = _get_and_validate_course(course_key_string, request.user)
if not course:
return Response(data='Course Not Found', status=rest_status.HTTP_400_BAD_REQUEST)
data, status = videos_post(course, request)
return Response(data, status=status)
return handle_generate_video_upload_link(request, course_key_string)
@expect_json
@@ -245,131 +119,31 @@ def generate_video_upload_link_handler(request, course_key_string):
@require_POST
def video_images_handler(request, course_key_string, edx_video_id=None):
"""Function to handle image files"""
# respond with a 404 if image upload is not enabled.
if not VIDEO_IMAGE_UPLOAD_ENABLED.is_enabled():
return HttpResponseNotFound()
if 'file' not in request.FILES:
return JsonResponse({'error': _('An image file is required.')}, status=400)
image_file = request.FILES['file']
error = validate_video_image(image_file)
if error:
return JsonResponse({'error': error}, status=400)
with closing(image_file):
image_url = update_video_image(edx_video_id, course_key_string, image_file, image_file.name)
LOGGER.info(
'VIDEOS: Video image uploaded for edx_video_id [%s] in course [%s]', edx_video_id, course_key_string
)
return JsonResponse({'image_url': image_url})
return handle_video_images(request, course_key_string, edx_video_id)
@login_required
@require_GET
def video_images_upload_enabled(request):
"""Function to check if images can be uploaded"""
# respond with a false if image upload is not enabled.
if not VIDEO_IMAGE_UPLOAD_ENABLED.is_enabled():
return JsonResponse({'allowThumbnailUpload': False})
return JsonResponse({'allowThumbnailUpload': True})
return check_video_images_upload_enabled(request)
@login_required
@require_GET
def get_video_features(request):
""" Return a dict with info about which video features are enabled """
features = {
'allowThumbnailUpload': VIDEO_IMAGE_UPLOAD_ENABLED.is_enabled(),
'videoSharingEnabled': PUBLIC_VIDEO_SHARE.is_enabled(),
}
return JsonResponse(features)
return enabled_video_features(request)
def validate_transcript_preferences(provider, cielo24_fidelity, cielo24_turnaround,
three_play_turnaround, video_source_language, preferred_languages):
"""
Validate 3rd Party Transcription Preferences.
Arguments:
provider: Transcription provider
cielo24_fidelity: Cielo24 transcription fidelity.
cielo24_turnaround: Cielo24 transcription turnaround.
three_play_turnaround: 3PlayMedia transcription turnaround.
video_source_language: Source/Speech language of the videos that are going to be submitted to the Providers.
preferred_languages: list of language codes.
Returns:
validated preferences or a validation error.
Exposes helper method without breaking existing bindings/dependencies
"""
error, preferences = None, {}
# validate transcription providers
transcription_plans = get_3rd_party_transcription_plans()
if provider in list(transcription_plans.keys()): # lint-amnesty, pylint: disable=consider-iterating-dictionary
# Further validations for providers
if provider == TranscriptProvider.CIELO24:
# Validate transcription fidelity
if cielo24_fidelity in transcription_plans[provider]['fidelity']:
# Validate transcription turnaround
if cielo24_turnaround not in transcription_plans[provider]['turnaround']:
error = f'Invalid cielo24 turnaround {cielo24_turnaround}.'
return error, preferences
# Validate transcription languages
supported_languages = transcription_plans[provider]['fidelity'][cielo24_fidelity]['languages']
if video_source_language not in supported_languages:
error = f'Unsupported source language {video_source_language}.'
return error, preferences
if not preferred_languages or not set(preferred_languages) <= set(supported_languages.keys()):
error = f'Invalid languages {preferred_languages}.'
return error, preferences
# Validated Cielo24 preferences
preferences = {
'video_source_language': video_source_language,
'cielo24_fidelity': cielo24_fidelity,
'cielo24_turnaround': cielo24_turnaround,
'preferred_languages': preferred_languages,
}
else:
error = f'Invalid cielo24 fidelity {cielo24_fidelity}.'
elif provider == TranscriptProvider.THREE_PLAY_MEDIA:
# Validate transcription turnaround
if three_play_turnaround not in transcription_plans[provider]['turnaround']:
error = f'Invalid 3play turnaround {three_play_turnaround}.'
return error, preferences
# Validate transcription languages
valid_translations_map = transcription_plans[provider]['translations']
if video_source_language not in list(valid_translations_map.keys()):
error = f'Unsupported source language {video_source_language}.'
return error, preferences
valid_target_languages = valid_translations_map[video_source_language]
if not preferred_languages or not set(preferred_languages) <= set(valid_target_languages):
error = f'Invalid languages {preferred_languages}.'
return error, preferences
# Validated 3PlayMedia preferences
preferences = {
'three_play_turnaround': three_play_turnaround,
'video_source_language': video_source_language,
'preferred_languages': preferred_languages,
}
else:
error = f'Invalid provider {provider}.'
return error, preferences
return validate_transcript_preferences_source_function(provider, cielo24_fidelity, cielo24_turnaround,
three_play_turnaround, video_source_language,
preferred_languages)
@expect_json
@@ -385,32 +159,7 @@ def transcript_preferences_handler(request, course_key_string):
Returns: valid json response or 400 with error message
"""
course_key = CourseKey.from_string(course_key_string)
is_video_transcript_enabled = VideoTranscriptEnabledFlag.feature_enabled(course_key)
if not is_video_transcript_enabled:
return HttpResponseNotFound()
if request.method == 'POST':
data = request.json
provider = data.get('provider')
error, preferences = validate_transcript_preferences(
provider=provider,
cielo24_fidelity=data.get('cielo24_fidelity', ''),
cielo24_turnaround=data.get('cielo24_turnaround', ''),
three_play_turnaround=data.get('three_play_turnaround', ''),
video_source_language=data.get('video_source_language'),
preferred_languages=list(map(str, data.get('preferred_languages', [])))
)
if error:
response = JsonResponse({'error': error}, status=400)
else:
preferences.update({'provider': provider})
transcript_preferences = create_or_update_transcript_preferences(course_key_string, **preferences)
response = JsonResponse({'transcript_preferences': transcript_preferences}, status=200)
return response
elif request.method == 'DELETE':
remove_transcript_preferences(course_key_string)
return JsonResponse()
return handle_transcript_preferences(request, course_key_string)
@login_required
@@ -423,491 +172,67 @@ def video_encodings_download(request, course_key_string):
Video ID,Name,Status,Profile1 URL,Profile2 URL
aaaaaaaa-aaaa-4aaa-aaaa-aaaaaaaaaaaa,video.mp4,Complete,http://example.com/prof1.mp4,http://example.com/prof2.mp4
"""
course = _get_and_validate_course(course_key_string, request.user)
if not course:
return HttpResponseNotFound()
def get_profile_header(profile):
"""Returns the column header string for the given profile's URLs"""
# Translators: This is the header for a CSV file column
# containing URLs for video encodings for the named profile
# (e.g. desktop, mobile high quality, mobile low quality)
return _("{profile_name} URL").format(profile_name=profile)
profile_whitelist = VideoUploadConfig.get_profile_whitelist()
videos, __ = _get_videos(course)
videos = list(videos)
name_col = _("Name")
duration_col = _("Duration")
added_col = _("Date Added")
video_id_col = _("Video ID")
status_col = _("Status")
profile_cols = [get_profile_header(profile) for profile in profile_whitelist]
def make_csv_dict(video):
"""
Makes a dictionary suitable for writing CSV output. This involves
extracting the required items from the original video dict and
converting all keys and values to UTF-8 encoded string objects,
because the CSV module doesn't play well with unicode objects.
"""
# Translators: This is listed as the duration for a video that has not
# yet reached the point in its processing by the servers where its
# duration is determined.
duration_val = str(video["duration"]) if video["duration"] > 0 else _("Pending")
ret = dict(
[
(name_col, video["client_video_id"]),
(duration_col, duration_val),
(added_col, video["created"].isoformat()),
(video_id_col, video["edx_video_id"]),
(status_col, video["status"]),
] +
[
(get_profile_header(encoded_video["profile"]), encoded_video["url"])
for encoded_video in video["encoded_videos"]
if encoded_video["profile"] in profile_whitelist
]
)
return dict(ret.items())
# Write csv to bytes-like object. We need a separate writer and buffer as the csv
# writer writes str and the FileResponse expects a bytes files.
buffer = io.BytesIO()
buffer_writer = codecs.getwriter("utf-8")(buffer)
writer = csv.DictWriter(
buffer_writer,
[name_col, duration_col, added_col, video_id_col, status_col] + profile_cols,
dialect=csv.excel
)
writer.writeheader()
for video in videos:
writer.writerow(make_csv_dict(video))
buffer.seek(0)
# Translators: This is the suggested filename when downloading the URL
# listing for videos uploaded through Studio
filename = _("{course}_video_urls").format(course=course.id.course) + ".csv"
return FileResponse(buffer, as_attachment=True, filename=filename, content_type="text/csv")
def _get_and_validate_course(course_key_string, user):
"""
Given a course key, return the course if it exists, the given user has
access to it, and it is properly configured for video uploads
"""
course_key = CourseKey.from_string(course_key_string)
# For now, assume all studio users that have access to the course can upload videos.
# In the future, we plan to add a new org-level role for video uploaders.
course = get_course_and_check_access(course_key, user)
if (
settings.FEATURES["ENABLE_VIDEO_UPLOAD_PIPELINE"] and
getattr(settings, "VIDEO_UPLOAD_PIPELINE", None) and
course and
course.video_pipeline_configured
):
return course
else:
return None
return get_video_encodings_download(request, course_key_string)
def convert_video_status(video, is_video_encodes_ready=False):
"""
Convert status of a video. Status can be converted to one of the following:
* FAILED if video is in `upload` state for more than 24 hours
* `YouTube Duplicate` if status is `invalid_token`
* user-friendly video status
Exposes helper method without breaking existing bindings/dependencies
"""
now = datetime.now(video.get('created', datetime.now().replace(tzinfo=UTC)).tzinfo)
if video['status'] == 'upload' and (now - video['created']) > timedelta(hours=MAX_UPLOAD_HOURS):
new_status = 'upload_failed'
status = StatusDisplayStrings.get(new_status)
message = 'Video with id [{}] is still in upload after [{}] hours, setting status to [{}]'.format(
video['edx_video_id'], MAX_UPLOAD_HOURS, new_status
)
send_video_status_update([
{
'edxVideoId': video['edx_video_id'],
'status': new_status,
'message': message
}
])
elif video['status'] == 'invalid_token':
status = StatusDisplayStrings.get('youtube_duplicate')
elif is_video_encodes_ready:
status = StatusDisplayStrings.get('file_complete')
else:
status = StatusDisplayStrings.get(video['status'])
return status
def _get_videos(course, pagination_conf=None):
"""
Retrieves the list of videos from VAL corresponding to this course.
"""
videos, pagination_context = get_videos_for_course(
str(course.id),
VideoSortField.created,
SortDirection.desc,
pagination_conf
)
videos = list(videos)
# This is required to see if edx video pipeline is enabled while converting the video status.
course_video_upload_token = course.video_upload_pipeline.get('course_video_upload_token')
transcription_statuses = ['transcription_in_progress', 'transcript_ready', 'partial_failure', 'transcript_failed']
# convert VAL's status to studio's Video Upload feature status.
for video in videos:
# If we are using "new video workflow" and status is in `transcription_statuses` then video encodes are ready.
# This is because Transcription starts once all the encodes are complete except for YT, but according to
# "new video workflow" YT is disabled as well as deprecated. So, Its precise to say that the Transcription
# starts once all the encodings are complete *for the new video workflow*.
is_video_encodes_ready = not course_video_upload_token and (video['status'] in transcription_statuses)
# Update with transcript languages
video['transcripts'] = get_available_transcript_languages(video_id=video['edx_video_id'])
video['transcription_status'] = (
StatusDisplayStrings.get(video['status']) if is_video_encodes_ready else ''
)
video['transcript_urls'] = {}
for language_code in video['transcripts']:
video['transcript_urls'][language_code] = get_video_transcript_url(
video_id=video['edx_video_id'],
language_code=language_code,
)
# Convert the video status.
video['status'] = convert_video_status(video, is_video_encodes_ready)
return videos, pagination_context
def _get_default_video_image_url():
"""
Returns default video image url
"""
return staticfiles_storage.url(settings.VIDEO_IMAGE_DEFAULT_FILENAME)
def _get_index_videos(course, pagination_conf=None):
"""
Returns the information about each video upload required for the video list
"""
course_id = str(course.id)
attrs = [
'edx_video_id', 'client_video_id', 'created', 'duration',
'status', 'courses', 'transcripts', 'transcription_status',
'transcript_urls', 'error_description'
]
def _get_values(video):
"""
Get data for predefined video attributes.
"""
values = {}
for attr in attrs:
if attr == 'courses':
course = [c for c in video['courses'] if course_id in c]
(__, values['course_video_image_url']), = list(course[0].items())
else:
values[attr] = video[attr]
return values
videos, pagination_context = _get_videos(course, pagination_conf)
return [_get_values(video) for video in videos], pagination_context
return convert_video_status_source_function(video, is_video_encodes_ready)
def get_all_transcript_languages():
"""
Returns all possible languages for transcript.
Exposes helper method without breaking existing bindings/dependencies
"""
third_party_transcription_languages = {}
transcription_plans = get_3rd_party_transcription_plans()
cielo_fidelity = transcription_plans[TranscriptProvider.CIELO24]['fidelity']
# Get third party transcription languages.
third_party_transcription_languages.update(transcription_plans[TranscriptProvider.THREE_PLAY_MEDIA]['languages'])
third_party_transcription_languages.update(cielo_fidelity['MECHANICAL']['languages'])
third_party_transcription_languages.update(cielo_fidelity['PREMIUM']['languages'])
third_party_transcription_languages.update(cielo_fidelity['PROFESSIONAL']['languages'])
all_languages_dict = dict(settings.ALL_LANGUAGES, **third_party_transcription_languages)
# Return combined system settings and 3rd party transcript languages.
all_languages = []
for key, value in sorted(all_languages_dict.items(), key=lambda k_v: k_v[1]):
all_languages.append({
'language_code': key,
'language_text': value
})
return all_languages
return get_all_transcript_languages_source_function()
def videos_index_html(course, pagination_conf=None):
"""
Returns an HTML page to display previous video uploads and allow new ones
Exposes helper method without breaking existing bindings/dependencies
"""
is_video_transcript_enabled = VideoTranscriptEnabledFlag.feature_enabled(course.id)
previous_uploads, pagination_context = _get_index_videos(course, pagination_conf)
context = {
'context_course': course,
'image_upload_url': reverse_course_url('video_images_handler', str(course.id)),
'video_handler_url': reverse_course_url('videos_handler', str(course.id)),
'encodings_download_url': reverse_course_url('video_encodings_download', str(course.id)),
'default_video_image_url': _get_default_video_image_url(),
'previous_uploads': previous_uploads,
'concurrent_upload_limit': settings.VIDEO_UPLOAD_PIPELINE.get('CONCURRENT_UPLOAD_LIMIT', 0),
'video_supported_file_formats': list(VIDEO_SUPPORTED_FILE_FORMATS.keys()),
'video_upload_max_file_size': VIDEO_UPLOAD_MAX_FILE_SIZE_GB,
'video_image_settings': {
'video_image_upload_enabled': VIDEO_IMAGE_UPLOAD_ENABLED.is_enabled(),
'max_size': settings.VIDEO_IMAGE_SETTINGS['VIDEO_IMAGE_MAX_BYTES'],
'min_size': settings.VIDEO_IMAGE_SETTINGS['VIDEO_IMAGE_MIN_BYTES'],
'max_width': settings.VIDEO_IMAGE_MAX_WIDTH,
'max_height': settings.VIDEO_IMAGE_MAX_HEIGHT,
'supported_file_formats': settings.VIDEO_IMAGE_SUPPORTED_FILE_FORMATS
},
'is_video_transcript_enabled': is_video_transcript_enabled,
'active_transcript_preferences': None,
'transcript_credentials': None,
'transcript_available_languages': get_all_transcript_languages(),
'video_transcript_settings': {
'transcript_download_handler_url': reverse('transcript_download_handler'),
'transcript_upload_handler_url': reverse('transcript_upload_handler'),
'transcript_delete_handler_url': reverse_course_url('transcript_delete_handler', str(course.id)),
'trancript_download_file_format': Transcript.SRT
},
'pagination_context': pagination_context
}
if is_video_transcript_enabled:
context['video_transcript_settings'].update({
'transcript_preferences_handler_url': reverse_course_url(
'transcript_preferences_handler',
str(course.id)
),
'transcript_credentials_handler_url': reverse_course_url(
'transcript_credentials_handler',
str(course.id)
),
'transcription_plans': get_3rd_party_transcription_plans(),
})
context['active_transcript_preferences'] = get_transcript_preferences(str(course.id))
# Cached state for transcript providers' credentials (org-specific)
context['transcript_credentials'] = get_transcript_credentials_state_for_org(course.id.org)
return render_to_response('videos_index.html', context)
return videos_index_html_source_function(course, pagination_conf)
def videos_index_json(course):
"""
Returns JSON in the following format:
{
'videos': [{
'edx_video_id': 'aaaaaaaa-aaaa-4aaa-aaaa-aaaaaaaaaaaa',
'client_video_id': 'video.mp4',
'created': '1970-01-01T00:00:00Z',
'duration': 42.5,
'status': 'upload',
'course_video_image_url': 'https://video/images/1234.jpg'
}]
}
Exposes helper method without breaking existing bindings/dependencies
"""
index_videos, __ = _get_index_videos(course)
return JsonResponse({"videos": index_videos}, status=200)
return videos_index_json_source_function(course)
def videos_post(course, request):
"""
Input (JSON):
{
"files": [{
"file_name": "video.mp4",
"content_type": "video/mp4"
}]
}
Returns (JSON):
{
"files": [{
"file_name": "video.mp4",
"upload_url": "http://example.com/put_video"
}]
}
The returned array corresponds exactly to the input array.
Exposes helper method without breaking existing bindings/dependencies
"""
error = None
data = request.json
if 'files' not in data:
error = "Request object is not JSON or does not contain 'files'"
elif any(
'file_name' not in file or 'content_type' not in file
for file in data['files']
):
error = "Request 'files' entry does not contain 'file_name' and 'content_type'"
elif any(
file['content_type'] not in list(VIDEO_SUPPORTED_FILE_FORMATS.values())
for file in data['files']
):
error = "Request 'files' entry contain unsupported content_type"
if error:
return {'error': error}, 400
bucket = storage_service_bucket()
req_files = data['files']
resp_files = []
for req_file in req_files:
file_name = req_file['file_name']
try:
file_name.encode('ascii')
except UnicodeEncodeError:
error_msg = 'The file name for %s must contain only ASCII characters.' % file_name
return {'error': error_msg}, 400
edx_video_id = str(uuid4())
key = storage_service_key(bucket, file_name=edx_video_id)
metadata_list = [
('client_video_id', file_name),
('course_key', str(course.id)),
]
course_video_upload_token = course.video_upload_pipeline.get('course_video_upload_token')
# Only include `course_video_upload_token` if youtube has not been deprecated
# for this course.
if not DEPRECATE_YOUTUBE.is_enabled(course.id) and course_video_upload_token:
metadata_list.append(('course_video_upload_token', course_video_upload_token))
is_video_transcript_enabled = VideoTranscriptEnabledFlag.feature_enabled(course.id)
if is_video_transcript_enabled:
transcript_preferences = get_transcript_preferences(str(course.id))
if transcript_preferences is not None:
metadata_list.append(('transcript_preferences', json.dumps(transcript_preferences)))
for metadata_name, value in metadata_list:
key.set_metadata(metadata_name, value)
upload_url = key.generate_url(
KEY_EXPIRATION_IN_SECONDS,
'PUT',
headers={'Content-Type': req_file['content_type']}
)
# persist edx_video_id in VAL
create_video({
'edx_video_id': edx_video_id,
'status': 'upload',
'client_video_id': file_name,
'duration': 0,
'encoded_videos': [],
'courses': [str(course.id)]
})
resp_files.append({'file_name': file_name, 'upload_url': upload_url, 'edx_video_id': edx_video_id})
return {'files': resp_files}, 200
return videos_post_source_function(course, request)
def storage_service_bucket():
"""
Returns an S3 bucket for video upload.
Exposes helper method without breaking existing bindings/dependencies
"""
if ENABLE_DEVSTACK_VIDEO_UPLOADS.is_enabled():
params = {
'aws_access_key_id': settings.AWS_ACCESS_KEY_ID,
'aws_secret_access_key': settings.AWS_SECRET_ACCESS_KEY,
'security_token': settings.AWS_SECURITY_TOKEN
}
else:
params = {
'aws_access_key_id': settings.AWS_ACCESS_KEY_ID,
'aws_secret_access_key': settings.AWS_SECRET_ACCESS_KEY
}
conn = S3Connection(**params)
# We don't need to validate our bucket, it requires a very permissive IAM permission
# set since behind the scenes it fires a HEAD request that is equivalent to get_all_keys()
# meaning it would need ListObjects on the whole bucket, not just the path used in each
# environment (since we share a single bucket for multiple deployments in some configurations)
return conn.get_bucket(settings.VIDEO_UPLOAD_PIPELINE['VEM_S3_BUCKET'], validate=False)
return storage_service_bucket_source_function()
def storage_service_key(bucket, file_name):
"""
Returns an S3 key to the given file in the given bucket.
Exposes helper method without breaking existing bindings/dependencies
"""
key_name = "{}/{}".format(
settings.VIDEO_UPLOAD_PIPELINE.get("ROOT_PATH", ""),
file_name
)
return s3.key.Key(bucket, key_name)
return storage_service_key_source_function(bucket, file_name)
def send_video_status_update(updates):
"""
Update video status in edx-val.
Exposes helper method without breaking existing bindings/dependencies
"""
for update in updates:
update_video_status(update.get('edxVideoId'), update.get('status'))
LOGGER.info(
'VIDEOS: Video status update with id [%s], status [%s] and message [%s]',
update.get('edxVideoId'),
update.get('status'),
update.get('message')
)
return JsonResponse()
return send_video_status_update_source_function(updates)
def is_status_update_request(request_data):
"""
Returns True if `request_data` contains status update else False.
Exposes helper method without breaking existing bindings/dependencies
"""
return any('status' in update for update in request_data)
def _generate_pagination_configuration(course_key_string, request):
"""
Returns pagination configuration
"""
course_key = CourseKey.from_string(course_key_string)
if not ENABLE_VIDEO_UPLOAD_PAGINATION.is_enabled(course_key):
return None
return {
'page_number': request.GET.get('page', 1),
'videos_per_page': request.session.get("VIDEOS_PER_PAGE", VIDEOS_PER_PAGE)
}
def _is_pagination_context_update_request(request):
"""
Checks if request contains `videos_per_page`
"""
return request.POST.get('id', '') == "videos_per_page"
def _update_pagination_context(request):
"""
Updates session with posted value
"""
error_msg = _('A non zero positive integer is expected')
try:
videos_per_page = int(request.POST.get('value'))
if videos_per_page <= 0:
return JsonResponse({'error': error_msg}, status=500)
except ValueError:
return JsonResponse({'error': error_msg}, status=500)
request.session['VIDEOS_PER_PAGE'] = videos_per_page
return JsonResponse()
return is_status_update_request_source_function(request_data)

View File

@@ -1,6 +0,0 @@
"""
Xblock services that contain the business logic for xblock views.
"""
from .create_xblock import *
from .xblock_helpers import *
from .xblock_service import *

View File

@@ -0,0 +1,9 @@
"""
The xblock_storage_handlers folder contains service methods that implement the business logic for view endpoints
located in contentstore/views/block.py. It is renamed to xblock_storage_handlers to reflect its responsibility
of handling storage-related operations of xblocks, such as creation, retrieval, and deletion.
The view_handlers.py file includes business methods called by the view endpoints.
These methods, such as handle_xblock, delete_orphans, etc., interact with the required modulestore methods,
handle any errors, and aggregate and serialize data for the response.
"""

View File

@@ -38,7 +38,9 @@ from xblock.core import XBlock
from xblock.fields import Scope
from cms.djangoapps.contentstore.config.waffle import SHOW_REVIEW_RULES_FLAG
from cms.djangoapps.contentstore.toggles import ENABLE_COPY_PASTE_UNITS
from cms.djangoapps.models.settings.course_grading import CourseGradingModel
from cms.lib.ai_aside_summary_config import AiAsideSummaryConfig
from common.djangoapps.edxmako.services import MakoService
from common.djangoapps.static_replace import replace_static_urls
from common.djangoapps.student.auth import (
@@ -116,15 +118,6 @@ CREATE_IF_NOT_FOUND = ["course_info"]
NEVER = lambda x: False
ALWAYS = lambda x: True
__all__ = [
"handle_xblock",
"create_xblock_info",
"load_services_for_studio",
"get_block_info",
"get_xblock",
"delete_orphans",
]
def _filter_entrance_exam_grader(graders):
"""
@@ -152,6 +145,32 @@ def _is_library_component_limit_reached(usage_key):
return total_children + 1 > settings.MAX_BLOCKS_PER_CONTENT_LIBRARY
def _get_block_parent_children(xblock):
'''
Extract parent ID information from the given xblock and report it in the response
Extract child ID information from the given xblock and report it in the response
Note that no effort is made to look up all settings for this xblock's parent or children;
the blocks are merely identified. If further information regarding them is required, another
call with those blocks as subjects may be made into this handler.
'''
response = {}
if hasattr(xblock, "parent") and xblock.parent:
response["parent"] = {
"block_type": xblock.parent.block_type,
"block_id": xblock.parent.block_id
}
if hasattr(xblock, "children") and xblock.children:
response["children"] = [
{
"block_type": child.block_type,
"block_id": child.block_id
}
for child in xblock.children
]
return response
def handle_xblock(request, usage_key_string=None):
"""
Service method with all business logic for handling xblock requests.
@@ -188,6 +207,9 @@ def handle_xblock(request, usage_key_string=None):
# TODO: pass fields to get_block_info and only return those
with modulestore().bulk_operations(usage_key.course_key):
response = get_block_info(get_xblock(usage_key, request.user))
if "customReadToken" in fields:
parent_children = _get_block_parent_children(get_xblock(usage_key, request.user))
response.update(parent_children)
return JsonResponse(response)
else:
return HttpResponse(status=406)
@@ -274,6 +296,7 @@ def handle_xblock(request, usage_key_string=None):
def modify_xblock(usage_key, request):
request_data = request.json
print(f'In modify_xblock with data = {request_data.get("data")}, fields = {request_data.get("fields")}')
return _save_xblock(
request.user,
get_xblock(usage_key, request.user),
@@ -288,6 +311,7 @@ def modify_xblock(usage_key, request):
prereq_min_completion=request_data.get("prereqMinCompletion"),
publish=request_data.get("publish"),
fields=request_data.get("fields"),
summary_configuration_enabled=request_data.get("summary_configuration_enabled"),
)
@@ -362,6 +386,7 @@ def _save_xblock( # lint-amnesty, pylint: disable=too-many-statements
prereq_min_completion=None,
publish=None,
fields=None,
summary_configuration_enabled=None,
):
"""
Saves xblock w/ its fields. Has special processing for grader_type, publish, and nullout and Nones in metadata.
@@ -538,6 +563,12 @@ def _save_xblock( # lint-amnesty, pylint: disable=too-many-statements
if publish == "make_public":
modulestore().publish(xblock.location, user.id)
# If summary_configuration_enabled is not None, use AIAsideSummary to update it.
if xblock.category == "vertical" and summary_configuration_enabled is not None:
AiAsideSummaryConfig(course.id).set_summary_settings(xblock.location, {
'enabled': summary_configuration_enabled
})
# Note that children aren't being returned until we have a use case.
return JsonResponse(result, encoder=EdxJSONEncoder)
@@ -891,7 +922,6 @@ def _duplicate_block(
@login_required
@expect_json
def delete_item(request, usage_key):
"""
Exposes internal helper method without breaking existing bindings/dependencies
@@ -977,6 +1007,7 @@ def get_block_info(
rewrite_static_links=True,
include_ancestor_info=False,
include_publishing_info=False,
include_children_predicate=False,
):
"""
metadata, data, id representation of a leaf block fetcher.
@@ -1000,6 +1031,7 @@ def get_block_info(
data=data,
metadata=own_metadata(xblock),
include_ancestor_info=include_ancestor_info,
include_children_predicate=include_children_predicate
)
if include_publishing_info:
add_container_page_publishing_info(xblock, xblock_info)
@@ -1058,6 +1090,7 @@ def create_xblock_info( # lint-amnesty, pylint: disable=too-many-statements
user=None,
course=None,
is_concise=False,
summary_configuration=None,
):
"""
Creates the information needed for client-side XBlockInfo.
@@ -1107,6 +1140,10 @@ def create_xblock_info( # lint-amnesty, pylint: disable=too-many-statements
should_visit_children = include_child_info and (
course_outline and not is_xblock_unit or not course_outline
)
if summary_configuration is None:
summary_configuration = AiAsideSummaryConfig(xblock.location.course_key)
if should_visit_children and xblock.has_children:
child_info = _create_xblock_child_info(
xblock,
@@ -1116,6 +1153,7 @@ def create_xblock_info( # lint-amnesty, pylint: disable=too-many-statements
user=user,
course=course,
is_concise=is_concise,
summary_configuration=summary_configuration,
)
else:
child_info = None
@@ -1356,6 +1394,9 @@ def create_xblock_info( # lint-amnesty, pylint: disable=too-many-statements
else:
xblock_info["staff_only_message"] = False
# If the ENABLE_COPY_PASTE_UNITS feature flag is enabled, we show the newer menu that allows copying/pasting
xblock_info["enable_copy_paste_units"] = ENABLE_COPY_PASTE_UNITS.is_enabled()
xblock_info[
"has_partition_group_components"
] = has_children_visible_to_specific_partition_groups(xblock)
@@ -1363,6 +1404,9 @@ def create_xblock_info( # lint-amnesty, pylint: disable=too-many-statements
xblock, course=course
)
if is_xblock_unit and summary_configuration.is_enabled():
xblock_info["summary_configuration_enabled"] = summary_configuration.is_summary_enabled(xblock_info['id'])
return xblock_info
@@ -1556,6 +1600,7 @@ def _create_xblock_child_info(
user=None,
course=None,
is_concise=False,
summary_configuration=None,
):
"""
Returns information about the children of an xblock, as well as about the primary category
@@ -1582,6 +1627,7 @@ def _create_xblock_child_info(
user=user,
course=course,
is_concise=is_concise,
summary_configuration=summary_configuration,
)
for child in xblock.get_children()
]

View File

@@ -94,7 +94,7 @@ class MaintenanceBaseView(View):
"""
A short method to render_to_response that renders response.
"""
if self.request.is_ajax():
if self.request.headers.get('x-requested-with') == 'XMLHttpRequest':
return JsonResponse(self.context)
return render_to_response(self.template, self.context)

Some files were not shown because too many files have changed in this diff Show More