build: run ShellCheck in CI (#31809)
build: run ShellCheck. Adds a ShellCheck check to edx-platform PRs and master, using the shared workflow & template from the .github repo. This will become a "required" check once it passes for 2 straight weeks on master. Brings all existing shell scripts into compliance with ShellCheck.
This commit is contained in:
32
.github/workflows/shellcheck.yml
vendored
Normal file
32
.github/workflows/shellcheck.yml
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
# Run ShellCheck on PRs and master
|
||||
|
||||
# For more context, see:
|
||||
# https://github.com/openedx/.github/blob/master/docs/decisions/0001-shellcheck.rst
|
||||
|
||||
name: ShellCheck
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
shellcheck:
|
||||
strategy:
|
||||
matrix:
|
||||
os: ["ubuntu", "macos"]
|
||||
uses: openedx/.github/.github/workflows/shellcheck.yml@master
|
||||
with:
|
||||
# For details on the meaning of each of these arguments, see:
|
||||
# https://github.com/openedx/.github/blob/master/.github/workflows/shellcheck.yml
|
||||
# We exclude `./node_modules/*` by default because we want people to easily be able to
|
||||
# copy and run the command locally. Local copies of most of our services have a `./node_modules`
|
||||
# directory that we want to ignore.
|
||||
exclude-patterns: "./node_modules/*"
|
||||
operating-system: "${{ matrix.os }}"
|
||||
shellcheck-version: "v0.9.0"
|
||||
#shellcheck-options: ""
|
||||
@@ -1,2 +1,4 @@
|
||||
#!/usr/bin/env zsh
|
||||
# shellcheck disable=all
|
||||
# ^ This is zsh, which shellcheck doesn't support.
|
||||
git log --all ^opaque-keys-merge-base --format=%H $1 | while read f; do git branch --contains $f; done | sort -u
|
||||
|
||||
@@ -13,13 +13,13 @@ set -e
|
||||
# Violations thresholds for failing the build
|
||||
source scripts/thresholds.sh
|
||||
|
||||
XSSLINT_THRESHOLDS=`cat scripts/xsslint_thresholds.json`
|
||||
XSSLINT_THRESHOLDS=$(cat scripts/xsslint_thresholds.json)
|
||||
export XSSLINT_THRESHOLDS=${XSSLINT_THRESHOLDS//[[:space:]]/}
|
||||
|
||||
|
||||
# Run appropriate CI system script
|
||||
if [ -n "$SCRIPT_TO_RUN" ] ; then
|
||||
$SCRIPT_TO_RUN
|
||||
# Run appropriate CI system script (with args if provided)
|
||||
if [ -n "${SCRIPT_TO_RUN[*]}" ] ; then
|
||||
"${SCRIPT_TO_RUN[@]}"
|
||||
|
||||
# Exit with the exit code of the called script
|
||||
exit $?
|
||||
|
||||
@@ -55,7 +55,7 @@ git clean -qxfd
|
||||
|
||||
function emptyxunit {
|
||||
|
||||
cat > reports/$1.xml <<END
|
||||
cat > "reports/$1.xml" <<END
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<testsuite name="$1" tests="1" errors="0" failures="0" skip="0">
|
||||
<testcase classname="pavelib.quality" name="$1" time="0.604"></testcase>
|
||||
@@ -82,19 +82,18 @@ else
|
||||
fi
|
||||
|
||||
PAVER_ARGS="-v"
|
||||
PARALLEL="--processes=-1"
|
||||
export SUBSET_JOB=$JOB_NAME
|
||||
|
||||
function run_paver_quality {
|
||||
QUALITY_TASK=$1
|
||||
shift
|
||||
mkdir -p test_root/log/
|
||||
LOG_PREFIX=test_root/log/$QUALITY_TASK
|
||||
$TOX paver $QUALITY_TASK $* 2> $LOG_PREFIX.err.log > $LOG_PREFIX.out.log || {
|
||||
LOG_PREFIX="test_root/log/$QUALITY_TASK"
|
||||
$TOX paver "$QUALITY_TASK" "$@" 2> "$LOG_PREFIX.err.log" > "$LOG_PREFIX.out.log" || {
|
||||
echo "STDOUT (last 100 lines of $LOG_PREFIX.out.log):";
|
||||
tail -n 100 $LOG_PREFIX.out.log;
|
||||
tail -n 100 "$LOG_PREFIX.out.log"
|
||||
echo "STDERR (last 100 lines of $LOG_PREFIX.err.log):";
|
||||
tail -n 100 $LOG_PREFIX.err.log;
|
||||
tail -n 100 "$LOG_PREFIX.err.log"
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
@@ -103,6 +102,7 @@ function run_paver_quality {
|
||||
case "$TEST_SUITE" in
|
||||
|
||||
"quality")
|
||||
EXIT=0
|
||||
|
||||
mkdir -p reports
|
||||
|
||||
@@ -128,11 +128,11 @@ case "$TEST_SUITE" in
|
||||
echo "Finding pycodestyle violations and storing report..."
|
||||
run_paver_quality run_pep8 || { EXIT=1; }
|
||||
echo "Finding ESLint violations and storing report..."
|
||||
run_paver_quality run_eslint -l $ESLINT_THRESHOLD || { EXIT=1; }
|
||||
run_paver_quality run_eslint -l "$ESLINT_THRESHOLD" || { EXIT=1; }
|
||||
echo "Finding Stylelint violations and storing report..."
|
||||
run_paver_quality run_stylelint || { EXIT=1; }
|
||||
echo "Running xss linter report."
|
||||
run_paver_quality run_xsslint -t $XSSLINT_THRESHOLDS || { EXIT=1; }
|
||||
run_paver_quality run_xsslint -t "$XSSLINT_THRESHOLDS" || { EXIT=1; }
|
||||
echo "Running PII checker on all Django models..."
|
||||
run_paver_quality run_pii_check || { EXIT=1; }
|
||||
echo "Running reserved keyword checker on all Django models..."
|
||||
@@ -144,7 +144,7 @@ case "$TEST_SUITE" in
|
||||
# Need to create an empty test result so the post-build
|
||||
# action doesn't fail the build.
|
||||
emptyxunit "stub"
|
||||
exit $EXIT
|
||||
exit "$EXIT"
|
||||
;;
|
||||
|
||||
"js-unit")
|
||||
@@ -153,6 +153,7 @@ case "$TEST_SUITE" in
|
||||
;;
|
||||
|
||||
"pavelib-js-unit")
|
||||
EXIT=0
|
||||
$TOX paver test_js --coverage --skip-clean || { EXIT=1; }
|
||||
paver test_lib --skip-clean $PAVER_ARGS || { EXIT=1; }
|
||||
|
||||
@@ -167,6 +168,6 @@ case "$TEST_SUITE" in
|
||||
# Note that by default the value of this variable EXIT is not set, so if
|
||||
# neither command fails then the exit command resolves to simply exit
|
||||
# which is considered successful.
|
||||
exit $EXIT
|
||||
exit "$EXIT"
|
||||
;;
|
||||
esac
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
# shellcheck disable=all
|
||||
# ^ Paver in edx-platform is on the way out
|
||||
# (https://github.com/openedx/edx-platform/issues/31798)
|
||||
# so we're not going to bother fixing these shellcheck
|
||||
# violations.
|
||||
|
||||
# Courtesy of Gregory Nicholas
|
||||
|
||||
_subcommand_opts()
|
||||
|
||||
@@ -62,23 +62,23 @@ calculate_migrations() {
|
||||
output_file="common/test/db_cache/bok_choy_${db}_migrations.yaml"
|
||||
# Redirect stdout to /dev/null because the script will print
|
||||
# out all migrations to both stdout and the output file.
|
||||
./manage.py lms --settings $SETTINGS show_unapplied_migrations --database $db --output_file $output_file 1>/dev/null
|
||||
./manage.py lms --settings "$SETTINGS" show_unapplied_migrations --database "$db" --output_file "$output_file" 1>/dev/null
|
||||
}
|
||||
|
||||
run_migrations() {
|
||||
echo "Running the lms migrations on the $db bok_choy DB."
|
||||
./manage.py lms --settings $SETTINGS migrate --database $db --traceback --noinput
|
||||
./manage.py lms --settings "$SETTINGS" migrate --database "$db" --traceback --noinput
|
||||
echo "Running the cms migrations on the $db bok_choy DB."
|
||||
./manage.py cms --settings $SETTINGS migrate --database $db --traceback --noinput
|
||||
./manage.py cms --settings "$SETTINGS" migrate --database "$db" --traceback --noinput
|
||||
}
|
||||
|
||||
load_cache_into_db() {
|
||||
echo "Loading the schema from the filesystem into the $db MySQL DB."
|
||||
mysql $MYSQL_HOST -u root "${databases["$db"]}" < $DB_CACHE_DIR/bok_choy_schema_$db.sql
|
||||
mysql "$MYSQL_HOST" -u root "${databases["$db"]}" < "$DB_CACHE_DIR/bok_choy_schema_$db.sql"
|
||||
echo "Loading the fixture data from the filesystem into the $db MySQL DB."
|
||||
./manage.py lms --settings $SETTINGS loaddata --database $db $DB_CACHE_DIR/bok_choy_data_$db.json
|
||||
./manage.py lms --settings "$SETTINGS" loaddata --database "$db" "$DB_CACHE_DIR/bok_choy_data_$db.json"
|
||||
echo "Loading the migration data from the filesystem into the $db MySQL DB."
|
||||
mysql $MYSQL_HOST -u root "${databases["$db"]}" < $DB_CACHE_DIR/bok_choy_migrations_data_$db.sql
|
||||
mysql "$MYSQL_HOST" -u root "${databases["$db"]}" < "$DB_CACHE_DIR/bok_choy_migrations_data_$db.sql"
|
||||
}
|
||||
|
||||
rebuild_cache_for_db() {
|
||||
@@ -87,13 +87,13 @@ rebuild_cache_for_db() {
|
||||
|
||||
# Dump the schema and data to the cache
|
||||
echo "Using the dumpdata command to save the $db fixture data to the filesystem."
|
||||
./manage.py lms --settings $SETTINGS dumpdata --database $db > $DB_CACHE_DIR/bok_choy_data_$db.json --exclude=api_admin.Catalog
|
||||
./manage.py lms --settings "$SETTINGS" dumpdata --database "$db" > "$DB_CACHE_DIR/bok_choy_data_$db.json" --exclude=api_admin.Catalog
|
||||
echo "Saving the schema of the $db bok_choy DB to the filesystem."
|
||||
mysqldump $MYSQL_HOST -u root --no-data --skip-comments --skip-dump-date "${databases[$db]}" > $DB_CACHE_DIR/bok_choy_schema_$db.sql
|
||||
mysqldump "$MYSQL_HOST" -u root --no-data --skip-comments --skip-dump-date "${databases[$db]}" > "$DB_CACHE_DIR/bok_choy_schema_$db.sql"
|
||||
|
||||
# dump_data does not dump the django_migrations table so we do it separately.
|
||||
echo "Saving the django_migrations table of the $db bok_choy DB to the filesystem."
|
||||
mysqldump $MYSQL_HOST -u root --no-create-info --skip-comments --skip-dump-date "${databases["$db"]}" django_migrations > $DB_CACHE_DIR/bok_choy_migrations_data_$db.sql
|
||||
mysqldump $MYSQL_HOST -u root --no-create-info --skip-comments --skip-dump-date "${databases["$db"]}" django_migrations > "$DB_CACHE_DIR/bok_choy_migrations_data_$db.sql"
|
||||
}
|
||||
|
||||
for db in "${database_order[@]}"; do
|
||||
@@ -107,7 +107,7 @@ for db in "${database_order[@]}"; do
|
||||
# or a jenkins worker environment) that already ran tests on another commit that had
|
||||
# different migrations that created, dropped, or altered tables.
|
||||
echo "Issuing a reset_db command to the $db bok_choy MySQL database."
|
||||
./manage.py lms --settings $SETTINGS reset_db --traceback --router $db
|
||||
./manage.py lms --settings "$SETTINGS" reset_db --traceback --router "$db"
|
||||
fi
|
||||
|
||||
if ! [[ $CALCULATE_MIGRATIONS ]]; then
|
||||
|
||||
@@ -31,19 +31,19 @@ set -e
|
||||
############################################################################
|
||||
|
||||
OUTPUT_DIR="reports/vulture"
|
||||
mkdir -p ${OUTPUT_DIR}
|
||||
OUTPUT_FILE=${OUTPUT_DIR}/vulture-report.txt
|
||||
echo '' > $OUTPUT_FILE
|
||||
mkdir -p "$OUTPUT_DIR"
|
||||
OUTPUT_FILE="${OUTPUT_DIR}/vulture-report.txt"
|
||||
echo '' > "$OUTPUT_FILE"
|
||||
# exclude test code from analysis, as it isn't explicitly called by other
|
||||
# code. Additionally, application code that is only called by tests
|
||||
# should be considered dead
|
||||
EXCLUSIONS='/test,/acceptance,cms/envs,lms/envs,/terrain,migrations/,signals.py'
|
||||
MIN_CONFIDENCE=90
|
||||
# paths to the code on which to run the analysis
|
||||
CODE_PATHS='cms common lms openedx'
|
||||
CODE_PATHS=('cms' 'common' 'lms' 'openedx')
|
||||
WHITELIST_PATH="$(dirname "${BASH_SOURCE[0]}")/whitelist.py"
|
||||
echo "Checking for dead code in the following paths: $CODE_PATHS"
|
||||
echo "Checking for dead code in the following paths: ${CODE_PATHS[*]}"
|
||||
echo "Results can be found in $OUTPUT_FILE"
|
||||
vulture $CODE_PATHS $WHITELIST_PATH --exclude $EXCLUSIONS \
|
||||
--min-confidence $MIN_CONFIDENCE \
|
||||
--sort-by-size |tac > $OUTPUT_FILE
|
||||
vulture "${CODE_PATHS[@]}" "$WHITELIST_PATH" --exclude "$EXCLUSIONS" \
|
||||
--min-confidence "$MIN_CONFIDENCE" \
|
||||
--sort-by-size |tac > "$OUTPUT_FILE"
|
||||
|
||||
Reference in New Issue
Block a user