Skip to content

Added password updater rate limiting (#13959)

Added password updater rate limiting

Added password updater rate limiting (#13959)

Workflow file for this run

##############################################################################
##############################################################################
#
# NOTE!
#
# Please read the README.md file in this directory that defines what should
# be placed in this file.
#
##############################################################################
##############################################################################
# Workflow triggered on every pull request, regardless of target branch.
name: Pull request workflow
on:
pull_request:
branches:
- "**"
env:
# Unique label for Codecov uploads so each workflow run's reports are distinct.
CODECOV_UNIQUE_NAME: CODECOV_UNIQUE_NAME-${{ github.run_id }}-${{ github.run_number }}
jobs:
# Fails fast when a PR targets any branch other than 'develop'.
check_base_branch:
# only run the job if the pull request actor is not dependabot
if: ${{ github.actor != 'dependabot[bot]' }}
name: Check base branch of the pull request to be develop
runs-on: ubuntu-latest
steps:
# This step only executes (and fails the job) when the base branch is wrong;
# for develop-targeted PRs the job has no steps to run and passes.
- if: github.event.pull_request.base.ref != 'develop'
name: Check base branch
run: |
echo "Pull requests are only allowed against the 'develop' branch. Please refer to the pull request guidelines."
echo "Error: Close this PR and try again."
exit 1
# Runs formatting, TSDoc, sanitization, error-handling and shellcheck linters
# inside the non-production API docker image.
Code-Quality-Checks:
name: Checking code quality
runs-on: ubuntu-latest
steps:
- name: Checkout this repository
uses: actions/checkout@v4.2.2
with:
fetch-depth: 0 # Fetch all history for git diff
# Computes the PR's changed-file list (vs. the merge base) so the
# error-handling validator can restrict itself to touched files.
- name: Get changed files for error handling validation
id: changed-files
run: |
# Skip if not in PR context
if [ -z "${{ github.event.pull_request.base.sha }}" ]; then
echo "changed_files=" >> $GITHUB_OUTPUT
exit 0
fi
# Get the base branch ref
BASE_SHA=$(git merge-base ${{ github.event.pull_request.base.sha }} ${{ github.event.pull_request.head.sha }})
# Get all changed files
CHANGED_FILES=$(git diff --name-only --diff-filter=ACMRT $BASE_SHA ${{ github.event.pull_request.head.sha }} | tr '\n' ' ')
echo "changed_files=${CHANGED_FILES}" >> $GITHUB_OUTPUT
- name: Build talawa api non production environment docker image
run: docker buildx build --file ./docker/api.Containerfile --tag talawa_api --target non_production ./
- name: Check code format
run: docker container run talawa_api pnpm format:check
- name: Check TSDoc comments
run: docker container run talawa_api pnpm lint:tsdoc
- name: Check sanitization
run: docker container run talawa_api pnpm lint:sanitization
- name: Validate error handling standards
env:
CHANGED_FILES: ${{ steps.changed-files.outputs.changed_files }}
run: docker container run -e CHANGED_FILES="$CHANGED_FILES" talawa_api pnpm validate:error-handling
# Guards against same-named source/target branches (possible across forks).
- name: Check if the source and target branches are different
if: ${{ github.event.pull_request.base.ref == github.event.pull_request.head.ref }}
run: |
echo "Source Branch ${{ github.event.pull_request.head.ref }}"
echo "Target Branch ${{ github.event.pull_request.base.ref }}"
echo "Error: Source and Target Branches are the same. Please ensure they are different."
echo "Error: Close this PR and try again."
exit 1
# Lints all shell scripts under scripts/; relies on shellcheck being
# preinstalled on the ubuntu-latest runner image.
- name: Lint shell scripts (shellcheck)
shell: bash
run: |
shopt -s globstar nullglob
files=(scripts/**/*.sh)
if [ ${#files[@]} -eq 0 ]; then
echo "No shell scripts found to lint."
else
shellcheck -x --severity=error "${files[@]}"
fi
# Style/lint/docstring checks for the Python tooling kept under .github,
# run inside a project-local virtualenv with cached pip packages.
Python-Compliance:
name: Check Python Code Style
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python 3.11
uses: actions/setup-python@v4
with:
# Quoted so YAML keeps the version as a string; unquoted values such
# as 3.10 are otherwise re-typed as the float 3.1 by YAML parsers.
python-version: "3.11"
- name: Cache pip packages
uses: actions/cache@v4
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Install dependencies
run: |
python3 -m venv venv
source venv/bin/activate
python -m pip install --upgrade pip
pip install -r .github/workflows/requirements.txt
- name: Run Black Formatter Check
run: |
source venv/bin/activate
black --check .
- name: Run Flake8 Linter
run: |
source venv/bin/activate
flake8 --docstring-convention google --ignore E402,E722,E203,F401,W503 .github
- name: Run pydocstyle
run: |
source venv/bin/activate
pydocstyle --convention=google --add-ignore=D415,D205 .github
- name: Run docstring compliance check
run: |
source venv/bin/activate
python .github/workflows/scripts/check_docstrings.py --directories .github
# Delegates to the organization's shared reusable workflow that limits/counts
# files changed per PR.
Count-Changed-Files:
uses: PalisadoesFoundation/.github/.github/workflows/count-changed-files.yml@main
# Runs the organization-central "disable statements" check against the PR's
# changed files (binary image assets are filtered out first).
python_checks:
name: Run Python Checks
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Checkout centralized scripts
uses: actions/checkout@v4
with:
repository: PalisadoesFoundation/.github
path: .github-central
ref: main
- name: Get changed files
id: changed-files
run: |
ALL_CHANGED_FILES=$(git diff --name-only --diff-filter=ACMRT ${{ github.event.pull_request.base.sha }} ${{ github.event.pull_request.head.sha }})
# Skip binary assets that cannot be decoded as UTF-8 by the disable-statements check
FILTERED_FILES=$(echo "$ALL_CHANGED_FILES" | grep -Ev '\.(png|jpg|jpeg|gif|webp)$' || true)
echo "all_changed_files=$(echo "$FILTERED_FILES" | tr '\n' ' ')" >> $GITHUB_OUTPUT
- name: Set up Python
uses: actions/setup-python@v5
with:
# Quoted so YAML keeps the version as a string; unquoted values such
# as 3.10 are otherwise re-typed as the float 3.1 by YAML parsers.
python-version: "3.9"
- name: Run Disable Statements Check
run: |
if [ -z "${{ steps.changed-files.outputs.all_changed_files }}" ]; then
echo "No eligible text files changed; skipping disable statements check."
exit 0
fi
python .github-central/.github/workflows/scripts/disable_statements_check.py --files ${{ steps.changed-files.outputs.all_changed_files }}
# Verifies gql-tada generated files/configuration inside the API image.
check_gql_tada:
name: Check gql tada files and configuration
runs-on: ubuntu-latest
steps:
- name: Checkout this repository
uses: actions/checkout@v4.2.2
- name: Build talawa api non production environment docker image
run: docker buildx build --file ./docker/api.Containerfile --tag talawa_api --target non_production ./
- name: Check gql tada
run: docker container run talawa_api pnpm check_gql_tada
# Verifies drizzle migration files; the check needs JWT secrets present, so a
# throwaway random secret is generated per run.
check_drizzle_migrations:
name: Check drizzle migration files
runs-on: ubuntu-latest
steps:
- name: Checkout this repository
uses: actions/checkout@v4.2.2
- name: Build talawa api non production environment docker image
run: docker buildx build --file ./docker/api.Containerfile --tag talawa_api --target non_production ./
- name: Check drizzle migrations
run: |
JWT_SECRET=$(openssl rand -hex 32)
docker container run --env-file ./envFiles/.env.ci -e "API_JWT_SECRET=$JWT_SECRET" -e "API_AUTH_JWT_SECRET=$JWT_SECRET" talawa_api pnpm check_drizzle_migrations
# TypeScript typecheck inside the API image.
check_type_errors:
name: Check type errors
runs-on: ubuntu-latest
steps:
- name: Checkout this repository
uses: actions/checkout@v4.2.2
- name: Build talawa api non production environment docker image
run: docker buildx build --file ./docker/api.Containerfile --tag talawa_api --target non_production ./
- name: Check type errors
run: docker container run talawa_api pnpm typecheck
# Runs the project's mock-isolation check inside the API image.
check_mock_isolation:
name: Check mock isolation
runs-on: ubuntu-latest
steps:
- name: Checkout this repository
uses: actions/checkout@v4.2.2
- name: Build talawa api non production environment docker image
run: docker buildx build --file ./docker/api.Containerfile --tag talawa_api --target non_production ./
- name: Check mock isolation
run: docker container run talawa_api pnpm check_mock_isolation
# End-to-end validation that the production stack runs under ROOTLESS Docker:
# installs/starts a rootless daemon, proves it works without docker-group
# membership, then boots the full compose stack (postgres/redis/minio/api/caddy)
# and probes the healthcheck endpoint through Caddy.
check_rootless_production_config:
name: Check rootless production Docker config (rootless)
runs-on: ubuntu-latest
# Only worth running once the test matrix has passed.
needs: [Run-Tests]
if: github.actor != 'dependabot[bot]'
steps:
- name: Checkout this repository
uses: actions/checkout@v4.2.2
- name: Ensure rootless production files exist
run: |
test -f ./envFiles/.env.rootless.production
test -f ./docker/compose.rootless.production.yaml
- name: Validate docker compose config (rootless env, no interpolation)
run: |
docker compose --env-file ./envFiles/.env.rootless.production \
-f ./compose.yaml -f ./docker/compose.rootless.production.yaml \
config --no-interpolate -q
- name: Validate docker compose config (rootless env, interpolated)
run: |
docker compose --env-file ./envFiles/.env.rootless.production \
-f ./compose.yaml -f ./docker/compose.rootless.production.yaml \
config -q
- name: Install rootless prerequisites
run: |
set -euxo pipefail
sudo apt-get update
sudo apt-get install -y uidmap dbus-user-session slirp4netns fuse-overlayfs ca-certificates curl gnupg
# Ensure the Docker apt repo is configured so we can install/upgrade packages.
if ! command -v dockerd-rootless-setuptool.sh >/dev/null 2>&1; then
sudo install -m 0755 -d /etc/apt/keyrings
if [ ! -f /etc/apt/keyrings/docker.gpg ]; then
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
sudo chmod a+r /etc/apt/keyrings/docker.gpg
fi
echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
$(. /etc/os-release && echo \"$VERSION_CODENAME\") stable" | \
sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get update
fi
# Always install/upgrade rootless extras AND compose plugin.
# The !override YAML tag in compose.rootless.production.yaml requires
# Docker Compose v2.24.4+; the runner's pre-installed version may be older.
# Pin to a minimum version that supports the !override tag.
if ! sudo apt-get install -y docker-ce-rootless-extras 'docker-compose-plugin=2.24.4-*'; then
echo "::notice::Pinned docker-compose-plugin=2.24.4-* not available; installing latest and will verify version below."
sudo apt-get install -y docker-ce-rootless-extras docker-compose-plugin
fi
echo "Docker Compose version:"
docker compose version
# Verify that the installed Compose version satisfies the minimum requirement.
COMPOSE_MIN="2.24.4"
compose_version=$(docker compose version --short 2>/dev/null || echo "0.0.0")
version_gte() {
printf '%s\n%s' "$2" "$1" | sort -V -C
}
if ! version_gte "$compose_version" "$COMPOSE_MIN"; then
echo "::error::Docker Compose v${COMPOSE_MIN}+ required for !override tag, found v${compose_version}"
exit 1
fi
echo "Docker Compose v${compose_version} >= v${COMPOSE_MIN} ✓"
# Starts the per-user daemon via systemd when possible, otherwise falls back
# to launching dockerd-rootless.sh directly and logging to RUNNER_TEMP.
- name: Start rootless Docker daemon
run: |
set -euxo pipefail
export XDG_RUNTIME_DIR="/run/user/$UID"
sudo mkdir -p "$XDG_RUNTIME_DIR"
sudo chown "$USER":"$USER" "$XDG_RUNTIME_DIR"
export PATH="$HOME/bin:$PATH"
if [ ! -f "$HOME/.config/systemd/user/docker.service" ]; then
dockerd-rootless-setuptool.sh install --force
fi
if ! systemctl --user daemon-reload 2>/dev/null; then
echo "::notice::systemctl daemon-reload failed (expected on runners without session bus)"
fi
if ! systemctl --user start docker 2>/dev/null; then
echo "::notice::systemctl start docker failed (expected on runners without session bus)"
fi
if ! systemctl --user --no-pager status docker >/dev/null 2>&1; then
echo "::notice::Falling back to manual dockerd-rootless.sh start"
nohup dockerd-rootless.sh > "$RUNNER_TEMP/dockerd-rootless.log" 2>&1 &
fi
chmod +x scripts/docker/resolve-docker-host.sh
eval "$(./scripts/docker/resolve-docker-host.sh --mode rootless --emit-export --warn-if-docker-group)"
echo "DOCKER_HOST=$DOCKER_HOST" >> "$GITHUB_ENV"
echo "XDG_RUNTIME_DIR=$XDG_RUNTIME_DIR" >> "$GITHUB_ENV"
echo "$HOME/bin" >> "$GITHUB_PATH"
# Polls for the daemon (up to 60s) and asserts the security options report
# "rootless", proving we are NOT talking to the rootful daemon.
- name: Verify rootless daemon
run: |
set -euxo pipefail
chmod +x scripts/docker/resolve-docker-host.sh
eval "$(./scripts/docker/resolve-docker-host.sh --mode rootless --emit-export --warn-if-docker-group)"
TIMEOUT=60
until docker info >/dev/null 2>&1 || [ "$TIMEOUT" -le 0 ]; do
sleep 2
TIMEOUT=$((TIMEOUT - 2))
done
if ! docker info >/dev/null 2>&1; then
echo "Rootless Docker daemon did not start in time."
if [ -f "$RUNNER_TEMP/dockerd-rootless.log" ]; then
tail -n 200 "$RUNNER_TEMP/dockerd-rootless.log"
fi
exit 1
fi
docker info --format '{{json .SecurityOptions}}' | tee "$RUNNER_TEMP/rootless-security-options.json"
grep -qi rootless "$RUNNER_TEMP/rootless-security-options.json"
- name: Verify rootless works without docker group membership
run: |
set -euxo pipefail
# Remove the current user from the docker group to simulate a pure rootless
# environment where the user has no access to the rootful daemon socket.
if ! sudo deluser "$USER" docker 2>/dev/null; then
echo "::warning::Could not remove $USER from docker group (may not be a member)."
fi
# Verify removal took effect in /etc/group before proceeding.
if getent group docker | grep -qw "$USER"; then
echo "::error::$USER is still listed in docker group in /etc/group after deluser."
exit 1
fi
# Write test commands to a temp script so we can execute them in a
# fresh login shell that re-reads /etc/group via initgroups().
cat > "$RUNNER_TEMP/test-no-docker-group.sh" << 'TESTSCRIPT'
#!/usr/bin/env bash
set -euxo pipefail
# Confirm the docker group is no longer active.
if id -nG | tr " " "\n" | grep -qx docker; then
echo "::error::docker group is still active; cannot validate non-docker-group scenario."
exit 1
fi
# The rootless daemon must still be reachable via DOCKER_HOST.
docker info >/dev/null 2>&1
echo "Rootless Docker is reachable without docker group membership ✓"
# Validate compose config still parses correctly.
docker compose --env-file ./envFiles/.env.rootless.production \
-f ./compose.yaml -f ./docker/compose.rootless.production.yaml \
config -q
echo "Compose config valid without docker group ✓"
TESTSCRIPT
chmod +x "$RUNNER_TEMP/test-no-docker-group.sh"
# `su --login` calls initgroups() which re-reads /etc/group, so the
# docker supplementary group is actually dropped (unlike sg which
# only changes the effective GID). Pass required env vars explicitly.
# NOTE: `su --login` resets CWD to $HOME; cd back to the workspace.
sudo su --login "$USER" -c "\
export DOCKER_HOST='$DOCKER_HOST' \
XDG_RUNTIME_DIR='$XDG_RUNTIME_DIR' \
PATH='$PATH'; \
cd '$GITHUB_WORKSPACE' && \
bash '$RUNNER_TEMP/test-no-docker-group.sh'"
- name: Set CI-safe secrets for rootless production template
run: |
set -euo pipefail
# Align build args with the current runner user to avoid permission mismatches.
echo "API_UID=$(id -u)" >> "$GITHUB_ENV"
echo "API_GID=$(id -g)" >> "$GITHUB_ENV"
# Override sentinel placeholders so the rootless production stack can start in CI.
echo "API_JWT_SECRET=ci_jwt_secret_0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" >> "$GITHUB_ENV"
echo "API_AUTH_JWT_SECRET=ci_auth_jwt_secret_0123456789abcdef" >> "$GITHUB_ENV"
echo "API_COOKIE_SECRET=ci_cookie_secret_0123456789abcdef0123456789abcdef" >> "$GITHUB_ENV"
echo "API_ADMINISTRATOR_USER_PASSWORD=ci_admin_password_0123456789abcdef" >> "$GITHUB_ENV"
# Ensure API <-> MinIO credentials match.
echo "API_MINIO_SECRET_KEY=ci_minio_secret_0123456789abcdef" >> "$GITHUB_ENV"
echo "MINIO_ROOT_PASSWORD=ci_minio_secret_0123456789abcdef" >> "$GITHUB_ENV"
# Ensure API <-> Postgres credentials match.
echo "API_POSTGRES_PASSWORD=ci_postgres_password_0123456789abcdef" >> "$GITHUB_ENV"
echo "POSTGRES_PASSWORD=ci_postgres_password_0123456789abcdef" >> "$GITHUB_ENV"
# Boot order: build -> infrastructure -> API (with health wait) -> Caddy,
# dumping diagnostics at each failure point before exiting non-zero.
- name: Start rootless production stack
run: |
set -euo pipefail
dc() {
docker compose --env-file ./envFiles/.env.rootless.production \
-f ./compose.yaml -f ./docker/compose.rootless.production.yaml "$@"
}
echo "=== Docker Compose version ==="
docker compose version
echo "=== Resolved compose config ==="
dc config -q
# 1. Build all images first so build failures surface clearly.
dc build
# 2. Start infrastructure (postgres, redis, minio) and wait for them.
echo "=== Starting infrastructure services ==="
dc up -d postgres redis minio
# Wait for Postgres to accept connections (bounded retry, fail early).
PG_RETRIES=30
until dc exec postgres pg_isready -U talawa --timeout=2 2>/dev/null; do
PG_RETRIES=$((PG_RETRIES - 1))
if [ "$PG_RETRIES" -le 0 ]; then
echo "::error::Postgres did not become ready within 60 s."
dc logs --tail 50 postgres
exit 1
fi
echo "Waiting for Postgres... ($PG_RETRIES retries left)"
sleep 2
done
echo "Postgres is ready ✓"
echo "Infrastructure status:"
dc ps
# 3. Start API separately so we can capture its logs if it crashes.
echo "=== Starting API ==="
dc up -d api
# 4. Wait for API healthcheck.
api_id="$(dc ps -q api)"
TIMEOUT=120
until [ "$(docker inspect -f '{{.State.Health.Status}}' "$api_id" 2>/dev/null)" = "healthy" ] || [ "$TIMEOUT" -le 0 ]; do
# If the container exited, stop waiting immediately.
if [ "$(docker inspect -f '{{.State.Status}}' "$api_id" 2>/dev/null)" = "exited" ]; then
echo "::error::API container exited unexpectedly."
break
fi
echo "Waiting for API health... ($TIMEOUT s remaining)"
sleep 3
TIMEOUT=$((TIMEOUT - 3))
done
if [ "$(docker inspect -f '{{.State.Health.Status}}' "$api_id" 2>/dev/null)" != "healthy" ]; then
echo "::error::API did not become healthy."
echo "=== API container state ==="
docker inspect -f '{{json .State}}' "$api_id" || true
echo ""
echo "=== API healthcheck log ==="
docker inspect -f '{{json .State.Health}}' "$api_id" || true
echo ""
echo "=== API application logs ==="
dc logs --tail 300 api
echo "=== All services ==="
dc ps
exit 1
fi
echo "API is healthy. Starting Caddy..."
dc up -d caddy
# NOTE(review): the awk split below assumes an IPv4 "host:port" mapping from
# `dc port`; an IPv6 "[::]:port" form would yield the wrong field — confirm.
- name: Verify API reachable via Caddy
run: |
set -euo pipefail
dc() {
docker compose --env-file ./envFiles/.env.rootless.production \
-f ./compose.yaml -f ./docker/compose.rootless.production.yaml "$@"
}
# Wait for Caddy port to be published (with retry)
CADDY_RETRIES=10
until http_port="$(dc port caddy 80 | head -n 1 | awk -F: '{print $2}')" && [ -n "$http_port" ]; do
CADDY_RETRIES=$((CADDY_RETRIES - 1))
if [ "$CADDY_RETRIES" -le 0 ]; then
echo "::error::Failed to determine published HTTP port for Caddy after 20s."
dc ps
exit 1
fi
echo "Waiting for Caddy port mapping... ($CADDY_RETRIES retries left)"
sleep 2
done
echo "Caddy port: $http_port"
# Poll Caddy until it's ready to accept connections
echo "Waiting for Caddy to initialize..."
CADDY_READY_RETRIES=10
until curl -fsS -m 2 "http://localhost:${http_port}/" >/dev/null 2>&1 || [ "$CADDY_READY_RETRIES" -le 0 ]; do
CADDY_READY_RETRIES=$((CADDY_READY_RETRIES - 1))
echo "Caddy not ready yet... ($CADDY_READY_RETRIES retries left)"
sleep 1
done
# Re-probe to determine actual success/failure (fixes off-by-one)
if ! curl -fsS -m 2 "http://localhost:${http_port}/" >/dev/null 2>&1; then
echo "::error::Caddy failed to become ready after 10s."
dc ps
dc logs caddy
exit 1
fi
echo "Caddy is ready!"
echo "Checking health endpoint via Caddy on http://localhost:${http_port}/healthcheck"
curl -fsS --retry 10 --retry-delay 2 --max-time 10 "http://localhost:${http_port}/healthcheck" > /dev/null
# Always tear the stack down, even when earlier steps failed.
- name: Stop rootless production stack
if: always()
run: |
docker compose --env-file ./envFiles/.env.rootless.production \
-f ./compose.yaml -f ./docker/compose.rootless.production.yaml \
down -v
- name: Print rootless daemon logs on failure
if: failure()
run: |
if [ -f "$RUNNER_TEMP/dockerd-rootless.log" ]; then
tail -n 200 "$RUNNER_TEMP/dockerd-rootless.log"
fi
# Runs Knip twice: once for unused files/exports and once (with a separate
# config) for unused dependencies.
Check_unused_code:
name: Check for unused files, exports and dependencies
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4.2.2
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: "24.x"
- name: Setup pnpm
uses: pnpm/action-setup@v4
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Run Knip for files and exports
run: pnpm knip --include files,exports
# Dummy environment variables required for Knip to load drizzle.config.ts during file parsing
env:
API_POSTGRES_DATABASE: dummy_db
API_POSTGRES_PASSWORD: dummy_password
API_POSTGRES_HOST: localhost
API_POSTGRES_PORT: "5432"
API_POSTGRES_USER: dummy_user
API_POSTGRES_SSL_MODE: "false"
- name: Run Knip for dependencies
run: pnpm knip --config knip.deps.json --include dependencies
# Dummy environment variables required for Knip to load drizzle.config.ts during file parsing
env:
API_POSTGRES_DATABASE: dummy_db
API_POSTGRES_PASSWORD: dummy_password
API_POSTGRES_HOST: localhost
API_POSTGRES_PORT: "5432"
API_POSTGRES_USER: dummy_user
API_POSTGRES_SSL_MODE: "false"
# Fails the PR when sensitive files (per .github/workflows/config/sensitive-files.txt)
# are modified without the 'ignore-sensitive-files-pr' override label.
Check-Sensitive-Files:
if: ${{ github.actor != 'dependabot[bot]' }}
name: Checks if sensitive files have been changed without authorization
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Checkout centralized CI/CD scripts
uses: actions/checkout@v4
with:
repository: PalisadoesFoundation/.github
ref: main
path: .github-central
- name: Make sensitive file checker executable
run: chmod +x .github-central/.github/workflows/scripts/sensitive_file_check.py
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: 3.11
- name: Get PR labels
id: check-labels
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
LABELS="$(gh api repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/labels --jq '.[].name' | tr '\n' ' ')"
if echo "$LABELS" | grep -qw "ignore-sensitive-files-pr"; then
echo "::notice::Skipping sensitive files check due to 'ignore-sensitive-files-pr' label."
echo "skip=true" >> $GITHUB_OUTPUT
else
echo "skip=false" >> $GITHUB_OUTPUT
fi
- name: Get changed files
id: sensitive-changed-files
if: steps.check-labels.outputs.skip != 'true'
env:
BASE_REF_SHA: ${{ github.event.pull_request.base.sha }}
HEAD_REF_SHA: ${{ github.event.pull_request.head.sha }}
run: |
set -euo pipefail
if [ -z "$BASE_REF_SHA" ] || [ -z "$HEAD_REF_SHA" ]; then
echo "::error::Missing BASE_REF_SHA or HEAD_REF_SHA"
exit 1
fi
echo "Base SHA: $BASE_REF_SHA"
echo "Head SHA: $HEAD_REF_SHA"
BASE_SHA=$(git merge-base "$BASE_REF_SHA" "$HEAD_REF_SHA")
echo "Merge base: $BASE_SHA"
ALL_CHANGED_FILES=$(git diff --name-only --diff-filter=ACMR "$BASE_SHA" "$HEAD_REF_SHA")
if [ -z "$ALL_CHANGED_FILES" ]; then
echo "all_changed_files=" >> "$GITHUB_OUTPUT"
else
echo "all_changed_files=$(echo "$ALL_CHANGED_FILES" | tr '\n' ' ')" >> "$GITHUB_OUTPUT"
fi
- name: Run sensitive files check
# The id and continue-on-error are required: the follow-up step below is
# gated on `steps.sensitive-check.outcome == 'failure'`, which previously
# referenced a non-existent step id and could never run (and the job would
# have stopped here on failure anyway). With continue-on-error the job
# proceeds to the explanatory step, which re-fails the job with guidance.
id: sensitive-check
continue-on-error: true
if: steps.check-labels.outputs.skip != 'true'
env:
ALL_CHANGED_FILES: ${{ steps.sensitive-changed-files.outputs.all_changed_files }}
run: |
if [ -z "$ALL_CHANGED_FILES" ]; then
echo "No changed files. Skipping sensitive file check."
exit 0
fi
python3 .github-central/.github/workflows/scripts/sensitive_file_check.py \
--config .github/workflows/config/sensitive-files.txt \
--files $(set -f; echo $ALL_CHANGED_FILES)
- name: List all changed unauthorized files
if: steps.sensitive-check.outcome == 'failure'
run: |
echo "::error::Unauthorized changes detected in sensitive files."
echo ""
echo "To override:"
echo "Add the 'ignore-sensitive-files-pr' label to this PR."
exit 1
# Delegates to the organization's shared TypeScript autodocs workflow.
Check-AutoDocs:
needs: [Code-Quality-Checks]
uses: PalisadoesFoundation/.github/.github/workflows/typescript-autodocs.yml@main
with:
pnpm-version: "10.28.1" # This version should match package.json packageManager field
# Regenerates GraphQL schema docs and fails if the regenerated output differs
# from what is committed (i.e. docs are stale).
Generate-Schema-Docs:
if: ${{ github.actor != 'dependabot[bot]' }}
name: Generate GraphQL Schema Documentation
runs-on: ubuntu-latest
needs: [Code-Quality-Checks]
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install pnpm
uses: pnpm/action-setup@v4
with:
run_install: false
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: "24.x"
cache: "pnpm"
- name: Prepare dependency store
run: pnpm fetch
- name: Install Docs dependencies
working-directory: ./docs
run: pnpm install --frozen-lockfile --prefer-offline
- name: Generate GraphQL Schema Markdown
working-directory: ./docs
run: pnpm docusaurus graphql-to-doc
# A dirty working tree after generation means committed docs are outdated.
- name: Check for uncommitted schema changes
run: |
if [ -n "$(git status --porcelain)" ]; then
echo "::error::Schema files are outdated or missing."
echo "Please run 'pnpm docusaurus graphql-to-doc' inside '/docs' locally and commit the updated files."
echo ""
echo "Changed files:"
git status --porcelain
exit 1
else
echo "Schema is up to date."
fi
# Aggregation gate: downstream test jobs depend on this single job instead of
# listing every pre-test check individually.
Pre-Test-Checks-Pass:
name: All Pre-Testing Checks Pass
runs-on: ubuntu-latest
needs:
[
Code-Quality-Checks,
python_checks,
check_type_errors,
check_mock_isolation,
check_drizzle_migrations,
check_gql_tada,
Check-AutoDocs,
Check_unused_code,
Generate-Schema-Docs,
Python-Compliance,
]
steps:
- name: This job intentionally does nothing
run: echo "This job intentionally does nothing"
# Runs the Ruby-based install-script tests (bashcov coverage) and uploads the
# coverage report to Codecov under the 'install' flag.
Install-Script-Tests:
name: Run install script tests
runs-on: ubuntu-latest
timeout-minutes: 15
needs: [Pre-Test-Checks-Pass]
steps:
- name: Checkout this repository
uses: actions/checkout@v4.2.2
# simplecov 0.21.x pulls simplecov-html 0.11.0 which requires Ruby ~> 2.4; use
# a patched simplecov so we can use simplecov-html 0.12.3 (Ruby 3.2 compatible)
- name: Prepare simplecov for Ruby 3.2
run: |
mkdir -p test/install_scripts/vendor
git clone --depth 1 --branch v0.21.2 https://github.com/simplecov-ruby/simplecov.git test/install_scripts/vendor/simplecov
sed -i 's/simplecov-html", "~> 0.11/simplecov-html", ">= 0.11/' test/install_scripts/vendor/simplecov/simplecov.gemspec
- name: Setup Ruby
uses: ruby/setup-ruby@v1
with:
ruby-version: "3.2"
bundler-cache: true
working-directory: test/install_scripts
- name: Run install script tests with coverage
working-directory: test/install_scripts
run: |
chmod +x run-all.sh
bundle exec bashcov --root ../.. -- ./run-all.sh
# Upload only when coverage file exists so the install flag gets data and shows in Codecov
- name: Upload install coverage to Codecov
if: "!cancelled() && hashFiles('test/install_scripts/coverage/coverage.xml') != ''"
uses: codecov/codecov-action@v5
with:
name: "${{env.CODECOV_UNIQUE_NAME}}-install"
token: ${{ secrets.CODECOV_TOKEN }}
fail_ci_if_error: false
verbose: true
files: test/install_scripts/coverage/coverage.xml
flags: install
# Sharded (12-way) unit + integration test run inside docker compose; each
# shard uploads its coverage directory as an artifact for later merging.
Run-Tests:
name: Run tests for talawa api (Shard ${{ matrix.shard }})
timeout-minutes: 10
runs-on: ubuntu-latest
needs: [Pre-Test-Checks-Pass]
env:
# Must match the number of entries in matrix.shard below.
TOTAL_SHARDS: 12
strategy:
fail-fast: false
matrix:
shard: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
steps:
- name: Checkout this repository
uses: actions/checkout@v4.2.2
- name: Create .env file for talawa api testing environment
run: |
cp ./envFiles/.env.ci ./.env
echo "NODE_ENV=production" >> .env
JWT_SECRET=$(openssl rand -hex 32)
echo "API_JWT_SECRET=$JWT_SECRET" >> .env
echo "API_AUTH_JWT_SECRET=$JWT_SECRET" >> .env
- name: Build talawa api compose testing environment
run: docker compose build
- name: Start test services (postgres-test, minio-test, redis-test, mailpit)
run: docker compose up -d postgres-test minio-test redis-test mailpit
# Bounded readiness polls: Postgres readiness is mandatory (hard failure);
# Minio/Redis/Mailpit timeouts only emit warnings and continue.
- name: Wait for test services to be ready
run: |
set -euo pipefail
echo "Waiting for Postgres test service..."
timeout=90
until docker compose exec -T postgres-test pg_isready -h localhost -p 5432 -U postgres >/dev/null 2>&1 || [ $timeout -eq 0 ]; do
echo "Postgres not ready yet... ($timeout seconds remaining)"
sleep 1
((timeout--))
done
if [ $timeout -eq 0 ]; then
echo "Error: Postgres failed to start"
docker compose ps
docker compose logs postgres-test
docker compose down -v
exit 1
fi
echo "Postgres is ready."
# Wait for minio-test health check
echo "Waiting for Minio test service..."
timeout=60
until docker compose exec -T minio-test mc ready local >/dev/null 2>&1 || [ $timeout -eq 0 ]; do
echo "Minio not ready yet... ($timeout seconds remaining)"
sleep 1
((timeout--))
done
if [ $timeout -eq 0 ]; then
echo "::warning::Minio health check timed out, continuing anyway"
else
echo "Minio is ready."
fi
# Wait for redis-test health check
echo "Waiting for Redis test service..."
timeout=60
until docker compose exec -T redis-test redis-cli ping >/dev/null 2>&1 || [ $timeout -eq 0 ]; do
echo "Redis not ready yet... ($timeout seconds remaining)"
sleep 1
((timeout--))
done
if [ $timeout -eq 0 ]; then
echo "::warning::Redis health check timed out, continuing anyway"
else
echo "Redis is ready."
fi
# Wait for mailpit health check
echo "Waiting for Mailpit service..."
timeout=60
until curl -f http://localhost:8025/api/v1/info >/dev/null 2>&1 || [ $timeout -eq 0 ]; do
echo "Mailpit not ready yet... ($timeout seconds remaining)"
sleep 1
((timeout--))
done
if [ $timeout -eq 0 ]; then
echo "::warning::Mailpit health check timed out, continuing anyway"
else
echo "Mailpit is ready."
fi
echo "All test services are ready."
- name: Test Mailpit email service
run: |
echo "Testing Mailpit email service..."
# Test Mailpit API is responding
MAILPIT_INFO=$(curl -s http://localhost:8025/api/v1/info 2>/dev/null || echo '{}')
echo "Mailpit info: $MAILPIT_INFO"
# Check if Mailpit API is responding with valid data
if echo "$MAILPIT_INFO" | grep -q '"version"'; then
echo "SUCCESS: Mailpit API is responding correctly"
echo "SUCCESS: Mailpit is ready to capture emails"
else
echo "::warning::Mailpit API may not be responding properly"
fi
# Note: Email functionality will be tested during the actual test suite execution
# The API container will use Mailpit as configured in .env.ci
- name: Run tests (shard ${{ matrix.shard }}/${{ env.TOTAL_SHARDS }})
env:
SHARD_INDEX: ${{ matrix.shard }}
SHARD_COUNT: ${{ env.TOTAL_SHARDS }}
run: |
# Run tests without --rm to allow coverage extraction
docker compose run --name talawa-api-test-shard-${{ matrix.shard }} \
-e SHARD_INDEX=$SHARD_INDEX \
-e SHARD_COUNT=$SHARD_COUNT \
api /bin/sh -c "node scripts/run-shard.js --coverage -c vitest.unit.config.ts --coverage.reportsDirectory=./coverage/unit && node scripts/run-shard.js --coverage -c vitest.integration.config.ts --coverage.reportsDirectory=./coverage/integration"
- name: Copy coverage from container
if: always()
run: |
# Copy coverage from the named container
docker cp talawa-api-test-shard-${{ matrix.shard }}:/home/talawa/api/coverage ./coverage || echo "::warning::Failed to copy coverage from container talawa-api-test-shard-${{ matrix.shard }}"
- name: Cleanup test container
if: always()
run: |
# Remove the test container
docker rm -f talawa-api-test-shard-${{ matrix.shard }} || true
- name: Upload coverage artifact
if: always()
uses: actions/upload-artifact@v4
with:
name: coverage-shard-${{ matrix.shard }}
path: ./coverage/
retention-days: 1
Merge-Coverage:
name: Merge Coverage Reports
runs-on: ubuntu-latest
needs: [Run-Tests]
if: success()
steps:
- name: Checkout the Repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Fetch base branch for Codecov comparison
run: |
set -e
echo "Fetching base branch: ${{ github.base_ref }}"
if ! git fetch origin ${{ github.base_ref }} 2>&1; then
echo "ERROR: Failed to fetch base branch '${{ github.base_ref }}' from origin"
exit 1
fi
echo "Successfully fetched base branch"
- name: Install pnpm
uses: pnpm/action-setup@v4
with:
run_install: false
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "24.x"
cache: "pnpm"
- name: Prepare dependency store
run: pnpm fetch
- name: Install Dependencies
run: pnpm install --frozen-lockfile --prefer-offline
- name: Download all coverage artifacts
id: download-artifacts
continue-on-error: true
uses: actions/download-artifact@v4
with:
pattern: coverage-shard-*
path: ./coverage-shards/
merge-multiple: false
- name: Check if artifacts were downloaded
id: check-artifacts
run: |
# Check if any coverage files exist
if find coverage-shards -name "lcov.info" -type f | grep -q .; then
echo "artifacts_found=true" >> $GITHUB_OUTPUT
echo "Coverage artifacts found"
else
echo "artifacts_found=false" >> $GITHUB_OUTPUT
echo "No coverage artifacts found - tests may have been skipped"
fi
      # Merge per-shard JSON coverage into per-type (unit/integration) lcov
      # reports, then build one combined report for the local threshold check.
      - name: Prepare Split Coverage Reports
        if: steps.check-artifacts.outputs.artifacts_found == 'true'
        run: |
          # Function to merge and generate report for a specific type (unit or integration)
          process_coverage() {
            TYPE=$1
            echo "Processing $TYPE coverage..."
            mkdir -p ./coverage/$TYPE
            mkdir -p .nyc_output_$TYPE
            # Find all coverage-final.json files for this type
            # Expected path: coverage-shards/coverage-shard-*/$TYPE/coverage-final.json
            find coverage-shards -path "*/$TYPE/coverage-final.json" -type f > json-files-$TYPE.txt
            JSON_COUNT=$(wc -l < json-files-$TYPE.txt)
            echo "Found $JSON_COUNT JSON files for $TYPE"
            if [ "$JSON_COUNT" -eq 0 ]; then
              echo "::warning::No coverage files found for $TYPE"
              return
            fi
            # Copy each shard's JSON to a unique name so `nyc merge` sees all of them
            SHARD_NUM=0
            while IFS= read -r file; do
              cp "$file" ".nyc_output_$TYPE/coverage-shard-${SHARD_NUM}.json"
              SHARD_NUM=$((SHARD_NUM + 1))
            done < json-files-$TYPE.txt
            # Merge
            pnpm exec nyc merge .nyc_output_$TYPE ./coverage/$TYPE/coverage-final.json
            # Path rewrite: tests ran inside the Docker container, so translate
            # container paths to the runner workspace so report paths resolve
            DOCKER_PATH="/home/talawa/api"
            RUNNER_PATH="$GITHUB_WORKSPACE"
            sed -i "s|${DOCKER_PATH}|${RUNNER_PATH}|g" ./coverage/$TYPE/coverage-final.json
            # Report: `nyc report` reads from .nyc_output, so stage the merged
            # file there (wiped first to avoid leftovers from the other type)
            rm -rf .nyc_output
            mkdir -p .nyc_output
            cp ./coverage/$TYPE/coverage-final.json .nyc_output/coverage-final.json
            pnpm exec nyc report --reporter=lcov --report-dir=./coverage/$TYPE
            rm -rf .nyc_output_$TYPE
          }
          process_coverage "unit"
          process_coverage "integration"
          # Create a combined report for local VeryGoodCoverage check
          echo "Merging ALL coverage for local check..."
          mkdir -p .nyc_output_all
          mkdir -p ./coverage/vitest
          find coverage-shards -name "coverage-final.json" -type f > json-files-all.txt
          SHARD_NUM=0
          while IFS= read -r file; do
            cp "$file" ".nyc_output_all/coverage-shard-${SHARD_NUM}.json"
            SHARD_NUM=$((SHARD_NUM + 1))
          done < json-files-all.txt
          pnpm exec nyc merge .nyc_output_all ./coverage/vitest/coverage-final.json
          # Path rewrite (same container-to-runner translation as above)
          DOCKER_PATH="/home/talawa/api"
          RUNNER_PATH="$GITHUB_WORKSPACE"
          sed -i "s|${DOCKER_PATH}|${RUNNER_PATH}|g" ./coverage/vitest/coverage-final.json
          # Report
          rm -rf .nyc_output
          mkdir -p .nyc_output
          cp ./coverage/vitest/coverage-final.json .nyc_output/coverage-final.json
          pnpm exec nyc report --reporter=lcov --report-dir=./coverage/vitest
          rm -rf .nyc_output_all .nyc_output
- name: Calculate merge base for Codecov
if: steps.check-artifacts.outputs.artifacts_found == 'true'
id: get-merge-base
run: |
MERGE_BASE=$(git merge-base origin/${{ github.base_ref }} HEAD)
echo "Merge base commit: $MERGE_BASE"
echo "merge_base=$MERGE_BASE" >> $GITHUB_OUTPUT
git show -s --format=%ci $MERGE_BASE
      #######################################################################
      # DO NOT DELETE ANY references to env.CODECOV_UNIQUE_NAME in this
      # section. They are required for accurate calculations
      #######################################################################
      # Upload unit and integration coverage as separately flagged reports so
      # Codecov tracks each suite independently.
      - name: Upload Unit Coverage to Codecov
        if: steps.check-artifacts.outputs.artifacts_found == 'true'
        uses: codecov/codecov-action@v5
        with:
          name: "${{env.CODECOV_UNIQUE_NAME}}-unit"
          token: ${{ secrets.CODECOV_TOKEN }}
          fail_ci_if_error: true
          verbose: true
          exclude: "docs/"
          gcov_ignore: "docs/"
          files: ./coverage/unit/lcov.info
          flags: unit
      - name: Upload Integration Coverage to Codecov
        if: steps.check-artifacts.outputs.artifacts_found == 'true'
        uses: codecov/codecov-action@v5
        with:
          name: "${{env.CODECOV_UNIQUE_NAME}}-integration"
          token: ${{ secrets.CODECOV_TOKEN }}
          fail_ci_if_error: true
          verbose: true
          exclude: "docs/"
          gcov_ignore: "docs/"
          files: ./coverage/integration/lcov.info
          flags: integration
      # Enforce the minimum coverage locally on the combined report,
      # independent of the Codecov service.
      - name: Test acceptable level of code coverage
        if: steps.check-artifacts.outputs.artifacts_found == 'true'
        uses: VeryGoodOpenSource/very_good_coverage@v3
        with:
          path: "./coverage/vitest/lcov.info"
          min_coverage: 80.0
Test-Docusaurus-Deployment:
name: Test Deployment to https://docs-api.talawa.io
runs-on: ubuntu-latest
needs: [Run-Tests]
# Run only if the develop branch and not dependabot
if: ${{ github.actor != 'dependabot[bot]' && github.event.pull_request.base.ref == 'develop' }}
steps:
- uses: actions/checkout@v4
- uses: pnpm/action-setup@v4
with:
cache: true
cache_dependency_path: |
docs/pnpm-lock.yaml
docs/package.json
# Run Docusaurus in the ./docs directory
- name: Install dependencies
working-directory: ./docs
run: pnpm install --frozen-lockfile
- name: Test building the website
working-directory: ./docs
run: pnpm build
  # Boots the devcontainer stack, runs migrations, starts the backend, and
  # verifies the sample-data seeding script completes end-to-end.
  Check-Sample-Data:
    name: Check If Sample Data Script Executes Successfully
    timeout-minutes: 5
    runs-on: ubuntu-latest
    needs: [Run-Tests]
    steps:
      - name: Checkout Backend
        uses: actions/checkout@v4.2.2
      # Bring up the devcontainer services (api, postgres, ...) with the
      # committed devcontainer env file.
      - name: Setup Devcontainer
        run: |
          npm install -g @devcontainers/cli
          cp envFiles/.env.devcontainer .env
          devcontainer up --workspace-folder . --config .devcontainer/default/devcontainer.json
          echo "Devcontainer started"
      - name: Validate Devcontainer Environment
        run: |
          echo "Running environment validation checks..."
          # Run ONLY the validation script (lightweight, doesn't reinstall everything)
          docker exec talawa-api-1 /bin/bash -c "cd /home/talawa/api && ./.devcontainer/validate-setup.sh"
          # If validation fails, this step will fail
          echo "Environment validation passed"
      # Poll pg_isready for up to 90s so migrations don't race the DB start.
      - name: Wait for Postgres in devcontainer before migrations
        run: |
          set -euo pipefail
          POSTGRES_USER=postgres
          echo "Waiting for Postgres service via compose..."
          timeout=90
          until docker compose exec -T postgres pg_isready -h localhost -p 5432 -U "$POSTGRES_USER" >/dev/null 2>&1 || [ "$timeout" -le 0 ]; do
            echo "Postgres not ready ($timeout s left)"
            sleep 1
            ((timeout--))
          done
          if [ "$timeout" -eq 0 ]; then
            echo "Error: Postgres failed to start"
            docker compose ps
            docker compose logs postgres
            exit 1
          fi
      - name: Apply Database Migrations
        run: |
          docker exec talawa-api-1 /bin/bash -c 'pnpm apply_drizzle_migrations'
      # -d: detached, so the step returns while the server keeps running.
      - name: Start Backend Server
        run: |
          docker exec -d talawa-api-1 /bin/bash -c 'pnpm run start_development_server'
      # Poll the healthcheck endpoint every 3s for up to 90s; dump logs on timeout.
      - name: Wait for Backend to be Ready
        run: |
          set -euo pipefail
          echo "Waiting for backend at http://127.0.0.1:4000/healthcheck"
          TIMEOUT=90
          INTERVAL=3
          ELAPSED=0
          until docker exec talawa-api-1 curl -sf http://127.0.0.1:4000/healthcheck >/dev/null; do
            if [ "$ELAPSED" -ge "$TIMEOUT" ]; then
              echo "Backend failed to start within ${TIMEOUT}s"
              echo "=== Backend container logs (last 150 lines) ==="
              docker logs talawa-api-1 --tail 150
              exit 1
            fi
            echo "Backend not ready yet... (${ELAPSED}s)"
            sleep $INTERVAL
            ELAPSED=$((ELAPSED + INTERVAL))
          done
          echo "Backend is up and responding"
      - name: Seed Sample Data
        run: |
          set -euo pipefail
          echo "=== Seeding Sample Data ==="
          # `set -a; source ./.env; set +a` exports the container's .env so
          # the seed script sees the database credentials.
          if docker exec talawa-api-1 /bin/bash -c 'set -a; source ./.env; set +a; pnpm run add:sample_data'; then
            echo "Seeding completed successfully"
          else
            echo "Seeding failed"
            echo "=== Container status ==="
            docker ps | grep talawa || true
            echo "=== Backend logs ==="
            docker logs talawa-api-1 --tail 50
            echo "=== Users table contents ==="
            # NOTE(review): connects as user/db "talawa" while the wait step
            # above uses POSTGRES_USER=postgres — confirm both credentials
            # are valid in the devcontainer postgres.
            docker exec talawa-postgres-1 psql -U talawa -d talawa \
              -c "SELECT id, email_address, name, role FROM users;" \
              2>/dev/null || echo "Could not query users"
            exit 1
          fi
      - name: Cleanup - Free ports by stopping containers
        if: always()
        run: |
          docker compose down
Check-App-Startup:
name: Check App Startup and Health
runs-on: ubuntu-latest
needs: [Run-Tests]
steps:
- name: Checkout repository
uses: actions/checkout@v4.2.2
- name: Install Devcontainer CLI
run: npm install -g @devcontainers/cli
- name: Copy devcontainer environment file
run: cp envFiles/.env.devcontainer .env
- name: Bring up devcontainer
run: devcontainer up --workspace-folder . --config .devcontainer/default/devcontainer.json
- name: Validate Devcontainer Environment
run: |
echo "Running environment validation checks..."
# Run ONLY the validation script (lightweight, doesn't reinstall everything)
docker exec talawa-api-1 /bin/bash -c "cd /home/talawa/api && ./.devcontainer/validate-setup.sh"
# If validation fails, this step will fail
echo "Environment validation passed"
- name: Wait for Postgres in devcontainer
run: |
set -euo pipefail
POSTGRES_USER=postgres
echo "Waiting for Postgres service via compose..."
timeout=90
until docker compose exec -T postgres pg_isready -h localhost -p 5432 -U "$POSTGRES_USER" >/dev/null 2>&1 || [ "$timeout" -le 0 ]; do
echo "Postgres not ready ($timeout s left)"
sleep 1
((timeout--))
done
if [ "$timeout" -eq 0 ]; then
echo "Error: Postgres failed to start"
docker compose ps
docker logs postgres
exit 1
fi
- name: Wait for Devcontainer to be ready
run: |
echo "Waiting for devcontainer services to be ready..."
sleep 10
- name: Verify Running Containers
run: docker ps
- name: Start server and monitor logs
id: api-container
run: |
API_CONTAINER=$(docker compose ps -q api 2>/dev/null || true)
if [ -z "$API_CONTAINER" ]; then
API_CONTAINER=$(docker ps --format '{{.Names}}' | grep -E '\-api\-1$' | head -1 || true)
fi
if [ -z "$API_CONTAINER" ]; then
API_CONTAINER="talawa-api-1"
fi
echo "name=$API_CONTAINER" >> "$GITHUB_OUTPUT"
echo "Resolved API container: $API_CONTAINER"
- name: Install dependencies inside the container
run: docker exec ${{ steps.api-container.outputs.name }} /bin/bash -c 'pnpm install'
- name: Start server
run: docker exec -d ${{ steps.api-container.outputs.name }} /bin/bash -c 'pnpm run start_development_server'
- name: Wait for GraphQL endpoint to become available
run: |
set -euo pipefail
API_CONTAINER="${{ steps.api-container.outputs.name }}"
echo "Waiting for the GraphQL endpoint to become available (container: $API_CONTAINER)..."
RESPONSE="Connection failed"
for i in $(seq 1 60); do
if ! docker exec "$API_CONTAINER" true 2>/dev/null; then
echo "Container $API_CONTAINER not found or not running. Waiting... (attempt $i/60)"
sleep 2
continue
fi
if ! docker exec "$API_CONTAINER" which curl >/dev/null 2>&1; then
docker exec "$API_CONTAINER" sh -c 'command -v wget >/dev/null || (apt-get update -qq && apt-get install -y -qq curl)' || true
fi
RESPONSE=$(docker exec "$API_CONTAINER" curl -s -X POST http://127.0.0.1:4000/graphql -H "Content-Type: application/json" -d '{"query":"{__typename}"}' 2>/dev/null || echo "Connection failed")
if echo "$RESPONSE" | grep -q '__typename'; then
echo "GraphQL endpoint is available!"
exit 0
fi
echo "GraphQL endpoint not ready (attempt $i/60). Retrying in 2 seconds..."
sleep 2
done
echo "GraphQL endpoint did not become available within the expected time."
echo "=== Last response ==="
echo "${RESPONSE:-Connection failed}"
if docker exec "$API_CONTAINER" true 2>/dev/null; then
echo "=== Backend container logs (tail 80) ==="
docker logs "$API_CONTAINER" --tail 80
else
echo "=== Container $API_CONTAINER not running; docker ps -a ==="
docker ps -a
fi
exit 1
- name: Cleanup - Free ports by stopping containers
if: always()
run: docker compose down