-
-
Notifications
You must be signed in to change notification settings - Fork 1.3k
1320 lines (1164 loc) · 51.1 KB
/
pull-request.yml
File metadata and controls
1320 lines (1164 loc) · 51.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
##############################################################################
##############################################################################
#
# NOTE!
#
# Please read the README.md file in this directory that defines what should
# be placed in this file.
#
##############################################################################
##############################################################################
name: Pull request workflow
on:
  pull_request:
    branches:
      - "**"
env:
  # Unique label for Codecov uploads, scoped to this workflow run.
  CODECOV_UNIQUE_NAME: CODECOV_UNIQUE_NAME-${{ github.run_id }}-${{ github.run_number }}
jobs:
  check_base_branch:
    # only run the job if the pull request actor is not dependabot
    if: ${{ github.actor != 'dependabot[bot]' }}
    name: Check base branch of the pull request to be develop
    runs-on: ubuntu-latest
    steps:
      # Fail fast when the PR targets any branch other than 'develop'.
      - if: github.event.pull_request.base.ref != 'develop'
        name: Check base branch
        run: |
          echo "Pull requests are only allowed against the 'develop' branch. Please refer to the pull request guidelines."
          echo "Error: Close this PR and try again."
          exit 1
  Code-Quality-Checks:
    name: Checking code quality
    runs-on: ubuntu-latest
    steps:
      - name: Checkout this repository
        uses: actions/checkout@v4.2.2
        with:
          fetch-depth: 0 # Fetch all history for git diff
      - name: Get changed files for error handling validation
        id: changed-files
        run: |
          # Skip if not in PR context
          if [ -z "${{ github.event.pull_request.base.sha }}" ]; then
            echo "changed_files=" >> $GITHUB_OUTPUT
            exit 0
          fi
          # Get the base branch ref
          BASE_SHA=$(git merge-base ${{ github.event.pull_request.base.sha }} ${{ github.event.pull_request.head.sha }})
          # Get all changed files (added/copied/modified/renamed/type-changed)
          CHANGED_FILES=$(git diff --name-only --diff-filter=ACMRT $BASE_SHA ${{ github.event.pull_request.head.sha }} | tr '\n' ' ')
          echo "changed_files=${CHANGED_FILES}" >> $GITHUB_OUTPUT
      # All lint/format checks below run inside the non-production API image.
      - name: Build talawa api non production environment docker image
        run: docker buildx build --file ./docker/api.Containerfile --tag talawa_api --target non_production ./
      - name: Check code format
        run: docker container run talawa_api pnpm format:check
      - name: Check TSDoc comments
        run: docker container run talawa_api pnpm lint:tsdoc
      - name: Check sanitization
        run: docker container run talawa_api pnpm lint:sanitization
      - name: Validate error handling standards
        env:
          CHANGED_FILES: ${{ steps.changed-files.outputs.changed_files }}
        run: docker container run -e CHANGED_FILES="$CHANGED_FILES" talawa_api pnpm validate:error-handling
      - name: Check if the source and target branches are different
        if: ${{ github.event.pull_request.base.ref == github.event.pull_request.head.ref }}
        run: |
          echo "Source Branch ${{ github.event.pull_request.head.ref }}"
          echo "Target Branch ${{ github.event.pull_request.base.ref }}"
          echo "Error: Source and Target Branches are the same. Please ensure they are different."
          echo "Error: Close this PR and try again."
          exit 1
      - name: Lint shell scripts (shellcheck)
        shell: bash
        run: |
          # nullglob makes the array empty (instead of a literal glob) when
          # no scripts exist; globstar enables the recursive ** pattern.
          shopt -s globstar nullglob
          files=(scripts/**/*.sh)
          if [ ${#files[@]} -eq 0 ]; then
            echo "No shell scripts found to lint."
          else
            shellcheck -x --severity=error "${files[@]}"
          fi
Python-Compliance:
name: Check Python Code Style
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python 3.11
uses: actions/setup-python@v4
with:
python-version: 3.11
- name: Cache pip packages
uses: actions/cache@v4
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Install dependencies
run: |
python3 -m venv venv
source venv/bin/activate
python -m pip install --upgrade pip
pip install -r .github/workflows/requirements.txt
- name: Run Black Formatter Check
run: |
source venv/bin/activate
black --check .
- name: Run Flake8 Linter
run: |
source venv/bin/activate
flake8 --docstring-convention google --ignore E402,E722,E203,F401,W503 .github
- name: Run pydocstyle
run: |
source venv/bin/activate
pydocstyle --convention=google --add-ignore=D415,D205 .github
- name: Run docstring compliance check
run: |
source venv/bin/activate
python .github/workflows/scripts/check_docstrings.py --directories .github
  # Centralized reusable workflow; see PalisadoesFoundation/.github for its checks.
  Count-Changed-Files:
    uses: PalisadoesFoundation/.github/.github/workflows/count-changed-files.yml@main
python_checks:
name: Run Python Checks
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Checkout centralized scripts
uses: actions/checkout@v4
with:
repository: PalisadoesFoundation/.github
path: .github-central
ref: main
- name: Get changed files
id: changed-files
run: |
ALL_CHANGED_FILES=$(git diff --name-only --diff-filter=ACMRT ${{ github.event.pull_request.base.sha }} ${{ github.event.pull_request.head.sha }})
# Skip binary assets that cannot be decoded as UTF-8 by the disable-statements check
FILTERED_FILES=$(echo "$ALL_CHANGED_FILES" | grep -Ev '\.(png|jpg|jpeg|gif|webp)$' || true)
echo "all_changed_files=$(echo "$FILTERED_FILES" | tr '\n' ' ')" >> $GITHUB_OUTPUT
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: 3.9
- name: Run Disable Statements Check
run: |
if [ -z "${{ steps.changed-files.outputs.all_changed_files }}" ]; then
echo "No eligible text files changed; skipping disable statements check."
exit 0
fi
python .github-central/.github/workflows/scripts/disable_statements_check.py --files ${{ steps.changed-files.outputs.all_changed_files }}
  check_gql_tada:
    name: Check gql tada files and configuration
    runs-on: ubuntu-latest
    steps:
      - name: Checkout this repository
        uses: actions/checkout@v4.2.2
      - name: Build talawa api non production environment docker image
        run: docker buildx build --file ./docker/api.Containerfile --tag talawa_api --target non_production ./
      # Runs the gql.tada verification script inside the freshly built image.
      - name: Check gql tada
        run: docker container run talawa_api pnpm check_gql_tada
  check_drizzle_migrations:
    name: Check drizzle migration files
    runs-on: ubuntu-latest
    steps:
      - name: Checkout this repository
        uses: actions/checkout@v4.2.2
      - name: Build talawa api non production environment docker image
        run: docker buildx build --file ./docker/api.Containerfile --tag talawa_api --target non_production ./
      - name: Check drizzle migrations
        run: |
          # A throwaway random secret satisfies the app's JWT config checks.
          JWT_SECRET=$(openssl rand -hex 32)
          docker container run --env-file ./envFiles/.env.ci -e "API_JWT_SECRET=$JWT_SECRET" -e "API_AUTH_JWT_SECRET=$JWT_SECRET" talawa_api pnpm check_drizzle_migrations
  check_type_errors:
    name: Check type errors
    runs-on: ubuntu-latest
    steps:
      - name: Checkout this repository
        uses: actions/checkout@v4.2.2
      - name: Build talawa api non production environment docker image
        run: docker buildx build --file ./docker/api.Containerfile --tag talawa_api --target non_production ./
      # TypeScript type-check runs inside the image via the pnpm script.
      - name: Check type errors
        run: docker container run talawa_api pnpm typecheck
  check_mock_isolation:
    name: Check mock isolation
    runs-on: ubuntu-latest
    steps:
      - name: Checkout this repository
        uses: actions/checkout@v4.2.2
      - name: Build talawa api non production environment docker image
        run: docker buildx build --file ./docker/api.Containerfile --tag talawa_api --target non_production ./
      # Verifies test mocks do not leak between suites (project pnpm script).
      - name: Check mock isolation
        run: docker container run talawa_api pnpm check_mock_isolation
  check_rootless_production_config:
    name: Check rootless production Docker config (rootless)
    runs-on: ubuntu-latest
    needs: [Run-Tests]
    # Skipped for dependabot PRs.
    if: github.actor != 'dependabot[bot]'
    steps:
      - name: Checkout this repository
        uses: actions/checkout@v4.2.2
      - name: Ensure rootless production files exist
        run: |
          test -f ./envFiles/.env.rootless.production
          test -f ./docker/compose.rootless.production.yaml
      # Two validation passes: --no-interpolate catches raw-template errors,
      # the interpolated pass catches bad variable substitutions.
      - name: Validate docker compose config (rootless env, no interpolation)
        run: |
          docker compose --env-file ./envFiles/.env.rootless.production \
            -f ./compose.yaml -f ./docker/compose.rootless.production.yaml \
            config --no-interpolate -q
      - name: Validate docker compose config (rootless env, interpolated)
        run: |
          docker compose --env-file ./envFiles/.env.rootless.production \
            -f ./compose.yaml -f ./docker/compose.rootless.production.yaml \
            config -q
- name: Install rootless prerequisites
run: |
set -euxo pipefail
sudo apt-get update
sudo apt-get install -y uidmap dbus-user-session slirp4netns fuse-overlayfs ca-certificates curl gnupg
# Ensure the Docker apt repo is configured so we can install/upgrade packages.
if ! command -v dockerd-rootless-setuptool.sh >/dev/null 2>&1; then
sudo install -m 0755 -d /etc/apt/keyrings
if [ ! -f /etc/apt/keyrings/docker.gpg ]; then
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
sudo chmod a+r /etc/apt/keyrings/docker.gpg
fi
echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
$(. /etc/os-release && echo \"$VERSION_CODENAME\") stable" | \
sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get update
fi
# Always install/upgrade rootless extras AND compose plugin.
# The !override YAML tag in compose.rootless.production.yaml requires
# Docker Compose v2.24.4+; the runner's pre-installed version may be older.
# Pin to a minimum version that supports the !override tag.
if ! sudo apt-get install -y docker-ce-rootless-extras 'docker-compose-plugin=2.24.4-*'; then
echo "::notice::Pinned docker-compose-plugin=2.24.4-* not available; installing latest and will verify version below."
sudo apt-get install -y docker-ce-rootless-extras docker-compose-plugin
fi
echo "Docker Compose version:"
docker compose version
# Verify that the installed Compose version satisfies the minimum requirement.
COMPOSE_MIN="2.24.4"
compose_version=$(docker compose version --short 2>/dev/null || echo "0.0.0")
version_gte() {
printf '%s\n%s' "$2" "$1" | sort -V -C
}
if ! version_gte "$compose_version" "$COMPOSE_MIN"; then
echo "::error::Docker Compose v${COMPOSE_MIN}+ required for !override tag, found v${compose_version}"
exit 1
fi
echo "Docker Compose v${compose_version} >= v${COMPOSE_MIN} ✓"
      - name: Start rootless Docker daemon
        run: |
          set -euxo pipefail
          export XDG_RUNTIME_DIR="/run/user/$UID"
          sudo mkdir -p "$XDG_RUNTIME_DIR"
          sudo chown "$USER":"$USER" "$XDG_RUNTIME_DIR"
          export PATH="$HOME/bin:$PATH"
          if [ ! -f "$HOME/.config/systemd/user/docker.service" ]; then
            dockerd-rootless-setuptool.sh install --force
          fi
          # systemd user sessions may be unavailable on hosted runners; each
          # failure below is tolerated and a manual fallback is used instead.
          if ! systemctl --user daemon-reload 2>/dev/null; then
            echo "::notice::systemctl daemon-reload failed (expected on runners without session bus)"
          fi
          if ! systemctl --user start docker 2>/dev/null; then
            echo "::notice::systemctl start docker failed (expected on runners without session bus)"
          fi
          if ! systemctl --user --no-pager status docker >/dev/null 2>&1; then
            echo "::notice::Falling back to manual dockerd-rootless.sh start"
            nohup dockerd-rootless.sh > "$RUNNER_TEMP/dockerd-rootless.log" 2>&1 &
          fi
          chmod +x scripts/docker/resolve-docker-host.sh
          eval "$(./scripts/docker/resolve-docker-host.sh --mode rootless --emit-export --warn-if-docker-group)"
          # Persist resolved environment for all subsequent steps in this job.
          echo "DOCKER_HOST=$DOCKER_HOST" >> "$GITHUB_ENV"
          echo "XDG_RUNTIME_DIR=$XDG_RUNTIME_DIR" >> "$GITHUB_ENV"
          echo "$HOME/bin" >> "$GITHUB_PATH"
      - name: Verify rootless daemon
        run: |
          set -euxo pipefail
          chmod +x scripts/docker/resolve-docker-host.sh
          eval "$(./scripts/docker/resolve-docker-host.sh --mode rootless --emit-export --warn-if-docker-group)"
          # Poll for up to 60 s for the daemon socket to answer.
          TIMEOUT=60
          until docker info >/dev/null 2>&1 || [ "$TIMEOUT" -le 0 ]; do
            sleep 2
            TIMEOUT=$((TIMEOUT - 2))
          done
          if ! docker info >/dev/null 2>&1; then
            echo "Rootless Docker daemon did not start in time."
            if [ -f "$RUNNER_TEMP/dockerd-rootless.log" ]; then
              tail -n 200 "$RUNNER_TEMP/dockerd-rootless.log"
            fi
            exit 1
          fi
          # Confirm the reachable daemon is actually rootless, not the rootful one.
          docker info --format '{{json .SecurityOptions}}' | tee "$RUNNER_TEMP/rootless-security-options.json"
          grep -qi rootless "$RUNNER_TEMP/rootless-security-options.json"
      - name: Verify rootless works without docker group membership
        run: |
          set -euxo pipefail
          # Remove the current user from the docker group to simulate a pure rootless
          # environment where the user has no access to the rootful daemon socket.
          if ! sudo deluser "$USER" docker 2>/dev/null; then
            echo "::warning::Could not remove $USER from docker group (may not be a member)."
          fi
          # Verify removal took effect in /etc/group before proceeding.
          if getent group docker | grep -qw "$USER"; then
            echo "::error::$USER is still listed in docker group in /etc/group after deluser."
            exit 1
          fi
          # Write test commands to a temp script so we can execute them in a
          # fresh login shell that re-reads /etc/group via initgroups().
          cat > "$RUNNER_TEMP/test-no-docker-group.sh" << 'TESTSCRIPT'
          #!/usr/bin/env bash
          set -euxo pipefail
          # Confirm the docker group is no longer active.
          if id -nG | tr " " "\n" | grep -qx docker; then
            echo "::error::docker group is still active; cannot validate non-docker-group scenario."
            exit 1
          fi
          # The rootless daemon must still be reachable via DOCKER_HOST.
          docker info >/dev/null 2>&1
          echo "Rootless Docker is reachable without docker group membership ✓"
          # Validate compose config still parses correctly.
          docker compose --env-file ./envFiles/.env.rootless.production \
            -f ./compose.yaml -f ./docker/compose.rootless.production.yaml \
            config -q
          echo "Compose config valid without docker group ✓"
          TESTSCRIPT
          chmod +x "$RUNNER_TEMP/test-no-docker-group.sh"
          # `su --login` calls initgroups() which re-reads /etc/group, so the
          # docker supplementary group is actually dropped (unlike sg which
          # only changes the effective GID). Pass required env vars explicitly.
          # NOTE: `su --login` resets CWD to $HOME; cd back to the workspace.
          sudo su --login "$USER" -c "\
            export DOCKER_HOST='$DOCKER_HOST' \
              XDG_RUNTIME_DIR='$XDG_RUNTIME_DIR' \
              PATH='$PATH'; \
            cd '$GITHUB_WORKSPACE' && \
            bash '$RUNNER_TEMP/test-no-docker-group.sh'"
      # These are throwaway CI-only values, not real secrets.
      - name: Set CI-safe secrets for rootless production template
        run: |
          set -euo pipefail
          # Align build args with the current runner user to avoid permission mismatches.
          echo "API_UID=$(id -u)" >> "$GITHUB_ENV"
          echo "API_GID=$(id -g)" >> "$GITHUB_ENV"
          # Override sentinel placeholders so the rootless production stack can start in CI.
          echo "API_JWT_SECRET=ci_jwt_secret_0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" >> "$GITHUB_ENV"
          echo "API_AUTH_JWT_SECRET=ci_auth_jwt_secret_0123456789abcdef" >> "$GITHUB_ENV"
          echo "API_COOKIE_SECRET=ci_cookie_secret_0123456789abcdef0123456789abcdef" >> "$GITHUB_ENV"
          echo "API_ADMINISTRATOR_USER_PASSWORD=ci_admin_password_0123456789abcdef" >> "$GITHUB_ENV"
          # Ensure API <-> MinIO credentials match.
          echo "API_MINIO_SECRET_KEY=ci_minio_secret_0123456789abcdef" >> "$GITHUB_ENV"
          echo "MINIO_ROOT_PASSWORD=ci_minio_secret_0123456789abcdef" >> "$GITHUB_ENV"
          # Ensure API <-> Postgres credentials match.
          echo "API_POSTGRES_PASSWORD=ci_postgres_password_0123456789abcdef" >> "$GITHUB_ENV"
          echo "POSTGRES_PASSWORD=ci_postgres_password_0123456789abcdef" >> "$GITHUB_ENV"
      - name: Start rootless production stack
        run: |
          set -euo pipefail
          # Helper: compose invocation with the rootless production overlay.
          dc() {
            docker compose --env-file ./envFiles/.env.rootless.production \
              -f ./compose.yaml -f ./docker/compose.rootless.production.yaml "$@"
          }
          echo "=== Docker Compose version ==="
          docker compose version
          echo "=== Resolved compose config ==="
          dc config -q
          # 1. Build all images first so build failures surface clearly.
          dc build
          # 2. Start infrastructure (postgres, redis, minio) and wait for them.
          echo "=== Starting infrastructure services ==="
          dc up -d postgres redis minio
          # Wait for Postgres to accept connections (bounded retry, fail early).
          PG_RETRIES=30
          until dc exec postgres pg_isready -U talawa --timeout=2 2>/dev/null; do
            PG_RETRIES=$((PG_RETRIES - 1))
            if [ "$PG_RETRIES" -le 0 ]; then
              echo "::error::Postgres did not become ready within 60 s."
              dc logs --tail 50 postgres
              exit 1
            fi
            echo "Waiting for Postgres... ($PG_RETRIES retries left)"
            sleep 2
          done
          echo "Postgres is ready ✓"
          echo "Infrastructure status:"
          dc ps
          # 3. Start API separately so we can capture its logs if it crashes.
          echo "=== Starting API ==="
          dc up -d api
          # 4. Wait for API healthcheck.
          api_id="$(dc ps -q api)"
          TIMEOUT=120
          until [ "$(docker inspect -f '{{.State.Health.Status}}' "$api_id" 2>/dev/null)" = "healthy" ] || [ "$TIMEOUT" -le 0 ]; do
            # If the container exited, stop waiting immediately.
            if [ "$(docker inspect -f '{{.State.Status}}' "$api_id" 2>/dev/null)" = "exited" ]; then
              echo "::error::API container exited unexpectedly."
              break
            fi
            echo "Waiting for API health... ($TIMEOUT s remaining)"
            sleep 3
            TIMEOUT=$((TIMEOUT - 3))
          done
          if [ "$(docker inspect -f '{{.State.Health.Status}}' "$api_id" 2>/dev/null)" != "healthy" ]; then
            echo "::error::API did not become healthy."
            echo "=== API container state ==="
            docker inspect -f '{{json .State}}' "$api_id" || true
            echo ""
            echo "=== API healthcheck log ==="
            docker inspect -f '{{json .State.Health}}' "$api_id" || true
            echo ""
            echo "=== API application logs ==="
            dc logs --tail 300 api
            echo "=== All services ==="
            dc ps
            exit 1
          fi
          echo "API is healthy. Starting Caddy..."
          dc up -d caddy
      - name: Verify API reachable via Caddy
        run: |
          set -euo pipefail
          # Helper: compose invocation with the rootless production overlay.
          dc() {
            docker compose --env-file ./envFiles/.env.rootless.production \
              -f ./compose.yaml -f ./docker/compose.rootless.production.yaml "$@"
          }
          # Wait for Caddy port to be published (with retry)
          CADDY_RETRIES=10
          until http_port="$(dc port caddy 80 | head -n 1 | awk -F: '{print $2}')" && [ -n "$http_port" ]; do
            CADDY_RETRIES=$((CADDY_RETRIES - 1))
            if [ "$CADDY_RETRIES" -le 0 ]; then
              echo "::error::Failed to determine published HTTP port for Caddy after 20s."
              dc ps
              exit 1
            fi
            echo "Waiting for Caddy port mapping... ($CADDY_RETRIES retries left)"
            sleep 2
          done
          echo "Caddy port: $http_port"
          # Poll Caddy until it's ready to accept connections
          echo "Waiting for Caddy to initialize..."
          CADDY_READY_RETRIES=10
          until curl -fsS -m 2 "http://localhost:${http_port}/" >/dev/null 2>&1 || [ "$CADDY_READY_RETRIES" -le 0 ]; do
            CADDY_READY_RETRIES=$((CADDY_READY_RETRIES - 1))
            echo "Caddy not ready yet... ($CADDY_READY_RETRIES retries left)"
            sleep 1
          done
          # Re-probe to determine actual success/failure (fixes off-by-one)
          if ! curl -fsS -m 2 "http://localhost:${http_port}/" >/dev/null 2>&1; then
            echo "::error::Caddy failed to become ready after 10s."
            dc ps
            dc logs caddy
            exit 1
          fi
          echo "Caddy is ready!"
          echo "Checking health endpoint via Caddy on http://localhost:${http_port}/healthcheck"
          curl -fsS --retry 10 --retry-delay 2 --max-time 10 "http://localhost:${http_port}/healthcheck" > /dev/null
      # Teardown always runs so a failed verification doesn't leak containers.
      - name: Stop rootless production stack
        if: always()
        run: |
          docker compose --env-file ./envFiles/.env.rootless.production \
            -f ./compose.yaml -f ./docker/compose.rootless.production.yaml \
            down -v
      - name: Print rootless daemon logs on failure
        if: failure()
        run: |
          if [ -f "$RUNNER_TEMP/dockerd-rootless.log" ]; then
            tail -n 200 "$RUNNER_TEMP/dockerd-rootless.log"
          fi
Check_unused_code:
name: Check for unused files, exports and dependencies
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4.2.2
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: "24.x"
- name: Setup pnpm
uses: pnpm/action-setup@v4
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Run Knip for files and exports
run: pnpm knip --include files,exports
# Dummy environment variables required for Knip to load drizzle.config.ts during file parsing
env:
API_POSTGRES_DATABASE: dummy_db
API_POSTGRES_PASSWORD: dummy_password
API_POSTGRES_HOST: localhost
API_POSTGRES_PORT: "5432"
API_POSTGRES_USER: dummy_user
API_POSTGRES_SSL_MODE: "false"
- name: Run Knip for dependencies
run: pnpm knip --config knip.deps.json --include dependencies
# Dummy environment variables required for Knip to load drizzle.config.ts during file parsing
env:
API_POSTGRES_DATABASE: dummy_db
API_POSTGRES_PASSWORD: dummy_password
API_POSTGRES_HOST: localhost
API_POSTGRES_PORT: "5432"
API_POSTGRES_USER: dummy_user
API_POSTGRES_SSL_MODE: "false"
Check-Sensitive-Files:
if: ${{ github.actor != 'dependabot[bot]' }}
name: Checks if sensitive files have been changed without authorization
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Checkout centralized CI/CD scripts
uses: actions/checkout@v4
with:
repository: PalisadoesFoundation/.github
ref: main
path: .github-central
- name: Make sensitive file checker executable
run: chmod +x .github-central/.github/workflows/scripts/sensitive_file_check.py
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: 3.11
- name: Get PR labels
id: check-labels
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
LABELS="$(gh api repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/labels --jq '.[].name' | tr '\n' ' ')"
if echo "$LABELS" | grep -qw "ignore-sensitive-files-pr"; then
echo "::notice::Skipping sensitive files check due to 'ignore-sensitive-files-pr' label."
echo "skip=true" >> $GITHUB_OUTPUT
else
echo "skip=false" >> $GITHUB_OUTPUT
fi
- name: Get changed files
id: sensitive-changed-files
if: steps.check-labels.outputs.skip != 'true'
env:
BASE_REF_SHA: ${{ github.event.pull_request.base.sha }}
HEAD_REF_SHA: ${{ github.event.pull_request.head.sha }}
run: |
set -euo pipefail
if [ -z "$BASE_REF_SHA" ] || [ -z "$HEAD_REF_SHA" ]; then
echo "::error::Missing BASE_REF_SHA or HEAD_REF_SHA"
exit 1
fi
echo "Base SHA: $BASE_REF_SHA"
echo "Head SHA: $HEAD_REF_SHA"
BASE_SHA=$(git merge-base "$BASE_REF_SHA" "$HEAD_REF_SHA")
echo "Merge base: $BASE_SHA"
ALL_CHANGED_FILES=$(git diff --name-only --diff-filter=ACMR "$BASE_SHA" "$HEAD_REF_SHA")
if [ -z "$ALL_CHANGED_FILES" ]; then
echo "all_changed_files=" >> "$GITHUB_OUTPUT"
else
echo "all_changed_files=$(echo "$ALL_CHANGED_FILES" | tr '\n' ' ')" >> "$GITHUB_OUTPUT"
fi
- name: Run sensitive files check
if: steps.check-labels.outputs.skip != 'true'
env:
ALL_CHANGED_FILES: ${{ steps.sensitive-changed-files.outputs.all_changed_files }}
run: |
if [ -z "$ALL_CHANGED_FILES" ]; then
echo "No changed files. Skipping sensitive file check."
exit 0
fi
python3 .github-central/.github/workflows/scripts/sensitive_file_check.py \
--config .github/workflows/config/sensitive-files.txt \
--files $(set -f; echo $ALL_CHANGED_FILES)
- name: List all changed unauthorized files
if: steps.sensitive-check.outcome == 'failure'
run: |
echo "::error::Unauthorized changes detected in sensitive files."
echo ""
echo "To override:"
echo "Add the 'ignore-sensitive-files-pr' label to this PR."
exit 1
  # Reusable workflow that verifies generated TypeScript docs are up to date.
  Check-AutoDocs:
    needs: [Code-Quality-Checks]
    uses: PalisadoesFoundation/.github/.github/workflows/typescript-autodocs.yml@main
    with:
      pnpm-version: "10.28.1" # This version should match package.json packageManager field
  Generate-Schema-Docs:
    if: ${{ github.actor != 'dependabot[bot]' }}
    name: Generate GraphQL Schema Documentation
    runs-on: ubuntu-latest
    needs: [Code-Quality-Checks]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Install pnpm
        uses: pnpm/action-setup@v4
        with:
          run_install: false
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "24.x"
          cache: "pnpm"
      - name: Prepare dependency store
        run: pnpm fetch
      - name: Install Docs dependencies
        working-directory: ./docs
        run: pnpm install --frozen-lockfile --prefer-offline
      - name: Generate GraphQL Schema Markdown
        working-directory: ./docs
        run: pnpm docusaurus graphql-to-doc
      # Fails when regeneration produced a diff, i.e. committed docs are stale.
      - name: Check for uncommitted schema changes
        run: |
          if [ -n "$(git status --porcelain)" ]; then
            echo "::error::Schema files are outdated or missing."
            echo "Please run 'pnpm docusaurus graphql-to-doc' inside '/docs' locally and commit the updated files."
            echo ""
            echo "Changed files:"
            git status --porcelain
            exit 1
          else
            echo "Schema is up to date."
          fi
  # Aggregation gate: downstream test jobs depend on this single job rather
  # than listing every pre-test check individually.
  Pre-Test-Checks-Pass:
    name: All Pre-Testing Checks Pass
    runs-on: ubuntu-latest
    needs:
      [
        Code-Quality-Checks,
        python_checks,
        check_type_errors,
        check_mock_isolation,
        check_drizzle_migrations,
        check_gql_tada,
        Check-AutoDocs,
        Check_unused_code,
        Generate-Schema-Docs,
        Python-Compliance,
      ]
    steps:
      - name: This job intentionally does nothing
        run: echo "This job intentionally does nothing"
  Install-Script-Tests:
    name: Run install script tests
    runs-on: ubuntu-latest
    timeout-minutes: 15
    needs: [Pre-Test-Checks-Pass]
    steps:
      - name: Checkout this repository
        uses: actions/checkout@v4.2.2
      # simplecov 0.21.x pulls simplecov-html 0.11.0 which requires Ruby ~> 2.4; use
      # a patched simplecov so we can use simplecov-html 0.12.3 (Ruby 3.2 compatible)
      - name: Prepare simplecov for Ruby 3.2
        run: |
          mkdir -p test/install_scripts/vendor
          git clone --depth 1 --branch v0.21.2 https://github.com/simplecov-ruby/simplecov.git test/install_scripts/vendor/simplecov
          # Relax the simplecov-html pin so bundler can resolve a newer release.
          sed -i 's/simplecov-html", "~> 0.11/simplecov-html", ">= 0.11/' test/install_scripts/vendor/simplecov/simplecov.gemspec
      - name: Setup Ruby
        uses: ruby/setup-ruby@v1
        with:
          ruby-version: "3.2"
          bundler-cache: true
          working-directory: test/install_scripts
      - name: Run install script tests with coverage
        working-directory: test/install_scripts
        run: |
          chmod +x run-all.sh
          bundle exec bashcov --root ../.. -- ./run-all.sh
      # Upload only when coverage file exists so the install flag gets data and shows in Codecov
      - name: Upload install coverage to Codecov
        if: "!cancelled() && hashFiles('test/install_scripts/coverage/coverage.xml') != ''"
        uses: codecov/codecov-action@v5
        with:
          name: "${{env.CODECOV_UNIQUE_NAME}}-install"
          token: ${{ secrets.CODECOV_TOKEN }}
          fail_ci_if_error: false
          verbose: true
          files: test/install_scripts/coverage/coverage.xml
          flags: install
  Run-Tests:
    name: Run tests for talawa api (Shard ${{ matrix.shard }})
    timeout-minutes: 10
    runs-on: ubuntu-latest
    needs: [Pre-Test-Checks-Pass]
    env:
      # Keep in sync with the length of matrix.shard below.
      TOTAL_SHARDS: 12
    strategy:
      fail-fast: false
      matrix:
        shard: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
    steps:
      - name: Checkout this repository
        uses: actions/checkout@v4.2.2
      - name: Create .env file for talawa api testing environment
        run: |
          cp ./envFiles/.env.ci ./.env
          echo "NODE_ENV=production" >> .env
          # One random secret shared by both JWT variables so tokens verify.
          JWT_SECRET=$(openssl rand -hex 32)
          echo "API_JWT_SECRET=$JWT_SECRET" >> .env
          echo "API_AUTH_JWT_SECRET=$JWT_SECRET" >> .env
      - name: Build talawa api compose testing environment
        run: docker compose build
      - name: Start test services (postgres-test, minio-test, redis-test, mailpit)
        run: docker compose up -d postgres-test minio-test redis-test mailpit
      - name: Wait for test services to be ready
        run: |
          set -euo pipefail
          # NOTE: ((timeout--)) is safe under `set -e` here because the loop
          # body only runs while timeout >= 1, so the pre-decrement value is
          # never 0 (which would make (( )) return non-zero).
          echo "Waiting for Postgres test service..."
          timeout=90
          until docker compose exec -T postgres-test pg_isready -h localhost -p 5432 -U postgres >/dev/null 2>&1 || [ $timeout -eq 0 ]; do
            echo "Postgres not ready yet... ($timeout seconds remaining)"
            sleep 1
            ((timeout--))
          done
          if [ $timeout -eq 0 ]; then
            echo "Error: Postgres failed to start"
            docker compose ps
            docker compose logs postgres-test
            docker compose down -v
            exit 1
          fi
          echo "Postgres is ready."
          # Wait for minio-test health check
          echo "Waiting for Minio test service..."
          timeout=60
          until docker compose exec -T minio-test mc ready local >/dev/null 2>&1 || [ $timeout -eq 0 ]; do
            echo "Minio not ready yet... ($timeout seconds remaining)"
            sleep 1
            ((timeout--))
          done
          if [ $timeout -eq 0 ]; then
            echo "::warning::Minio health check timed out, continuing anyway"
          else
            echo "Minio is ready."
          fi
          # Wait for redis-test health check
          echo "Waiting for Redis test service..."
          timeout=60
          until docker compose exec -T redis-test redis-cli ping >/dev/null 2>&1 || [ $timeout -eq 0 ]; do
            echo "Redis not ready yet... ($timeout seconds remaining)"
            sleep 1
            ((timeout--))
          done
          if [ $timeout -eq 0 ]; then
            echo "::warning::Redis health check timed out, continuing anyway"
          else
            echo "Redis is ready."
          fi
          # Wait for mailpit health check
          echo "Waiting for Mailpit service..."
          timeout=60
          until curl -f http://localhost:8025/api/v1/info >/dev/null 2>&1 || [ $timeout -eq 0 ]; do
            echo "Mailpit not ready yet... ($timeout seconds remaining)"
            sleep 1
            ((timeout--))
          done
          if [ $timeout -eq 0 ]; then
            echo "::warning::Mailpit health check timed out, continuing anyway"
          else
            echo "Mailpit is ready."
          fi
          echo "All test services are ready."
      - name: Test Mailpit email service
        run: |
          echo "Testing Mailpit email service..."
          # Test Mailpit API is responding
          MAILPIT_INFO=$(curl -s http://localhost:8025/api/v1/info 2>/dev/null || echo '{}')
          echo "Mailpit info: $MAILPIT_INFO"
          # Check if Mailpit API is responding with valid data
          if echo "$MAILPIT_INFO" | grep -q '"version"'; then
            echo "SUCCESS: Mailpit API is responding correctly"
            echo "SUCCESS: Mailpit is ready to capture emails"
          else
            echo "::warning::Mailpit API may not be responding properly"
          fi
          # Note: Email functionality will be tested during the actual test suite execution
          # The API container will use Mailpit as configured in .env.ci
      - name: Run tests (shard ${{ matrix.shard }}/${{ env.TOTAL_SHARDS }})
        env:
          SHARD_INDEX: ${{ matrix.shard }}
          SHARD_COUNT: ${{ env.TOTAL_SHARDS }}
        run: |
          # Run tests without --rm to allow coverage extraction
          docker compose run --name talawa-api-test-shard-${{ matrix.shard }} \
            -e SHARD_INDEX=$SHARD_INDEX \
            -e SHARD_COUNT=$SHARD_COUNT \
            api /bin/sh -c "node scripts/run-shard.js --coverage -c vitest.unit.config.ts --coverage.reportsDirectory=./coverage/unit && node scripts/run-shard.js --coverage -c vitest.integration.config.ts --coverage.reportsDirectory=./coverage/integration"
      - name: Copy coverage from container
        if: always()
        run: |
          # Copy coverage from the named container
          docker cp talawa-api-test-shard-${{ matrix.shard }}:/home/talawa/api/coverage ./coverage || echo "::warning::Failed to copy coverage from container talawa-api-test-shard-${{ matrix.shard }}"
      - name: Cleanup test container
        if: always()
        run: |
          # Remove the test container
          docker rm -f talawa-api-test-shard-${{ matrix.shard }} || true
      - name: Upload coverage artifact
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: coverage-shard-${{ matrix.shard }}
          path: ./coverage/
          retention-days: 1
Merge-Coverage:
name: Merge Coverage Reports
runs-on: ubuntu-latest
needs: [Run-Tests]
if: success()
steps:
- name: Checkout the Repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Fetch base branch for Codecov comparison
run: |
set -e
echo "Fetching base branch: ${{ github.base_ref }}"
if ! git fetch origin ${{ github.base_ref }} 2>&1; then
echo "ERROR: Failed to fetch base branch '${{ github.base_ref }}' from origin"
exit 1
fi
echo "Successfully fetched base branch"
- name: Install pnpm
uses: pnpm/action-setup@v4
with:
run_install: false
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "24.x"
cache: "pnpm"
- name: Prepare dependency store
run: pnpm fetch
- name: Install Dependencies
run: pnpm install --frozen-lockfile --prefer-offline
- name: Download all coverage artifacts
id: download-artifacts
continue-on-error: true
uses: actions/download-artifact@v4
with:
pattern: coverage-shard-*
path: ./coverage-shards/
merge-multiple: false
- name: Check if artifacts were downloaded
id: check-artifacts
run: |
# Check if any coverage files exist
if find coverage-shards -name "lcov.info" -type f | grep -q .; then
echo "artifacts_found=true" >> $GITHUB_OUTPUT
echo "Coverage artifacts found"
else
echo "artifacts_found=false" >> $GITHUB_OUTPUT
echo "No coverage artifacts found - tests may have been skipped"
fi
- name: Prepare Split Coverage Reports
if: steps.check-artifacts.outputs.artifacts_found == 'true'
run: |
# Function to merge and generate report for a specific type (unit or integration)
process_coverage() {
TYPE=$1
echo "Processing $TYPE coverage..."
mkdir -p ./coverage/$TYPE
mkdir -p .nyc_output_$TYPE
# Find all coverage-final.json files for this type
# Expected path: coverage-shards/coverage-shard-*/$TYPE/coverage-final.json
find coverage-shards -path "*/$TYPE/coverage-final.json" -type f > json-files-$TYPE.txt
JSON_COUNT=$(wc -l < json-files-$TYPE.txt)
echo "Found $JSON_COUNT JSON files for $TYPE"
if [ "$JSON_COUNT" -eq 0 ]; then
echo "::warning::No coverage files found for $TYPE"