name: Publish to APT Repository on R2
# This workflow publishes Debian packages to an APT repository hosted on Cloudflare R2.
# To optimize performance, it only syncs metadata files (dists/) and uploads new .deb files,
# avoiding the need to download all existing .deb packages.
on:
  release:
    types: [published] # Triggers when a new release is published
  workflow_dispatch: # Allow manual triggering from the GitHub UI
jobs:
  build_and_publish_apt:
    runs-on: ubuntu-latest
    permissions:
      contents: read # To check out the repository
      id-token: write # Only needed if you switch rclone to Cloudflare OIDC auth (more advanced)
    env:
      R2_BUCKET_NAME: ${{ secrets.R2_BUCKET_NAME }}
      APT_REPO_PREFIX: "" # Optional: set if the repo lives in a subdirectory of the R2 bucket root, e.g. "apt"
      APT_DISTRIBUTION: "stable"
      APT_COMPONENT: "main"
      APT_REPO_NAME: "ftr-repo" # Name of the aptly local repo
      GPG_KEY_ID: ${{ secrets.GPG_KEY_ID }}
      # rclone S3 backend settings
      RCLONE_S3_PROVIDER: "Cloudflare"
      RCLONE_S3_ACCESS_KEY_ID: ${{ secrets.RCLONE_ACCESS_KEY_ID }}
      RCLONE_S3_SECRET_ACCESS_KEY: ${{ secrets.RCLONE_SECRET_ACCESS_KEY }}
      RCLONE_S3_ENDPOINT: "https://${{ secrets.RCLONE_ACCOUNT_ID }}.r2.cloudflarestorage.com"
      RCLONE_S3_REGION: "auto" # Or your specific region, e.g. "wnam"
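      # For reference: the RCLONE_S3_* variables above are rclone's environment-variable
      # form of its S3 backend options, used together with the on-the-fly ":s3:" remote
      # syntax in the steps below. Together they stand in for a config file stanza
      # roughly equivalent to this sketch:
      #   [r2]
      #   type = s3
      #   provider = Cloudflare
      #   access_key_id = <key>
      #   secret_access_key = <secret>
      #   endpoint = https://<account-id>.r2.cloudflarestorage.com
      #   region = auto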
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          sparse-checkout: |
            Cargo.toml
        # A Rust toolchain is not needed: we download pre-built .deb files from the release
      - name: Install aptly
        run: |
          sudo apt-get update
          sudo apt-get install -y aptly ca-certificates jq curl
          # Verify installation
          aptly version
      - name: Install rclone
        run: |
          sudo apt-get update
          sudo apt-get install -y rclone
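      # Note: the Ubuntu archive's rclone is sufficient here; if a newer release is
      # ever required, rclone also ships an upstream install script (see
      # https://rclone.org/install/).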
      - name: Configure rclone (using env vars)
        run: |
          # No explicit rclone config file is needed: the S3 backend is configured
          # entirely through the RCLONE_S3_* environment variables set above.
          rclone version # Sanity check that rclone is installed
          # Debug: test R2 connectivity
          echo "Testing R2 bucket access..."
          echo "Bucket name: ${R2_BUCKET_NAME}"
          echo "Bucket name length: ${#R2_BUCKET_NAME}"
          echo "Endpoint: ${RCLONE_S3_ENDPOINT}"
          echo "Access Key ID starts with: ${RCLONE_S3_ACCESS_KEY_ID:0:8}..."
          # Try to list the bucket
          echo "Listing bucket contents:"
          rclone ls ":s3:${R2_BUCKET_NAME}/" --max-depth 1 || echo "Failed to list bucket"
          # Test write access (R2 doesn't allow bucket-level create operations,
          # hence --s3-no-check-bucket)
          echo "Testing write access:"
          echo "test" > /tmp/test-apt-workflow.txt
          rclone copy /tmp/test-apt-workflow.txt ":s3:${R2_BUCKET_NAME}/" --s3-no-check-bucket -v || echo "Failed to upload test file"
          rm -f /tmp/test-apt-workflow.txt
      - name: Import GPG Key
        uses: crazy-max/ghaction-import-gpg@v6
        with:
          gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }}
          passphrase: ${{ secrets.GPG_PASSPHRASE }}
        # The imported key's user ID is trusted automatically by the action.
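      # Note: aptly shells out to gpg for signing, so the key imported above must be
      # usable non-interactively. The publish step below also passes the passphrase
      # explicitly via -batch/-passphrase as a belt-and-braces measure.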
      - name: Prepare aptly directories and data
        run: |
          mkdir -p ~/.aptly/db ~/.aptly/public
          # For now, start fresh each time to avoid corruption issues
          # TODO: Implement proper state recovery once we have a stable APT repo
          echo "Starting with fresh aptly database..."
          # Ensure aptly config exists
          echo '{ "rootDir": "'$HOME'/.aptly" }' > ~/.aptly.conf
          echo "Created aptly.conf"
          aptly version # Verify aptly is working
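      # A future state-recovery implementation (per the TODO above) could pull the
      # previous database down before publishing and push it back afterwards, e.g.
      # (untested sketch; "aptly-db/" is a hypothetical prefix this workflow does not
      # create today):
      #   rclone copy ":s3:${R2_BUCKET_NAME}/aptly-db/" ~/.aptly/db/ --s3-no-check-bucket
      #   ...publish...
      #   rclone sync ~/.aptly/db/ ":s3:${R2_BUCKET_NAME}/aptly-db/" --s3-no-check-bucket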
      - name: Download release assets
        run: |
          # Get the release tag from the event
          if [ "${{ github.event_name }}" = "release" ]; then
            RELEASE_TAG="${{ github.event.release.tag_name }}"
          else
            # For manual dispatch, use the most recent release
            RELEASE_TAG=$(gh release list --limit 1 --json tagName -q '.[0].tagName')
          fi
          echo "Downloading .deb files from release $RELEASE_TAG"
          # Create directory for debs
          mkdir -p release-debs
          # Download all .deb files from the release
          gh release download "$RELEASE_TAG" --pattern "*.deb" --dir release-debs
          # List downloaded files
          ls -la release-debs/
          # Extract the package name from Cargo.toml. We parse it with sed rather than
          # `cargo metadata`, because the sparse checkout above contains only Cargo.toml
          # (no sources) and cargo can fail to resolve a manifest whose targets are missing.
          CARGO_PKG_NAME=$(sed -n 's/^name *= *"\(.*\)"/\1/p' Cargo.toml | head -n 1)
          echo "CARGO_PKG_NAME=$CARGO_PKG_NAME" >> $GITHUB_ENV
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
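      # The release assets are expected to follow the Debian naming convention
      # <package>_<version>_<arch>.deb (e.g. ftr_0.1.2_amd64.deb); the pool upload
      # step below relies on that pattern.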
      - name: Add package to aptly and publish
        env:
          APTLY_GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} # Used by the publish commands below
        run: |
          # Create the local repo (fresh start each run)
          echo "Creating aptly local repo: ${APT_REPO_NAME}"
          aptly repo create -distribution="${APT_DISTRIBUTION}" -component="${APT_COMPONENT}" -architectures="amd64,arm64" "${APT_REPO_NAME}"
          echo "Adding packages to ${APT_REPO_NAME}..."
          # Add all downloaded .deb files
          for deb in release-debs/*.deb; do
            echo "Adding $deb"
            aptly repo add "${APT_REPO_NAME}" "$deb"
          done
          SNAPSHOT_NAME="${APT_REPO_NAME}-$(date +%s)-${GITHUB_SHA::7}"
          echo "Creating snapshot: ${SNAPSHOT_NAME}"
          aptly snapshot create "${SNAPSHOT_NAME}" from repo "${APT_REPO_NAME}"
          # Since we start fresh each run, we always create a new publication
          echo "Publishing snapshot ${SNAPSHOT_NAME} for ${APT_DISTRIBUTION}"
          if [ -z "${APT_REPO_PREFIX}" ]; then
            aptly publish snapshot -batch -passphrase="${APTLY_GPG_PASSPHRASE}" -gpg-key="${GPG_KEY_ID}" -distribution="${APT_DISTRIBUTION}" -component="${APT_COMPONENT}" -architectures="amd64,arm64" "${SNAPSHOT_NAME}"
          else
            aptly publish snapshot -batch -passphrase="${APTLY_GPG_PASSPHRASE}" -gpg-key="${GPG_KEY_ID}" -distribution="${APT_DISTRIBUTION}" -component="${APT_COMPONENT}" -architectures="amd64,arm64" "${SNAPSHOT_NAME}" "${APT_REPO_PREFIX}"
          fi
          # Cleanup of old snapshots (optional; keeps the 5 newest). -raw prints bare
          # names and -sort=time lists oldest first, so drop everything but the last 5.
          aptly snapshot list -raw -sort=time | head -n -5 | xargs -r -n1 aptly snapshot drop
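      # After publishing, aptly lays out the repo under ~/.aptly/public, roughly:
      #   dists/stable/{Release,Release.gpg,InRelease}
      #   dists/stable/main/binary-{amd64,arm64}/Packages{,.gz,.bz2}
      #   pool/main/f/ftr/ftr_<version>_<arch>.deb
      # The next step mirrors the pool/ paths by hand and syncs dists/ wholesale.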
      - name: Upload APT repository files to R2
        run: |
          echo "Uploading new .deb packages to R2..."
          # Upload each .deb file to its pool location. The pool path mirrors aptly's
          # layout: pool/<component>/<first letter of package name>/<package name>/
          for deb in release-debs/*.deb; do
            DEB_FILENAME=$(basename "$deb")
            # Extract the architecture from the filename (e.g. ftr_0.1.2_amd64.deb -> amd64)
            ARCH=$(echo "$DEB_FILENAME" | sed -n 's/.*_\([^_]*\)\.deb$/\1/p')
            POOL_PATH="pool/${APT_COMPONENT}/${CARGO_PKG_NAME:0:1}/${CARGO_PKG_NAME}/"
            echo "Uploading $DEB_FILENAME (${ARCH}) to $POOL_PATH"
            # Note: R2 has no mkdir; "directories" are implied by object keys
            rclone copy "$deb" ":s3:${R2_BUCKET_NAME}/${POOL_PATH}" --s3-no-check-bucket --progress
          done
          echo "Uploading updated metadata to R2..."
          # Only sync the dists/ directory (metadata). This assumes an empty
          # APT_REPO_PREFIX; with a prefix set, the published tree lives under
          # ~/.aptly/public/${APT_REPO_PREFIX}/ instead.
          rclone sync ~/.aptly/public/dists/ ":s3:${R2_BUCKET_NAME}/dists/" --s3-no-check-bucket --create-empty-src-dirs --progress
          # Also ensure the GPG public keys are published
          if [ ! -f ~/.aptly/public/networkweather.gpg.key ]; then
            echo "Exporting GPG public key (armored)..."
            gpg --armor --export "${GPG_KEY_ID}" > ~/.aptly/public/networkweather.gpg.key
            rclone copy ~/.aptly/public/networkweather.gpg.key ":s3:${R2_BUCKET_NAME}/" --s3-no-check-bucket --progress
          fi
          if [ ! -f ~/.aptly/public/networkweather.noarmor.gpg ]; then
            echo "Exporting GPG public key (dearmored)..."
            gpg --export "${GPG_KEY_ID}" > ~/.aptly/public/networkweather.noarmor.gpg
            rclone copy ~/.aptly/public/networkweather.noarmor.gpg ":s3:${R2_BUCKET_NAME}/" --s3-no-check-bucket --progress
          fi
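      # Optional sanity check (sketch): fetch the published Release file back through
      # the public hostname to confirm the sync landed, e.g.
      #   curl -fsSL "https://apt.networkweather.com/dists/${APT_DISTRIBUTION}/Release" | head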
      - name: Summarize publication
        run: |
          # The aptly database is deliberately not synced back to R2: each run starts
          # fresh, which avoids the corruption issues a shared database could cause.
          echo "APT repository published successfully!"
          echo "Users can now install packages from: https://apt.networkweather.com/"