#!/usr/bin/env bash
|
# ============================================================================= |
|
# pg_backup.sh — Backup a containerized PostgreSQL database |
|
# Ubuntu 24.04 | Requires: docker, s3cmd (optional) |
|
# |
|
# Usage: |
|
# ./pg_backup.sh [OPTIONS] |
|
# |
|
# Options: |
|
# -c Container name/ID of the running PostgreSQL container (required) |
|
# -i Docker image to use as pg_dump client (required) |
|
# e.g. postgres:16-alpine |
|
# -n Docker network the container is on (required) |
|
# -b Backup directory on host machine (required) |
|
# -l Max number of backup files to keep (default: 5) |
|
# -d Database name (required) |
|
# -u PostgreSQL username (default: postgres) |
|
# -p PostgreSQL password (optional, or set PGPASSWORD env var) |
|
# |
|
# S3 options (all required together if uploading to S3): |
|
# -s S3 bucket path, e.g. s3://my-bucket/pg-backups |
|
# -k AWS/S3 access key |
|
# -x AWS/S3 secret key |
|
# -e S3 endpoint URL (optional, for non-AWS S3, e.g. https://s3.example.com) |
|
# |
|
# --dry-run Check all connections and credentials without performing a backup |
|
# -h Show this help message |
|
# |
|
# Examples: |
|
# # Local backup only |
|
# ./pg_backup.sh -c my_postgres -i postgres:16-alpine -n app_network \ |
|
# -b /var/backups/postgres -l 5 -d mydb -u postgres -p secret |
|
# |
|
# # With S3 upload |
|
# ./pg_backup.sh -c my_postgres -i postgres:16-alpine -n app_network \ |
|
# -b /var/backups/postgres -l 5 -d mydb -u postgres -p secret \ |
|
# -s s3://my-bucket/pg-backups -k AKIAIOSFODNN7 -x wJalrXUtn \ |
|
# -e https://s3.example.com |
|
# |
|
# Cron example (every day at 2am): |
|
# 0 2 * * * /opt/scripts/pg_backup.sh -c my_postgres -i postgres:16-alpine \ |
|
# -n app_network -b /var/backups/postgres -l 5 -d mydb \ |
|
# -u postgres -p secret >> /var/log/pg_backup.log 2>&1 |
|
# ============================================================================= |
|
|
|
set -euo pipefail

# ─── Colors ──────────────────────────────────────────────────────────────────
RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'
CYAN='\033[0;36m'; RESET='\033[0m'

# Timestamped, colorized log helpers. INFO/OK/WARN go to stdout, ERROR to
# stderr; die() logs an error and aborts with status 1.
_stamp() { date '+%Y-%m-%d %H:%M:%S'; }
log()   { printf '%b\n' "${CYAN}[$(_stamp)] [INFO]${RESET} $*"; }
ok()    { printf '%b\n' "${GREEN}[$(_stamp)] [OK]${RESET} $*"; }
warn()  { printf '%b\n' "${YELLOW}[$(_stamp)] [WARN]${RESET} $*"; }
error() { printf '%b\n' "${RED}[$(_stamp)] [ERROR]${RESET} $*" >&2; }
die()   { error "$*"; exit 1; }
|
|
|
# ─── Defaults ────────────────────────────────────────────────────────────────
# All overridable via flags; PG_PASS also honors a pre-set PGPASSWORD env var.
PG_USER=postgres          # -u
PG_PASS=${PGPASSWORD:-}   # -p
BACKUP_LIMIT=5            # -l
S3_BUCKET=
S3_ACCESS_KEY=
S3_SECRET_KEY=
S3_ENDPOINT=
|
|
|
# ─── Usage ───────────────────────────────────────────────────────────────────
# Print this script's own header comment block (shebang excluded, leading
# "# " stripped) as the help text, then exit successfully.
usage() {
  awk '/^#/ && !/#!\// { sub(/^# {0,2}/, ""); print }' "$0"
  exit 0
}
|
|
|
# ─── Parse args ──────────────────────────────────────────────────────────────
# getopts only understands short options, so the documented --dry-run long
# option is filtered out of "$@" first. Previously --dry-run was rejected as
# an unknown option and DRY_RUN was never defined at all, so every run
# (dry or not) aborted under `set -u` when $DRY_RUN was later expanded.
DRY_RUN=false
FILTERED_ARGS=()
for arg in "$@"; do
  if [[ "$arg" == "--dry-run" ]]; then
    DRY_RUN=true
  else
    FILTERED_ARGS+=("$arg")
  fi
done
# ${arr[@]+...} guard keeps empty-array expansion safe under `set -u`.
set -- ${FILTERED_ARGS[@]+"${FILTERED_ARGS[@]}"}

while getopts ":c:i:n:b:l:d:u:p:s:k:x:e:h" opt; do
  case $opt in
    c) CONTAINER="$OPTARG" ;;
    i) PG_IMAGE="$OPTARG" ;;
    n) NETWORK="$OPTARG" ;;
    b) BACKUP_DIR="$OPTARG" ;;
    l) BACKUP_LIMIT="$OPTARG" ;;
    d) PG_DB="$OPTARG" ;;
    u) PG_USER="$OPTARG" ;;
    p) PG_PASS="$OPTARG" ;;
    s) S3_BUCKET="$OPTARG" ;;
    k) S3_ACCESS_KEY="$OPTARG" ;;
    x) S3_SECRET_KEY="$OPTARG" ;;
    e) S3_ENDPOINT="$OPTARG" ;;
    h) usage ;;
    :) die "Option -$OPTARG requires an argument." ;;
    \?) die "Unknown option: -$OPTARG" ;;
  esac
done
|
|
|
# ─── Validate required args ──────────────────────────────────────────────────
# Abort with a specific message per missing flag; ${VAR:-} keeps `set -u`
# happy for flags that were never passed. NOTE: the -b check previously read
# `"${BACKUP_DIR:-}"]]` (no space before `]]`), a bash parse error that
# prevented the entire script from running at all.
[[ -z "${CONTAINER:-}" ]] && die "Missing required: -c (container name)"
[[ -z "${PG_IMAGE:-}" ]] && die "Missing required: -i (pg client image)"
[[ -z "${NETWORK:-}" ]] && die "Missing required: -n (docker network)"
[[ -z "${BACKUP_DIR:-}" ]] && die "Missing required: -b (backup directory)"
[[ -z "${PG_DB:-}" ]] && die "Missing required: -d (database name)"
|
|
|
# Validate S3 args — all or nothing: if any S3 flag was given, every
# credential component must be present before uploads are enabled.
S3_ENABLED=false
if [[ -n "${S3_BUCKET}${S3_ACCESS_KEY}${S3_SECRET_KEY}" ]]; then
  [[ -n "$S3_BUCKET" ]]     || die "S3: missing -s (bucket path)"
  [[ -n "$S3_ACCESS_KEY" ]] || die "S3: missing -k (access key)"
  [[ -n "$S3_SECRET_KEY" ]] || die "S3: missing -x (secret key)"
  S3_ENABLED=true
fi
|
|
|
# ─── Validate dependencies ───────────────────────────────────────────────────
# docker is always required; s3cmd only when an S3 upload was configured.
command -v docker >/dev/null 2>&1 || die "'docker' not found in PATH"
if [[ "$S3_ENABLED" == "true" ]]; then
  command -v s3cmd >/dev/null 2>&1 || die "'s3cmd' not found. Install with: apt install s3cmd"
fi
|
|
|
# ─── Dry-run mode ────────────────────────────────────────────────────────────
# Verify container, network, PostgreSQL connectivity and (optionally) S3
# credentials without dumping anything. DRY_RUN is compared as a string with
# a :-default: the old `if $DRY_RUN` aborted under `set -u` whenever the
# variable had not been set (i.e. on every run before the flag fix).
if [[ "${DRY_RUN:-false}" == "true" ]]; then
  log "━━━ DRY RUN ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  DRYRUN_FAILED=false

  # 1. Container existence & state
  log "[1/4] Checking container '${CONTAINER}'..."
  if ! docker inspect "$CONTAINER" &>/dev/null; then
    error "Container '${CONTAINER}' does not exist"
    DRYRUN_FAILED=true
  else
    STATE=$(docker inspect --format='{{.State.Running}}' "$CONTAINER")
    if [[ "$STATE" == "true" ]]; then
      ok "Container '${CONTAINER}' exists and is running"
    else
      error "Container '${CONTAINER}' exists but is NOT running (state: $(docker inspect --format='{{.State.Status}}' "$CONTAINER"))"
      DRYRUN_FAILED=true
    fi
  fi

  # 2. Network & container membership
  log "[2/4] Checking network '${NETWORK}'..."
  if ! docker network inspect "$NETWORK" &>/dev/null; then
    error "Network '${NETWORK}' does not exist"
    DRYRUN_FAILED=true
  else
    MEMBERS=$(docker network inspect "$NETWORK" --format='{{range $k,$v := .Containers}}{{$v.Name}} {{end}}')
    if echo "$MEMBERS" | grep -qw "$CONTAINER"; then
      ok "Network '${NETWORK}' exists and container '${CONTAINER}' is connected to it"
    else
      error "Network '${NETWORK}' exists but container '${CONTAINER}' is NOT connected (members: ${MEMBERS:-none})"
      DRYRUN_FAILED=true
    fi
  fi

  # 3. PostgreSQL connection — psql from an ephemeral client container on the
  # same network; the `&& … || …` captures status without tripping `set -e`.
  log "[3/4] Checking PostgreSQL connection (db: ${PG_DB}, user: ${PG_USER})..."
  PG_CHECK=$(docker run --rm \
    --network "$NETWORK" \
    -e "PGPASSWORD=${PG_PASS}" \
    "$PG_IMAGE" \
    psql -h "$CONTAINER" -U "$PG_USER" -d "$PG_DB" -c '\conninfo' 2>&1) && PG_OK=true || PG_OK=false

  if $PG_OK; then
    ok "PostgreSQL connection successful"
  else
    error "PostgreSQL connection failed: ${PG_CHECK}"
    DRYRUN_FAILED=true
  fi

  # 4. S3 credentials — probe the bucket with a throwaway s3cmd config file
  # (removed by the EXIT trap). The $(...) lines in the heredoc expand to
  # nothing when no custom endpoint was supplied.
  if $S3_ENABLED; then
    log "[4/4] Checking S3 credentials (bucket: ${S3_BUCKET})..."
    S3CFG=$(mktemp)
    trap 'rm -f "$S3CFG"' EXIT

    cat > "$S3CFG" <<EOF
[default]
access_key = ${S3_ACCESS_KEY}
secret_key = ${S3_SECRET_KEY}
$([ -n "$S3_ENDPOINT" ] && echo "host_base = $(echo "$S3_ENDPOINT" | sed 's|https\?://||')")
$([ -n "$S3_ENDPOINT" ] && echo "host_bucket = $(echo "$S3_ENDPOINT" | sed 's|https\?://||')")
$([ -n "$S3_ENDPOINT" ] && echo "website_endpoint = ${S3_ENDPOINT}")
use_https = True
signature_v2 = False
EOF

    S3_CHECK=$(s3cmd --config="$S3CFG" ls "${S3_BUCKET}/" 2>&1) && S3_OK=true || S3_OK=false
    if $S3_OK; then
      ok "S3 credentials valid and bucket '${S3_BUCKET}' is accessible"
    else
      error "S3 check failed: ${S3_CHECK}"
      DRYRUN_FAILED=true
    fi
  else
    log "[4/4] S3 not configured — skipping"
  fi

  log "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  if $DRYRUN_FAILED; then
    die "Dry run finished with errors. Fix the issues above before running a real backup."
  else
    ok "All checks passed. Ready to run a real backup."
  fi
  exit 0
fi
|
|
|
# ─── Validate container is running ───────────────────────────────────────────
# docker prints "true"/"false" for .State.Running; an inspect failure (e.g.
# container missing) is treated exactly like "not running".
RUN_STATE=$(docker inspect --format='{{.State.Running}}' "$CONTAINER" 2>/dev/null || true)
[[ "$RUN_STATE" == *true* ]] || die "Container '$CONTAINER' is not running"
|
|
|
# ─── Resolve container's service hostname on the docker network ──────────────
# On a user-defined Docker network, containers resolve each other by name, so
# the container name itself is the hostname pg_dump should target. (A previous
# `docker inspect` with a malformed Go template ran here, but its result was
# unconditionally overwritten by this assignment — removed as dead code.)
PG_HOST="${CONTAINER}"
|
|
|
# ─── Prepare backup directory ────────────────────────────────────────────────
mkdir -p "$BACKUP_DIR" || die "Cannot create backup directory: $BACKUP_DIR"

# Timestamped dump path, e.g. <dir>/<db>_20240101_020000.dump
printf -v TIMESTAMP '%(%Y%m%d_%H%M%S)T' -1
BACKUP_FILE="${BACKUP_DIR}/${PG_DB}_${TIMESTAMP}.dump"
|
|
|
# ─── Run pg_dump via ephemeral container ─────────────────────────────────────
# An ephemeral client container on the same network dumps the database in
# custom format (-F c) straight into the bind-mounted backup directory.
log "Starting backup of database '${PG_DB}' from container '${CONTAINER}'"
log "Using image: ${PG_IMAGE} | Network: ${NETWORK}"
log "Output: ${BACKUP_FILE}"

DOCKER_ENV=(-e "PGPASSWORD=${PG_PASS}")

# Assemble the full command as an array so every argument stays word-safe.
DUMP_CMD=(
  docker run --rm
  --network "$NETWORK"
  "${DOCKER_ENV[@]}"
  --volume "${BACKUP_DIR}:/backup"
  "$PG_IMAGE"
  pg_dump
  -h "$PG_HOST"
  -U "$PG_USER"
  -d "$PG_DB"
  -F c
  -f "/backup/${PG_DB}_${TIMESTAMP}.dump"
)
"${DUMP_CMD[@]}" || die "pg_dump failed"

ok "Backup created: ${BACKUP_FILE}"
|
|
|
# ─── Rotate old backups ──────────────────────────────────────────────────────
# Keep only the newest BACKUP_LIMIT dumps for this database. The timestamp
# suffix makes lexicographic order chronological, so after sorting the
# surplus entries at the head of the list are the oldest ones.
log "Rotating backups (keeping last ${BACKUP_LIMIT})..."
mapfile -t ALL_DUMPS < <(find "$BACKUP_DIR" -maxdepth 1 -name "${PG_DB}_*.dump" | sort)
BACKUP_COUNT=${#ALL_DUMPS[@]}

if (( BACKUP_COUNT > BACKUP_LIMIT )); then
  for old_file in "${ALL_DUMPS[@]:0:BACKUP_COUNT - BACKUP_LIMIT}"; do
    rm -f "$old_file"
    warn "Deleted old backup: $old_file"
  done
else
  log "No rotation needed (${BACKUP_COUNT}/${BACKUP_LIMIT} backups)"
fi
|
|
|
# ─── Upload to S3 ────────────────────────────────────────────────────────────
# Runs only when -s/-k/-x were all supplied (validated earlier as S3_ENABLED).
if $S3_ENABLED; then
  log "Uploading to S3: ${S3_BUCKET}"

  # Write a temporary s3cmd config to avoid polluting ~/.s3cfg
  # (the EXIT trap removes it on any exit path, including die).
  S3CFG=$(mktemp)
  trap 'rm -f "$S3CFG"' EXIT

  # host_base/host_bucket take the endpoint with the URL scheme stripped;
  # all three $(...) lines expand to nothing when no -e endpoint was given,
  # leaving AWS defaults in effect.
  cat > "$S3CFG" <<EOF
[default]
access_key = ${S3_ACCESS_KEY}
secret_key = ${S3_SECRET_KEY}
$([ -n "$S3_ENDPOINT" ] && echo "host_base = $(echo "$S3_ENDPOINT" | sed 's|https\?://||')")
$([ -n "$S3_ENDPOINT" ] && echo "host_bucket = $(echo "$S3_ENDPOINT" | sed 's|https\?://||')")
$([ -n "$S3_ENDPOINT" ] && echo "website_endpoint = ${S3_ENDPOINT}")
use_https = True
signature_v2 = False
EOF

  s3cmd --config="$S3CFG" put "$BACKUP_FILE" "${S3_BUCKET}/" \
    || die "s3cmd upload failed"

  ok "Uploaded to S3: ${S3_BUCKET}/$(basename "$BACKUP_FILE")"
fi

# ─── Done ────────────────────────────────────────────────────────────────────
ok "Backup completed successfully."