Merge pull request #14 from offen/backup-archive

allow local storage of backups
Frederik Ring, 2021-08-19 11:14:04 +02:00, committed by GitHub
commit 8c7ffc3d99
7 changed files with 75 additions and 20 deletions

Dockerfile

@@ -1,7 +1,7 @@
# Copyright 2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: MPL-2.0
FROM golang:1.16-alpine as builder
FROM golang:1.17-alpine as builder
ARG MC_VERSION=RELEASE.2021-06-13T17-48-22Z
RUN go install -ldflags "-X github.com/minio/mc/cmd.ReleaseTag=$MC_VERSION" github.com/minio/mc@$MC_VERSION
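
Worth noting on this hunk: the `-X` ldflag stamps `MC_VERSION` into mc's `ReleaseTag` variable, which a bare `go install` would leave unset, so the binary can report its pinned release. A quick way to verify the pin, assuming the final image keeps `mc` on its `PATH` (the bare `mc` calls in backup.sh suggest it does) and using a hypothetical local tag:

```sh
# Build the image and ask the bundled mc for its version; the output
# should mention the pinned RELEASE.2021-06-13T17-48-22Z tag.
docker build -t docker-volume-backup:local .
docker run --rm --entrypoint mc docker-volume-backup:local --version
```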

README.md

@@ -1,8 +1,8 @@
# docker-volume-backup
Backup Docker volumes to any S3 compatible storage.
Backup Docker volumes locally or to any S3 compatible storage.
The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a sidecar container to an existing Docker setup. It handles recurring backups of Docker volumes to any S3 compatible storage and rotates away old backups if configured.
The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a sidecar container to an existing Docker setup. It handles recurring backups of Docker volumes to a local directory or any S3 compatible storage (or both) and rotates away old backups if configured.
## Configuration
@@ -30,15 +30,33 @@ AWS_S3_BUCKET_NAME="<xxx>"
# This is the FQDN of your storage server, e.g. `storage.example.com`.
# Do not set this when working against AWS S3. If you need to set a
# specific protocol, you will need to use the option below.
# AWS_ENDPOINT="<xxx>"
# The protocol to be used when communicating with your storage server.
# Defaults to "https". You can set this to "http" when communicating with
# a different Docker container on the same host for example.
# AWS_ENDPOINT_PROTO="https"
# In addition to uploading backups to remote storage, you can keep a
# local copy. Pass a local path here if needed. You likely want to
# mount a local folder or Docker volume into that location when running
# the container. Local paths are also subject to pruning of old
# backups as defined below.
# BACKUP_ARCHIVE="/archive"
########### BACKUP PRUNING
# **IMPORTANT, PLEASE READ THIS BEFORE USING THIS FEATURE**:
# The mechanism used for pruning backups is not very sophisticated
# and applies its rules to **all files in the target directory**,
# which means that if you are storing your backups next to other files,
# those might become subject to deletion too. When using this option,
# make sure the backup files are stored in a directory used exclusively
# for them, or you might lose data.
# Define this value to enable automatic pruning of old backups. The value
# declares the number of days for which a backup is kept.
@@ -108,6 +126,10 @@ services:
# to stop the container
- /var/run/docker.sock:/var/run/docker.sock:ro
- data:/backup/my-app-backup:ro
# If you mount a local directory or volume to `/archive`, a local
# copy of the backup will be stored there. You can override the
# location inside the container by setting `BACKUP_ARCHIVE`.
# - /path/to/local_backups:/archive
volumes:
data:
```
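
For a one-off test without Compose, a roughly equivalent `docker run` invocation might look as follows; the volume name, host path, and `latest` tag are placeholders, not prescribed by this change:

```sh
# Sketch: mount the Docker socket (for container stopping), the data to
# back up, and a host folder at /archive for the local copies.
docker run -d \
  -v /var/run/docker.sock:/var/run/docker.sock:ro \
  -v data:/backup/my-app-backup:ro \
  -v /path/to/local_backups:/archive \
  offen/docker-volume-backup:latest
```

With no `AWS_*` credentials set, such a setup stores backups in the local archive only, which is the case the conditional `mc alias set` in entrypoint.sh below caters to.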

backup.sh

@@ -77,13 +77,24 @@ if [ "$CONTAINERS_TO_STOP_TOTAL" != "0" ]; then
fi
fi
copy_backup () {
mc cp $MC_GLOBAL_OPTIONS "$BACKUP_FILENAME" "$1"
}
if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
info "Uploading backup to remote storage"
echo "Will upload to bucket \"$AWS_S3_BUCKET_NAME\"."
mc cp $MC_GLOBAL_OPTIONS "$BACKUP_FILENAME" "backup-target/$AWS_S3_BUCKET_NAME"
copy_backup "backup-target/$AWS_S3_BUCKET_NAME"
echo "Upload finished."
fi
if [ -d "$BACKUP_ARCHIVE" ]; then
info "Copying backup to local archive"
echo "Will copy to \"$BACKUP_ARCHIVE\"."
copy_backup "$BACKUP_ARCHIVE"
echo "Finished copying."
fi
if [ -f "$BACKUP_FILENAME" ]; then
info "Cleaning up"
rm -vf "$BACKUP_FILENAME"
@@ -92,16 +103,12 @@ fi
info "Backup finished"
echo "Will wait for next scheduled backup."
if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
info "Pruning old backups"
echo "Sleeping ${BACKUP_PRUNING_LEEWAY} before checking eligibility."
sleep "$BACKUP_PRUNING_LEEWAY"
bucket=$AWS_S3_BUCKET_NAME
prune () {
target=$1
rule_applies_to=$(
mc rm $MC_GLOBAL_OPTIONS --fake --recursive -force \
mc rm $MC_GLOBAL_OPTIONS --fake --recursive --force \
--older-than "${BACKUP_RETENTION_DAYS}d" \
"backup-target/$bucket" \
"$target" \
| wc -l
)
if [ "$rule_applies_to" == "0" ]; then
@@ -110,7 +117,7 @@ if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
exit 0
fi
total=$(mc ls $MC_GLOBAL_OPTIONS "backup-target/$bucket" | wc -l)
total=$(mc ls $MC_GLOBAL_OPTIONS "$target" | wc -l)
if [ "$rule_applies_to" == "$total" ]; then
echo "Using a retention of ${BACKUP_RETENTION_DAYS} days would prune all currently existing backups, will not continue."
@@ -119,7 +126,21 @@ if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
fi
mc rm $MC_GLOBAL_OPTIONS \
--recursive -force \
--older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket"
--recursive --force \
--older-than "${BACKUP_RETENTION_DAYS}d" "$target"
echo "Successfully pruned ${rule_applies_to} backups older than ${BACKUP_RETENTION_DAYS} days."
}
if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
info "Pruning old backups"
echo "Sleeping ${BACKUP_PRUNING_LEEWAY} before checking eligibility."
sleep "$BACKUP_PRUNING_LEEWAY"
if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
info "Pruning old backups from remote storage"
prune "backup-target/$bucket"
fi
if [ -d "$BACKUP_ARCHIVE" ]; then
info "Pruning old backups from local archive"
prune "$BACKUP_ARCHIVE"
fi
fi
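
The refactor works because `mc` accepts plain filesystem paths in the same positions as aliased S3 targets, so a single `prune ()` can serve both `backup-target/$AWS_S3_BUCKET_NAME` and `$BACKUP_ARCHIVE`. A minimal sketch of the dry-run counting trick used above, with a hard-coded retention of 7 days:

```sh
# --fake performs a dry run instead of deleting; each matched entry is
# printed on its own line, so wc -l yields the number of backups the
# retention rule would remove.
mc rm --fake --recursive --force --older-than "7d" /archive | wc -l
```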

entrypoint.sh

@@ -12,10 +12,12 @@ set -e
cat <<EOF > env.sh
BACKUP_SOURCES="${BACKUP_SOURCES:-/backup}"
BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
BACKUP_FILENAME=${BACKUP_FILENAME:-"backup-%Y-%m-%dT%H-%M-%S.tar.gz"}
BACKUP_FILENAME="${BACKUP_FILENAME:-backup-%Y-%m-%dT%H-%M-%S.tar.gz}"
BACKUP_ARCHIVE="${BACKUP_ARCHIVE:-/archive}"
BACKUP_RETENTION_DAYS="${BACKUP_RETENTION_DAYS:-}"
BACKUP_PRUNING_LEEWAY="${BACKUP_PRUNING_LEEWAY:-10m}"
BACKUP_STOP_CONTAINER_LABEL="${BACKUP_STOP_CONTAINER_LABEL:-true}"
AWS_S3_BUCKET_NAME="${AWS_S3_BUCKET_NAME:-}"
AWS_ENDPOINT="${AWS_ENDPOINT:-s3.amazonaws.com}"
@@ -23,14 +25,16 @@ AWS_ENDPOINT_PROTO="${AWS_ENDPOINT_PROTO:-https}"
GPG_PASSPHRASE="${GPG_PASSPHRASE:-}"
BACKUP_STOP_CONTAINER_LABEL="${BACKUP_STOP_CONTAINER_LABEL:-true}"
MC_GLOBAL_OPTIONS="${MC_GLOBAL_OPTIONS:-}"
EOF
chmod a+x env.sh
source env.sh
mc $MC_GLOBAL_OPTIONS alias set backup-target "$AWS_ENDPOINT_PROTO://$AWS_ENDPOINT" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
if [ ! -z "$AWS_ACCESS_KEY_ID" ] && [ ! -z "$AWS_SECRET_ACCESS_KEY" ]; then
mc $MC_GLOBAL_OPTIONS alias set backup-target \
"$AWS_ENDPOINT_PROTO://$AWS_ENDPOINT" \
"$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
fi
# Add our cron entry, and direct stdout & stderr to Docker commands stdout
echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."

test/compose/.gitignore (new file)

@@ -0,0 +1 @@
local

test/compose/docker-compose.yml

@@ -26,6 +26,7 @@ services:
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
volumes:
- ./local:/archive
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
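
Because `./local` is bind-mounted at `/archive`, the test can assert on the archived tarball from the host side once a backup has run, without entering any container:

```sh
# Sketch: list the tarball's contents straight from the host checkout.
test -f ./local/test.tar.gz && tar -tzf ./local/test.tar.gz | head
```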

test/compose/run.sh

@@ -4,6 +4,8 @@ set -e
cd $(dirname $0)
mkdir -p local
docker-compose up -d
sleep 5
@@ -13,7 +15,11 @@ docker run --rm -it \
-v compose_backup_data:/data alpine \
ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db'
echo "[TEST:PASS] Found relevant files in untared backup."
echo "[TEST:PASS] Found relevant files in untared remote backup."
tar -xf ./local/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
echo "[TEST:PASS] Found relevant files in untared local backup."
if [ "$(docker-compose ps -q | wc -l)" != "3" ]; then
echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"