Untangle tests (#112)

* Isolate S3 test case

* Isolate webdav test case

* Isolate SSH test case

* Isolate local storage test case

* Isolate gpg test case

* Add missing volume mount

* Fix file locations for local test case

* Remove compose test case, use utils

* Use test utils throughout

* Use dedicated tmp dir

* Fix link location that is being tested

* Use dedicated tmp_dirs when working on host fs

* Force delete artifact

* Fix expected filename

* Provide helpful messages on failing tests

* Fix filename

* Use proper volume names

* Fix syntax error, use large resource class

* Use named Docker volumes when referencing them in test scripts

* Add name of test case to logging output
Frederik Ring 2022-06-23 14:40:29 +02:00 committed by GitHub
parent 1892d56ff6
commit c2a8cc92fc
23 changed files with 446 additions and 180 deletions
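The shape of the refactor, sketched from the diff below (the listing and the loop are illustrative, not files added by this commit): each storage backend gets its own self-contained test case with a compose file and a run.sh, Docker volumes are given explicit names so scripts can reference them without knowing the compose project prefix, and shared assertions live in test/util.sh.

# Per-case layout (paths taken from the changed files below):
#   test/util.sh       shared helpers: info, pass, fail, expect_running_containers
#   test/gpg/run.sh    test/local/run.sh   test/s3/run.sh
#   test/ssh/run.sh    test/webdav/run.sh
#
# Each compose file names its backup volume explicitly, e.g.
#   volumes:
#     ssh_backup_data:
#       name: ssh_backup_data
# so a test script can mount it by that fixed name:
docker run --rm -it -v ssh_backup_data:/ssh_data alpine ls /ssh_data

# Running the isolated cases one after another (hypothetical loop, no such runner is part of this change):
for test_case in gpg local s3 ssh webdav; do
  (cd "test/$test_case" && ./run.sh)
done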


@@ -5,6 +5,7 @@ jobs:
     machine:
       image: ubuntu-2004:202201-02
     working_directory: ~/docker-volume-backup
+    resource_class: large
     steps:
       - checkout
       - run:
@@ -29,6 +30,7 @@ jobs:
       DOCKER_BUILDKIT: '1'
       DOCKER_CLI_EXPERIMENTAL: enabled
     working_directory: ~/docker-volume-backup
+    resource_class: large
     steps:
       - checkout
       - setup_remote_docker:


@@ -3,6 +3,8 @@
 set -e
 cd $(dirname $0)
+. ../util.sh
+current_test=$(basename $(pwd))
 docker network create test_network
 docker volume create backup_data
@@ -50,17 +52,11 @@ docker run --rm -it \
   -v backup_data:/data alpine \
   ash -c 'tar -xvf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db && test -d /backup/empty_data'
-echo "[TEST:PASS] Found relevant files in untared remote backup."
+pass "Found relevant files in untared remote backup."
 # This test does not stop containers during backup. This is happening on
 # purpose in order to cover this setup as well.
-if [ "$(docker ps -q | wc -l)" != "2" ]; then
-  echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
-  docker ps
-  exit 1
-fi
-echo "[TEST:PASS] All containers running post backup."
+expect_running_containers "2"
 docker rm $(docker stop minio offen)
 docker volume rm backup_data app_data


@@ -3,7 +3,8 @@
 set -e
 cd $(dirname $0)
+. ../util.sh
+current_test=$(basename $(pwd))
 docker-compose up -d
 sleep 30 # mariadb likes to take a bit before responding
@@ -13,29 +14,27 @@ sudo cp -r $(docker volume inspect --format='{{ .Mountpoint }}' commands_archive
 tar -xvf ./local/test.tar.gz
 if [ ! -f ./backup/data/dump.sql ]; then
-  echo "[TEST:FAIL] Could not find file written by pre command."
-  exit 1
+  fail "Could not find file written by pre command."
 fi
-echo "[TEST:PASS] Found expected file."
+pass "Found expected file."
 if [ -f ./backup/data/post.txt ]; then
-  echo "[TEST:FAIL] File created in post command was present in backup."
-  exit 1
+  fail "File created in post command was present in backup."
 fi
-echo "[TEST:PASS] Did not find unexpected file."
+pass "Did not find unexpected file."
 docker-compose down --volumes
 sudo rm -rf ./local
-echo "[TEST:INFO] Running commands test in swarm mode next."
+info "Running commands test in swarm mode next."
 docker swarm init
 docker stack deploy --compose-file=docker-compose.yml test_stack
 while [ -z $(docker ps -q -f name=backup) ]; do
-  echo "[TEST:INFO] Backup container not ready yet. Retrying."
+  info "Backup container not ready yet. Retrying."
   sleep 1
 done
@@ -47,16 +46,14 @@ sudo cp -r $(docker volume inspect --format='{{ .Mountpoint }}' test_stack_archi
 tar -xvf ./local/test.tar.gz
 if [ ! -f ./backup/data/dump.sql ]; then
-  echo "[TEST:FAIL] Could not find file written by pre command."
-  exit 1
+  fail "Could not find file written by pre command."
 fi
-echo "[TEST:PASS] Found expected file."
+pass "Found expected file."
 if [ -f ./backup/data/post.txt ]; then
-  echo "[TEST:FAIL] File created in post command was present in backup."
-  exit 1
+  fail "File created in post command was present in backup."
 fi
-echo "[TEST:PASS] Did not find unexpected file."
+pass "Did not find unexpected file."
 docker stack rm test_stack
 docker swarm leave --force


@@ -1,74 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
mkdir -p local
ssh-keygen -t rsa -m pem -b 4096 -N "test1234" -f id_rsa -C "docker-volume-backup@local"
docker-compose up -d
sleep 5
# A symlink for a known file in the volume is created so the test can check
# whether symlinks are preserved on backup.
docker-compose exec offen ln -s /var/opt/offen/offen.db /var/opt/offen/db.link
docker-compose exec backup backup
sleep 5
if [ "$(docker-compose ps -q | wc -l)" != "5" ]; then
echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
docker-compose ps
exit 1
fi
echo "[TEST:PASS] All containers running post backup."
docker run --rm -it \
-v compose_minio_backup_data:/minio_data \
-v compose_webdav_backup_data:/webdav_data \
-v compose_ssh_backup_data:/ssh_data alpine \
ash -c 'apk add gnupg && \
echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /minio_data/backup/test-hostnametoken.tar.gz.gpg > /tmp/test-hostnametoken.tar.gz && tar -xvf /tmp/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db && \
echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /webdav_data/data/my/new/path/test-hostnametoken.tar.gz.gpg > /tmp/test-hostnametoken.tar.gz && tar -xvf /tmp/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db && \
echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /ssh_data/test-hostnametoken.tar.gz.gpg > /tmp/test-hostnametoken.tar.gz && tar -xvf /tmp/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
echo "[TEST:PASS] Found relevant files in decrypted and untared remote backups."
echo 1234secret | gpg -d --pinentry-mode loopback --yes --passphrase-fd 0 ./local/test-hostnametoken.tar.gz.gpg > ./local/decrypted.tar.gz
tar -xf ./local/decrypted.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
rm ./local/decrypted.tar.gz
test -L /tmp/backup/app_data/db.link
echo "[TEST:PASS] Found relevant files in decrypted and untared local backup."
test -L ./local/test-hostnametoken.latest.tar.gz.gpg
echo "[TEST:PASS] Found symlink to latest version in local backup."
# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker-compose up -d
sleep 5
docker-compose exec backup backup
docker run --rm -it \
-v compose_minio_backup_data:/minio_data \
-v compose_webdav_backup_data:/webdav_data \
-v compose_ssh_backup_data:/ssh_data alpine \
ash -c '[ $(find /minio_data/backup/ -type f | wc -l) = "1" ] && \
[ $(find /webdav_data/data/my/new/path/ -type f | wc -l) = "1" ] && \
[ $(find /ssh_data/ -type f | wc -l) = "1" ]'
echo "[TEST:PASS] Remote backups have not been deleted."
if [ "$(find ./local -type f | wc -l)" != "1" ]; then
echo "[TEST:FAIL] Backups should not have been deleted, instead seen:"
find ./local -type f
exit 1
fi
echo "[TEST:PASS] Local backups have not been deleted."
docker-compose down --volumes
rm -f id_rsa id_rsa.pub


@@ -3,6 +3,8 @@
 set -e
 cd $(dirname $0)
+. ../util.sh
+current_test=$(basename $(pwd))
 mkdir -p local
@@ -14,19 +16,16 @@ sleep 100
 docker-compose down --volumes
 if [ ! -f ./local/conf.tar.gz ]; then
-  echo "[TEST:FAIL] Config from file was not used."
-  exit 1
+  fail "Config from file was not used."
 fi
-echo "[TEST:PASS] Config from file was used."
+pass "Config from file was used."
 if [ ! -f ./local/other.tar.gz ]; then
-  echo "[TEST:FAIL] Run on same schedule did not succeed."
-  exit 1
+  fail "Run on same schedule did not succeed."
 fi
-echo "[TEST:PASS] Run on same schedule succeeded."
+pass "Run on same schedule succeeded."
 if [ -f ./local/never.tar.gz ]; then
-  echo "[TEST:FAIL] Unexpected file was found."
-  exit 1
+  fail "Unexpected file was found."
 fi
-echo "[TEST:PASS] Unexpected cron did not run."
+pass "Unexpected cron did not run."


@@ -0,0 +1,26 @@
version: '3'

services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    restart: always
    environment:
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_FILENAME: test.tar.gz
      BACKUP_LATEST_SYMLINK: test-latest.tar.gz.gpg
      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
      GPG_PASSPHRASE: 1234secret
    volumes:
      - ./local:/archive
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

volumes:
  app_data:

test/gpg/run.sh (new executable file, 34 lines)

@@ -0,0 +1,34 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker-compose up -d
sleep 5
docker-compose exec backup backup
expect_running_containers "2"
tmp_dir=$(mktemp -d)
echo 1234secret | gpg -d --pinentry-mode loopback --yes --passphrase-fd 0 ./local/test.tar.gz.gpg > ./local/decrypted.tar.gz
tar -xf ./local/decrypted.tar.gz -C $tmp_dir
ls -lah $tmp_dir
if [ ! -f $tmp_dir/backup/app_data/offen.db ]; then
fail "Could not find expected file in untared archive."
fi
rm ./local/decrypted.tar.gz
pass "Found relevant files in decrypted and untared local backup."
if [ ! -L ./local/test-latest.tar.gz.gpg ]; then
fail "Could not find local symlink to latest encrypted backup."
fi
docker-compose down --volumes


@@ -3,6 +3,9 @@
 set -e
 cd $(dirname $0)
+. ../util.sh
+current_test=$(basename $(pwd))
 mkdir -p local
 docker-compose up -d
@@ -15,13 +18,11 @@ out=$(mktemp -d)
 sudo tar --same-owner -xvf ./local/test.tar.gz -C "$out"
 if [ ! -f "$out/backup/data/me.txt" ]; then
-  echo "[TEST:FAIL] Expected file was not found."
-  exit 1
+  fail "Expected file was not found."
 fi
-echo "[TEST:PASS] Expected file was found."
+pass "Expected file was found."
 if [ -f "$out/backup/data/skip.me" ]; then
-  echo "[TEST:FAIL] Ignored file was found."
-  exit 1
+  fail "Ignored file was found."
 fi
-echo "[TEST:PASS] Ignored file was not found."
+pass "Ignored file was not found."

test/local/.gitignore (new file, 1 line)

@@ -0,0 +1 @@
local


@@ -0,0 +1,29 @@
version: '3'

services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    hostname: hostnametoken
    restart: always
    environment:
      BACKUP_FILENAME_EXPAND: 'true'
      BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
      BACKUP_LATEST_SYMLINK: test-$$HOSTNAME.latest.tar.gz.gpg
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
      BACKUP_PRUNING_LEEWAY: 5s
      BACKUP_PRUNING_PREFIX: test
    volumes:
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock
      - ./local:/archive

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

volumes:
  app_data:

test/local/run.sh (new executable file, 55 lines)

@@ -0,0 +1,55 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker-compose up -d
sleep 5
# A symlink for a known file in the volume is created so the test can check
# whether symlinks are preserved on backup.
docker-compose exec offen ln -s /var/opt/offen/offen.db /var/opt/offen/db.link
docker-compose exec backup backup
sleep 5
expect_running_containers "2"
tmp_dir=$(mktemp -d)
tar -xvf ./local/test-hostnametoken.tar.gz -C $tmp_dir
if [ ! -f "$tmp_dir/backup/app_data/offen.db" ]; then
fail "Could not find expected file in untared archive."
fi
rm -f ./local/test-hostnametoken.tar.gz
if [ ! -L "$tmp_dir/backup/app_data/db.link" ]; then
fail "Could not find expected symlink in untared archive."
fi
pass "Found relevant files in decrypted and untared local backup."
if [ ! -L ./local/test-hostnametoken.latest.tar.gz.gpg ]; then
fail "Could not find symlink to latest version."
fi
pass "Found symlink to latest version in local backup."
# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker-compose up -d
sleep 5
docker-compose exec backup backup
if [ "$(find ./local -type f | wc -l)" != "1" ]; then
fail "Backups should not have been deleted, instead seen: "$(find ./local -type f)""
fi
pass "Local backups have not been deleted."
docker-compose down --volumes


@@ -3,6 +3,8 @@
 set -e
 cd $(dirname $0)
+. ../util.sh
+current_test=$(basename $(pwd))
 mkdir -p local
@@ -10,16 +12,15 @@ docker-compose up -d
 sleep 5
 GOTIFY_TOKEN=$(curl -sSLX POST -H 'Content-Type: application/json' -d '{"name":"test"}' http://admin:custom@localhost:8080/application | jq -r '.token')
-echo "[TEST:INFO] Set up Gotify application using token $GOTIFY_TOKEN"
+info "Set up Gotify application using token $GOTIFY_TOKEN"
 docker-compose exec backup backup
 NUM_MESSAGES=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages | length')
 if [ "$NUM_MESSAGES" != 0 ]; then
-  echo "[TEST:FAIL] Expected no notifications to be sent when not configured"
-  exit 1
+  fail "Expected no notifications to be sent when not configured"
 fi
-echo "[TEST:PASS] No notifications were sent when not configured."
+pass "No notifications were sent when not configured."
 docker-compose down
@@ -29,24 +30,21 @@ docker-compose exec backup backup
 NUM_MESSAGES=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages | length')
 if [ "$NUM_MESSAGES" != 1 ]; then
-  echo "[TEST:FAIL] Expected one notifications to be sent when configured"
-  exit 1
+  fail "Expected one notifications to be sent when configured"
 fi
-echo "[TEST:PASS] Correct number of notifications were sent when configured."
+pass "Correct number of notifications were sent when configured."
 MESSAGE_TITLE=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages[0].title')
 MESSAGE_BODY=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages[0].message')
 if [ "$MESSAGE_TITLE" != "Successful test run, yay!" ]; then
-  echo "[TEST:FAIL] Unexpected notification title $MESSAGE_TITLE"
-  exit 1
+  fail "Unexpected notification title $MESSAGE_TITLE"
 fi
-echo "[TEST:PASS] Custom notification title was used."
+pass "Custom notification title was used."
 if [ "$MESSAGE_BODY" != "Backing up /tmp/test.tar.gz succeeded." ]; then
-  echo "[TEST:FAIL] Unexpected notification body $MESSAGE_BODY"
-  exit 1
+  fail "Unexpected notification body $MESSAGE_BODY"
 fi
-echo "[TEST:PASS] Custom notification body was used."
+pass "Custom notification body was used."
 docker-compose down --volumes


@@ -4,6 +4,8 @@
 set -e
 cd $(dirname $0)
+. ../util.sh
+current_test=$(basename $(pwd))
 mkdir -p local
@@ -12,17 +14,17 @@ sleep 5
 docker-compose exec backup backup
-sudo tar --same-owner -xvf ./local/backup.tar.gz -C /tmp
-sudo find /tmp/backup/postgres > /dev/null
-echo "[TEST:PASS] Backup contains files at expected location"
-for file in $(sudo find /tmp/backup/postgres); do
+tmp_dir=$(mktemp -d)
+sudo tar --same-owner -xvf ./local/backup.tar.gz -C $tmp_dir
+sudo find $tmp_dir/backup/postgres > /dev/null
+pass "Backup contains files at expected location"
+for file in $(sudo find $tmp_dir/backup/postgres); do
   if [ "$(sudo stat -c '%u:%g' $file)" != "70:70" ]; then
-    echo "[TEST:FAIL] Unexpected file ownership for $file: $(sudo stat -c '%u:%g' $file)"
-    exit 1
+    fail "Unexpected file ownership for $file: $(sudo stat -c '%u:%g' $file)"
   fi
 done
-echo "[TEST:PASS] All files and directories in backup preserved their ownership."
+pass "All files and directories in backup preserved their ownership."
 docker-compose down --volumes


@@ -12,33 +12,11 @@ services:
     volumes:
       - minio_backup_data:/data

-  webdav:
-    image: bytemark/webdav:2.4
-    environment:
-      AUTH_TYPE: Digest
-      USERNAME: test
-      PASSWORD: test
-    volumes:
-      - webdav_backup_data:/var/lib/dav
-
-  ssh:
-    image: linuxserver/openssh-server:version-8.6_p1-r3
-    environment:
-      - PUID=1000
-      - PGID=1000
-      - USER_NAME=test
-    volumes:
-      - ./id_rsa.pub:/config/.ssh/authorized_keys
-      - ssh_backup_data:/tmp
-      - ssh_config:/config
-
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}
     hostname: hostnametoken
     depends_on:
       - minio
-      - webdav
-      - ssh
     restart: always
     environment:
       AWS_ACCESS_KEY_ID: test
@@ -48,25 +26,11 @@ services:
       AWS_S3_BUCKET_NAME: backup
       BACKUP_FILENAME_EXPAND: 'true'
       BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
-      BACKUP_LATEST_SYMLINK: test-$$HOSTNAME.latest.tar.gz.gpg
       BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
       BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
       BACKUP_PRUNING_LEEWAY: 5s
       BACKUP_PRUNING_PREFIX: test
-      GPG_PASSPHRASE: 1234secret
-      WEBDAV_URL: http://webdav/
-      WEBDAV_URL_INSECURE: 'true'
-      WEBDAV_PATH: /my/new/path/
-      WEBDAV_USERNAME: test
-      WEBDAV_PASSWORD: test
-      SSH_HOST_NAME: ssh
-      SSH_PORT: 2222
-      SSH_USER: test
-      SSH_REMOTE_PATH: /tmp
-      SSH_IDENTITY_PASSPHRASE: test1234
     volumes:
-      - ./local:/archive
-      - ./id_rsa:/root/.ssh/id_rsa
       - app_data:/backup/app_data:ro
       - /var/run/docker.sock:/var/run/docker.sock
@@ -79,7 +43,5 @@ services:
 volumes:
   minio_backup_data:
-  webdav_backup_data:
-  ssh_backup_data:
-  ssh_config:
+    name: minio_backup_data
   app_data:

test/s3/run.sh (new executable file, 42 lines)

@@ -0,0 +1,42 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
docker-compose up -d
sleep 5
# A symlink for a known file in the volume is created so the test can check
# whether symlinks are preserved on backup.
docker-compose exec backup backup
sleep 5
expect_running_containers "3"
docker run --rm -it \
-v minio_backup_data:/minio_data \
alpine \
ash -c 'tar -xvf /minio_data/backup/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
pass "Found relevant files in untared remote backups."
# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker-compose up -d
sleep 5
docker-compose exec backup backup
docker run --rm -it \
-v minio_backup_data:/minio_data \
alpine \
ash -c '[ $(find /minio_data/backup/ -type f | wc -l) = "1" ]'
pass "Remote backups have not been deleted."
docker-compose down --volumes


@@ -0,0 +1,47 @@
version: '3'

services:
  ssh:
    image: linuxserver/openssh-server:version-8.6_p1-r3
    environment:
      - PUID=1000
      - PGID=1000
      - USER_NAME=test
    volumes:
      - ./id_rsa.pub:/config/.ssh/authorized_keys
      - ssh_backup_data:/tmp

  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    hostname: hostnametoken
    depends_on:
      - ssh
    restart: always
    environment:
      BACKUP_FILENAME_EXPAND: 'true'
      BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
      BACKUP_PRUNING_LEEWAY: 5s
      BACKUP_PRUNING_PREFIX: test
      SSH_HOST_NAME: ssh
      SSH_PORT: 2222
      SSH_USER: test
      SSH_REMOTE_PATH: /tmp
      SSH_IDENTITY_PASSPHRASE: test1234
    volumes:
      - ./id_rsa:/root/.ssh/id_rsa
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

volumes:
  ssh_backup_data:
    name: ssh_backup_data
  app_data:

test/ssh/run.sh (new executable file, 43 lines)

@@ -0,0 +1,43 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
ssh-keygen -t rsa -m pem -b 4096 -N "test1234" -f id_rsa -C "docker-volume-backup@local"
docker-compose up -d
sleep 5
docker-compose exec backup backup
sleep 5
expect_running_containers 3
docker run --rm -it \
-v ssh_backup_data:/ssh_data \
alpine \
ash -c 'tar -xvf /ssh_data/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
pass "Found relevant files in decrypted and untared remote backups."
# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker-compose up -d
sleep 5
docker-compose exec backup backup
docker run --rm -it \
-v ssh_backup_data:/ssh_data \
alpine \
ash -c '[ $(find /ssh_data/ -type f | wc -l) = "1" ]'
pass "Remote backups have not been deleted."
docker-compose down --volumes
rm -f id_rsa id_rsa.pub


@@ -64,4 +64,5 @@ services:
 volumes:
   backup_data:
+    name: backup_data
   pg_data:


@@ -3,13 +3,15 @@
 set -e
 cd $(dirname $0)
+. ../util.sh
+current_test=$(basename $(pwd))
 docker swarm init
 docker stack deploy --compose-file=docker-compose.yml test_stack
 while [ -z $(docker ps -q -f name=backup) ]; do
-  echo "[TEST:INFO] Backup container not ready yet. Retrying."
+  info "Backup container not ready yet. Retrying."
   sleep 1
 done
@@ -18,18 +20,13 @@ sleep 20
 docker exec $(docker ps -q -f name=backup) backup
 docker run --rm -it \
-  -v test_stack_backup_data:/data alpine \
+  -v backup_data:/data alpine \
   ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/pg_data/PG_VERSION'
-echo "[TEST:PASS] Found relevant files in untared backup."
+pass "Found relevant files in untared backup."
 sleep 5
-if [ "$(docker ps -q | wc -l)" != "5" ]; then
-  echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
-  docker ps -a
-  exit 1
-fi
-echo "[TEST:PASS] All containers running post backup."
+expect_running_containers "5"
 docker stack rm test_stack
 docker swarm leave --force

test/util.sh (new file, 23 lines)

@@ -0,0 +1,23 @@
#!/bin/sh

set -e

info () {
  echo "[test:${current_test:-none}:info] "$1""
}

pass () {
  echo "[test:${current_test:-none}:pass] "$1""
}

fail () {
  echo "[test:${current_test:-none}:fail] "$1""
  exit 1
}

expect_running_containers () {
  if [ "$(docker ps -q | wc -l)" != "$1" ]; then
    fail "Expected $1 containers to be running, instead seen: "$(docker ps -a | wc -l)""
  fi
  pass "$1 containers running."
}
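A minimal usage sketch for these helpers (not part of the diff; it mirrors the pattern the run.sh scripts in this commit follow). The bracketed prefix comes from the echo format above, and fail exits with status 1, so a failed assertion stops the script immediately:

#!/bin/sh
set -e
cd "$(dirname "$0")"

# source the shared helpers and derive the test name from the directory,
# e.g. running from test/gpg makes current_test "gpg"
. ../util.sh
current_test=$(basename $(pwd))

info "starting"               # prints: [test:gpg:info] starting
expect_running_containers "2" # prints a pass line, or prints a fail line and exits 1
pass "all good"               # prints: [test:gpg:pass] all good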


@@ -0,0 +1,45 @@
version: '3'

services:
  webdav:
    image: bytemark/webdav:2.4
    environment:
      AUTH_TYPE: Digest
      USERNAME: test
      PASSWORD: test
    volumes:
      - webdav_backup_data:/var/lib/dav

  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    hostname: hostnametoken
    depends_on:
      - webdav
    restart: always
    environment:
      BACKUP_FILENAME_EXPAND: 'true'
      BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
      BACKUP_PRUNING_LEEWAY: 5s
      BACKUP_PRUNING_PREFIX: test
      WEBDAV_URL: http://webdav/
      WEBDAV_URL_INSECURE: 'true'
      WEBDAV_PATH: /my/new/path/
      WEBDAV_USERNAME: test
      WEBDAV_PASSWORD: test
    volumes:
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

volumes:
  webdav_backup_data:
    name: webdav_backup_data
  app_data:

test/webdav/run.sh (new executable file, 40 lines)

@@ -0,0 +1,40 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
docker-compose up -d
sleep 5
docker-compose exec backup backup
sleep 5
expect_running_containers "3"
docker run --rm -it \
-v webdav_backup_data:/webdav_data \
alpine \
ash -c 'tar -xvf /webdav_data/data/my/new/path/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
pass "Found relevant files in untared remote backup."
# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker-compose up -d
sleep 5
docker-compose exec backup backup
docker run --rm -it \
-v webdav_backup_data:/webdav_data \
alpine \
ash -c '[ $(find /webdav_data/data/my/new/path/ -type f | wc -l) = "1" ]'
pass "Remote backups have not been deleted."
docker-compose down --volumes