Exclude specific backends from pruning (#262)

* Skip backends while pruning

* Add pruning test step and silence download log for better readability

* Add test cases for pruning in all backends

Also add -q or --quiet-pull to all tests.

* Add test case for skipping backends while pruning

* Adjusted test logging, generate new test spec file

* Gitignore for temp test file
This commit is contained in:
MaxJa4 2023-08-27 19:19:11 +02:00 committed by GitHub
parent 5fcc96edf9
commit ad4e2af83f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
25 changed files with 330 additions and 40 deletions

View File

@ -205,6 +205,15 @@ You can populate below template according to your requirements and use it as you
# BACKUP_EXCLUDE_REGEXP="\.log$" # BACKUP_EXCLUDE_REGEXP="\.log$"
# Exclude one or many storage backends from the pruning process.
# E.g. with one backend excluded: BACKUP_SKIP_BACKENDS_FROM_PRUNE=s3
# E.g. with multiple backends excluded: BACKUP_SKIP_BACKENDS_FROM_PRUNE=s3,webdav
# Available backends are: S3, WebDAV, SSH, Local, Dropbox, Azure
# Note: The backend names are case-insensitive.
# Default: All backends get pruned.
# BACKUP_SKIP_BACKENDS_FROM_PRUNE=
########### BACKUP STORAGE ########### BACKUP STORAGE
# The name of the remote bucket that should be used for storing backups. If # The name of the remote bucket that should be used for storing backups. If

View File

@ -41,6 +41,7 @@ type Config struct {
BackupStopContainerLabel string `split_words:"true" default:"true"` BackupStopContainerLabel string `split_words:"true" default:"true"`
BackupFromSnapshot bool `split_words:"true"` BackupFromSnapshot bool `split_words:"true"`
BackupExcludeRegexp RegexpDecoder `split_words:"true"` BackupExcludeRegexp RegexpDecoder `split_words:"true"`
BackupSkipBackendsFromPrune []string `split_words:"true"`
GpgPassphrase string `split_words:"true"` GpgPassphrase string `split_words:"true"`
NotificationURLs []string `envconfig:"NOTIFICATION_URLS"` NotificationURLs []string `envconfig:"NOTIFICATION_URLS"`
NotificationLevel string `split_words:"true" default:"error"` NotificationLevel string `split_words:"true" default:"error"`

View File

@ -14,6 +14,8 @@ import (
"os" "os"
"path" "path"
"path/filepath" "path/filepath"
"slices"
"strings"
"text/template" "text/template"
"time" "time"
@ -591,6 +593,12 @@ func (s *script) pruneBackups() error {
for _, backend := range s.storages { for _, backend := range s.storages {
b := backend b := backend
eg.Go(func() error { eg.Go(func() error {
if skipPrune(b.Name(), s.c.BackupSkipBackendsFromPrune) {
s.logger.Info(
fmt.Sprintf("Skipping pruning for backend `%s`.", b.Name()),
)
return nil
}
stats, err := b.Prune(deadline, s.c.BackupPruningPrefix) stats, err := b.Prune(deadline, s.c.BackupPruningPrefix)
if err != nil { if err != nil {
return err return err
@ -622,3 +630,14 @@ func (s *script) must(err error) {
panic(err) panic(err)
} }
} }
// skipPrune returns true if the given backend name is contained in the
// list of skipped backends. The comparison ignores case on both sides
// and tolerates surrounding whitespace in configured entries, so a
// value such as "s3, webdav" (split into [" webdav"]) still matches.
func skipPrune(name string, skippedBackends []string) bool {
	return slices.ContainsFunc(
		skippedBackends,
		func(b string) bool {
			// Trim first: env-var lists are split on "," without trimming.
			return strings.EqualFold(strings.TrimSpace(b), name)
		},
	)
}

View File

@ -6,35 +6,61 @@ cd "$(dirname "$0")"
. ../util.sh . ../util.sh
current_test=$(basename $(pwd)) current_test=$(basename $(pwd))
docker compose up -d download_az () {
docker compose run --rm az_cli \
az storage blob download -f /dump/$1.tar.gz -c test-container -n path/to/backup/$1.tar.gz
}
docker compose up -d --quiet-pull
sleep 5 sleep 5
# A symlink for a known file in the volume is created so the test can check
# whether symlinks are preserved on backup.
docker compose exec backup backup docker compose exec backup backup
sleep 5 sleep 5
expect_running_containers "3" expect_running_containers "3"
docker compose run --rm az_cli \ download_az "test"
az storage blob download -f /dump/test.tar.gz -c test-container -n path/to/backup/test.tar.gz
tar -xvf ./local/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db tar -xvf ./local/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
pass "Found relevant files in untared remote backups." pass "Found relevant files in untared remote backups."
# The second part of this test checks if backups get deleted when the retention # The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted) # is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5 sleep 5
docker compose exec backup backup docker compose exec backup backup
docker compose run --rm az_cli \ download_az "test"
az storage blob download -f /dump/test.tar.gz -c test-container -n path/to/backup/test.tar.gz
test -f ./local/test.tar.gz test -f ./local/test.tar.gz
pass "Remote backups have not been deleted." pass "Remote backups have not been deleted."
# The third part of this test checks if old backups get deleted when the retention
# is set to 7 days (which it should)
BACKUP_RETENTION_DAYS="7" docker compose up -d
sleep 5
info "Create first backup with no prune"
docker compose exec backup backup
sudo date --set="14 days ago"
docker compose run --rm az_cli \
az storage blob upload -f /dump/test.tar.gz -c test-container -n path/to/backup/test-old.tar.gz
sudo date --set="14 days"
info "Create second backup and prune"
docker compose exec backup backup
info "Download first backup which should be pruned"
download_az "test-old" || true
test ! -f ./local/test-old.tar.gz
test -f ./local/test.tar.gz
pass "Old remote backup has been pruned, new one is still present."
docker compose down --volumes docker compose down --volumes

View File

@ -24,7 +24,7 @@ openssl x509 -req -passin pass:test \
openssl x509 -in minio.crt -noout -text openssl x509 -in minio.crt -noout -text
docker compose up -d docker compose up -d --quiet-pull
sleep 5 sleep 5
docker compose exec backup backup docker compose exec backup backup

View File

@ -13,7 +13,7 @@ docker volume create app_data
# correctly. It is not supposed to hold any data. # correctly. It is not supposed to hold any data.
docker volume create empty_data docker volume create empty_data
docker run -d \ docker run -d -q \
--name minio \ --name minio \
--network test_network \ --network test_network \
--env MINIO_ROOT_USER=test \ --env MINIO_ROOT_USER=test \
@ -25,7 +25,7 @@ docker run -d \
docker exec minio mkdir -p /data/backup docker exec minio mkdir -p /data/backup
docker run -d \ docker run -d -q \
--name offen \ --name offen \
--network test_network \ --network test_network \
-v app_data:/var/opt/offen/ \ -v app_data:/var/opt/offen/ \
@ -33,7 +33,7 @@ docker run -d \
sleep 10 sleep 10
docker run --rm \ docker run --rm -q \
--network test_network \ --network test_network \
-v app_data:/backup/app_data \ -v app_data:/backup/app_data \
-v empty_data:/backup/empty_data \ -v empty_data:/backup/empty_data \
@ -48,7 +48,7 @@ docker run --rm \
--entrypoint backup \ --entrypoint backup \
offen/docker-volume-backup:${TEST_VERSION:-canary} offen/docker-volume-backup:${TEST_VERSION:-canary}
docker run --rm \ docker run --rm -q \
-v backup_data:/data alpine \ -v backup_data:/data alpine \
ash -c 'tar -xvf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db && test -d /backup/empty_data' ash -c 'tar -xvf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db && test -d /backup/empty_data'

View File

@ -8,7 +8,7 @@ current_test=$(basename $(pwd))
mkdir -p ./local mkdir -p ./local
docker compose up -d docker compose up -d --quiet-pull
sleep 30 # mariadb likes to take a bit before responding sleep 30 # mariadb likes to take a bit before responding
docker compose exec backup backup docker compose exec backup backup

View File

@ -8,7 +8,7 @@ current_test=$(basename $(pwd))
mkdir -p local mkdir -p local
docker compose up -d docker compose up -d --quiet-pull
# sleep until a backup is guaranteed to have happened on the 1 minute schedule # sleep until a backup is guaranteed to have happened on the 1 minute schedule
sleep 100 sleep 100

1
test/dropbox/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
user_v2_ready.yaml

View File

@ -9,7 +9,7 @@ services:
ports: ports:
- 8080:8080 - 8080:8080
volumes: volumes:
- ./user_v2.yaml:/etc/openapi/user_v2.yaml - ./user_v2_ready.yaml:/etc/openapi/user_v2.yaml
oauth2_mock: oauth2_mock:
image: ghcr.io/navikt/mock-oauth2-server:1.0.0 image: ghcr.io/navikt/mock-oauth2-server:1.0.0

View File

@ -6,7 +6,11 @@ cd "$(dirname "$0")"
. ../util.sh . ../util.sh
current_test=$(basename $(pwd)) current_test=$(basename $(pwd))
docker compose up -d cp user_v2.yaml user_v2_ready.yaml
sudo sed -i 's/SERVER_MODIFIED_1/'"$(date "+%Y-%m-%dT%H:%M:%SZ")/g" user_v2_ready.yaml
sudo sed -i 's/SERVER_MODIFIED_2/'"$(date "+%Y-%m-%dT%H:%M:%SZ" -d "14 days ago")/g" user_v2_ready.yaml
docker compose up -d --quiet-pull
sleep 5 sleep 5
logs=$(docker compose exec -T backup backup) logs=$(docker compose exec -T backup backup)
@ -17,14 +21,13 @@ expect_running_containers "4"
echo "$logs" echo "$logs"
if echo "$logs" | grep -q "ERROR"; then if echo "$logs" | grep -q "ERROR"; then
fail "Backup failed, errors reported: $dvb_logs" fail "Backup failed, errors reported: $logs"
else else
pass "Backup succeeded, no errors reported." pass "Backup succeeded, no errors reported."
fi fi
# The second part of this test checks if backups get deleted when the retention # The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted) # is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5 sleep 5
@ -34,7 +37,29 @@ echo "$logs"
if echo "$logs" | grep -q "Refusing to do so, please check your configuration"; then if echo "$logs" | grep -q "Refusing to do so, please check your configuration"; then
pass "Remote backups have not been deleted." pass "Remote backups have not been deleted."
else else
fail "Remote backups would have been deleted: $dvb_logs" fail "Remote backups would have been deleted: $logs"
fi fi
# The third part of this test checks if old backups get deleted when the retention
# is set to 7 days (which it should)
BACKUP_RETENTION_DAYS="7" docker compose up -d
sleep 5
info "Create second backup and prune"
logs=$(docker compose exec -T backup backup)
echo "$logs"
if echo "$logs" | grep -q "Pruned 1 out of 2 backups as their age exceeded the configured retention period"; then
pass "Old remote backup has been pruned, new one is still present."
elif echo "$logs" | grep -q "ERROR"; then
fail "Pruning failed, errors reported: $logs"
elif echo "$logs" | grep -q "None of 1 existing backups were pruned"; then
fail "Pruning failed, old backup has not been pruned: $logs"
else
fail "Pruning failed, unknown result: $logs"
fi
docker compose down --volumes docker compose down --volumes
rm user_v2_ready.yaml

View File

@ -1618,7 +1618,7 @@ paths:
$ref: '#/components/schemas/ListFolderResult' $ref: '#/components/schemas/ListFolderResult'
examples: examples:
Testexample: Testexample:
value: { "cursor": "ZtkX9_EHj3x7PMkVuFIhwKYXEpwpLwyxp9vMKomUhllil9q7eWiAu", "entries": [ { ".tag": "file", "client_modified": "2015-05-12T15:50:38Z", "content_hash": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "file_lock_info": { "created": "2015-05-12T15:50:38Z", "is_lockholder": true, "lockholder_name": "Imaginary User" }, "has_explicit_shared_members": false, "id": "id:a4ayc_80_OEAAAAAAAAAXw", "is_downloadable": true, "name": "test-2021-08-29T04-00-00.tar.gz", "path_display": "/somepath/test-2021-08-29T04-00-00.tar.gz", "path_lower": "/somepath/test-2021-08-29T04-00-00.tar.gz", "property_groups": [ { "fields": [ { "name": "Security Policy", "value": "Confidential" } ], "template_id": "ptid:1a5n2i6d3OYEAAAAAAAAAYa" } ], "rev": "a1c10ce0dd78", "server_modified": "2015-05-12T15:50:38Z", "sharing_info": { "modified_by": "dbid:AAH4f99T0taONIb-OurWxbNQ6ywGRopQngc", "parent_shared_folder_id": "84528192421", "read_only": true }, "size": 7212 }, { ".tag": "folder", "id": "id:a4ayc_80_OEAAAAAAAAAXz", "name": "math", "path_display": "/Homework/math", "path_lower": "/homework/math", "property_groups": [ { "fields": [ { "name": "Security Policy", "value": "Confidential" } ], "template_id": "ptid:1a5n2i6d3OYEAAAAAAAAAYa" } ], "sharing_info": { "no_access": false, "parent_shared_folder_id": "84528192421", "read_only": false, "traverse_only": false } } ], "has_more": true } value: { "cursor": "ZtkX9_EHj3x7PMkVuFIhwKYXEpwpLwyxp9vMKomUhllil9q7eWiAu", "entries": [ { ".tag": "file", "client_modified": "2015-05-12T15:50:38Z", "content_hash": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "file_lock_info": { "created": "2015-05-12T15:50:38Z", "is_lockholder": true, "lockholder_name": "Imaginary User" }, "has_explicit_shared_members": false, "id": "id:a4ayc_80_OEAAAAAAAAAXw", "is_downloadable": true, "name": "test-2021-08-29T04-00-00.tar.gz", "path_display": "/somepath/test-2021-08-29T04-00-00.tar.gz", "path_lower": 
"/somepath/test-2021-08-29T04-00-00.tar.gz", "property_groups": [ { "fields": [ { "name": "Security Policy", "value": "Confidential" } ], "template_id": "ptid:1a5n2i6d3OYEAAAAAAAAAYa" } ], "rev": "a1c10ce0dd78", "server_modified": "SERVER_MODIFIED_1", "sharing_info": { "modified_by": "dbid:AAH4f99T0taONIb-OurWxbNQ6ywGRopQngc", "parent_shared_folder_id": "84528192421", "read_only": true }, "size": 7212 }, { ".tag": "folder", "id": "id:a4ayc_80_OEAAAAAAAAAXz", "name": "math", "path_display": "/Homework/math", "path_lower": "/homework/math", "property_groups": [ { "fields": [ { "name": "Security Policy", "value": "Confidential" } ], "template_id": "ptid:1a5n2i6d3OYEAAAAAAAAAYa" } ], "sharing_info": { "no_access": false, "parent_shared_folder_id": "84528192421", "read_only": false, "traverse_only": false } } ], "has_more": true }
default: default:
description: Error description: Error
content: content:
@ -1749,7 +1749,7 @@ paths:
$ref: '#/components/schemas/ListFolderResult' $ref: '#/components/schemas/ListFolderResult'
examples: examples:
Testexample: Testexample:
value: { "cursor": "ZtkX9_EHj3x7PMkVuFIhwKYXEpwpLwyxp9vMKomUhllil9q7eWiAu", "entries": [ { ".tag": "file", "client_modified": "2015-05-12T15:50:38Z", "content_hash": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "file_lock_info": { "created": "2015-05-12T12:50:38Z", "is_lockholder": true, "lockholder_name": "Imaginary User" }, "has_explicit_shared_members": false, "id": "id:a4ayc_80_OEAAAAAAAAAXw", "is_downloadable": true, "name": "test-2021-08-29T02-00-00.tar.gz", "path_display": "/somepath/test-2021-08-29T02-00-00.tar.gz", "path_lower": "/somepath/test-2021-08-29T02-00-00.tar.gz", "property_groups": [ { "fields": [ { "name": "Security Policy", "value": "Confidential" } ], "template_id": "ptid:1a5n2i6d3OYEAAAAAAAAAYa" } ], "rev": "a1c10ce0dd78", "server_modified": "2015-05-12T12:50:38Z", "sharing_info": { "modified_by": "dbid:AAH4f99T0taONIb-OurWxbNQ6ywGRopQngc", "parent_shared_folder_id": "84528192421", "read_only": true }, "size": 7212 } ], "has_more": false } value: { "cursor": "ZtkX9_EHj3x7PMkVuFIhwKYXEpwpLwyxp9vMKomUhllil9q7eWiAu", "entries": [ { ".tag": "file", "client_modified": "2015-05-12T15:50:38Z", "content_hash": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "file_lock_info": { "created": "2015-05-12T12:50:38Z", "is_lockholder": true, "lockholder_name": "Imaginary User" }, "has_explicit_shared_members": false, "id": "id:a4ayc_80_OEAAAAAAAAAXw", "is_downloadable": true, "name": "test-2021-08-29T02-00-00.tar.gz", "path_display": "/somepath/test-2021-08-29T02-00-00.tar.gz", "path_lower": "/somepath/test-2021-08-29T02-00-00.tar.gz", "property_groups": [ { "fields": [ { "name": "Security Policy", "value": "Confidential" } ], "template_id": "ptid:1a5n2i6d3OYEAAAAAAAAAYa" } ], "rev": "a1c10ce0dd78", "server_modified": "SERVER_MODIFIED_2", "sharing_info": { "modified_by": "dbid:AAH4f99T0taONIb-OurWxbNQ6ywGRopQngc", "parent_shared_folder_id": "84528192421", "read_only": true }, "size": 7212 } ], "has_more": false }
default: default:
description: Error description: Error
content: content:

View File

@ -13,7 +13,7 @@ export TEST_VERSION="${TEST_VERSION:-canary}-with-rsync"
docker build . -t offen/docker-volume-backup:$TEST_VERSION --build-arg version=$BASE_VERSION docker build . -t offen/docker-volume-backup:$TEST_VERSION --build-arg version=$BASE_VERSION
docker compose up -d docker compose up -d --quiet-pull
sleep 5 sleep 5
docker compose exec backup backup docker compose exec backup backup

View File

@ -8,7 +8,7 @@ current_test=$(basename $(pwd))
mkdir -p local mkdir -p local
docker compose up -d docker compose up -d --quiet-pull
sleep 5 sleep 5
docker compose exec backup backup docker compose exec backup backup

View File

@ -8,7 +8,7 @@ current_test=$(basename $(pwd))
mkdir -p local mkdir -p local
docker compose up -d docker compose up -d --quiet-pull
sleep 5 sleep 5
docker compose exec backup backup docker compose exec backup backup

View File

@ -8,7 +8,7 @@ current_test=$(basename $(pwd))
mkdir -p local mkdir -p local
docker compose up -d docker compose up -d --quiet-pull
sleep 5 sleep 5
# A symlink for a known file in the volume is created so the test can check # A symlink for a known file in the volume is created so the test can check
@ -41,7 +41,6 @@ pass "Found symlink to latest version in local backup."
# The second part of this test checks if backups get deleted when the retention # The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted) # is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5 sleep 5
@ -52,4 +51,23 @@ if [ "$(find ./local -type f | wc -l)" != "1" ]; then
fi fi
pass "Local backups have not been deleted." pass "Local backups have not been deleted."
# The third part of this test checks if old backups get deleted when the retention
# is set to 7 days (which it should)
BACKUP_RETENTION_DAYS="7" docker compose up -d
sleep 5
info "Create first backup with no prune"
docker compose exec backup backup
touch -r ./local/test-hostnametoken.tar.gz -d "14 days ago" ./local/test-hostnametoken-old.tar.gz
info "Create second backup and prune"
docker compose exec backup backup
test ! -f ./local/test-hostnametoken-old.tar.gz
test -f ./local/test-hostnametoken.tar.gz
pass "Old remote backup has been pruned, new one is still present."
docker compose down --volumes docker compose down --volumes

View File

@ -8,7 +8,7 @@ current_test=$(basename $(pwd))
mkdir -p local mkdir -p local
docker compose up -d docker compose up -d --quiet-pull
sleep 5 sleep 5
GOTIFY_TOKEN=$(curl -sSLX POST -H 'Content-Type: application/json' -d '{"name":"test"}' http://admin:custom@localhost:8080/application | jq -r '.token') GOTIFY_TOKEN=$(curl -sSLX POST -H 'Content-Type: application/json' -d '{"name":"test"}' http://admin:custom@localhost:8080/application | jq -r '.token')

View File

@ -9,7 +9,7 @@ current_test=$(basename $(pwd))
mkdir -p local mkdir -p local
docker compose up -d docker compose up -d --quiet-pull
sleep 5 sleep 5
docker compose exec backup backup docker compose exec backup backup

View File

@ -0,0 +1,50 @@
version: '3'
services:
# MinIO provides the S3-compatible backend for this prune-skipping test.
minio:
image: minio/minio:RELEASE.2020-08-04T23-10-51Z
environment:
MINIO_ROOT_USER: test
MINIO_ROOT_PASSWORD: test
MINIO_ACCESS_KEY: test
MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
volumes:
- minio_backup_data:/data
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
hostname: hostnametoken
depends_on:
- minio
restart: always
environment:
AWS_ACCESS_KEY_ID: test
AWS_SECRET_ACCESS_KEY: GMusLtUmILge2by+z890kQ
AWS_ENDPOINT: minio:9000
AWS_ENDPOINT_PROTO: http
AWS_S3_BUCKET_NAME: backup
BACKUP_FILENAME_EXPAND: 'true'
BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
# Feb 31 never occurs, so the schedule never fires; backups are
# triggered manually by the test script via `docker compose exec`.
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: 7
BACKUP_PRUNING_LEEWAY: 5s
BACKUP_PRUNING_PREFIX: test
BACKUP_LATEST_SYMLINK: test-$$HOSTNAME.latest.tar.gz
# Exclude the s3 backend from pruning; the local (/archive) backend
# is still pruned. The test script overrides this per-run as needed.
BACKUP_SKIP_BACKENDS_FROM_PRUNE: 's3'
volumes:
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
- ./local:/archive
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
app_data:
# Fixed name so the test script can mount the volume from plain
# `docker run` commands to plant/inspect backup files.
minio_backup_data:
name: minio_backup_data

70
test/pruning/run.sh Normal file
View File

@ -0,0 +1,70 @@
#!/bin/sh
# Tests prune-skipping with multiple backends (local, s3)
# Pruning itself is tested individually for each storage backend
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker compose up -d --quiet-pull
sleep 5
# Take an initial backup so both backends hold a fresh archive.
docker compose exec backup backup
sleep 5
expect_running_containers "3"
# Plant a stale copy in the local backend, dated 14 days in the past,
# so it falls outside the configured retention window.
# NOTE(review): both -r and -d are passed; with GNU touch the last
# timestamp option wins — confirm -d is the intended one.
touch -r ./local/test-hostnametoken.tar.gz -d "14 days ago" ./local/test-hostnametoken-old.tar.gz
# Plant the same stale copy in the s3 (MinIO) backend by touching the
# object file inside the MinIO data volume. 1209600 s = 14 days.
docker run --rm \
-v minio_backup_data:/minio_data \
alpine \
ash -c 'touch -d@$(( $(date +%s) - 1209600 )) /minio_data/backup/test-hostnametoken-old.tar.gz'
# Skip s3 backend from prune
docker compose up -d
sleep 5
info "Create backup with no prune for s3 backend"
docker compose exec backup backup
info "Check if old backup has been pruned (local)"
test ! -f ./local/test-hostnametoken-old.tar.gz
info "Check if old backup has NOT been pruned (s3)"
docker run --rm \
-v minio_backup_data:/minio_data \
alpine \
ash -c 'test -f /minio_data/backup/test-hostnametoken-old.tar.gz'
pass "Old remote backup has been pruned locally, skipped S3 backend is untouched."
# Skip local and s3 backend from prune (all backends)
# Re-create the stale local file (it was pruned in the step above).
touch -r ./local/test-hostnametoken.tar.gz -d "14 days ago" ./local/test-hostnametoken-old.tar.gz
docker compose up -d
sleep 5
info "Create backup with no prune for both backends"
# Override the skip list for this invocation only via the environment.
docker compose exec -e BACKUP_SKIP_BACKENDS_FROM_PRUNE="s3,local" backup backup
info "Check if old backup has NOT been pruned (local)"
test -f ./local/test-hostnametoken-old.tar.gz
info "Check if old backup has NOT been pruned (s3)"
docker run --rm \
-v minio_backup_data:/minio_data \
alpine \
ash -c 'test -f /minio_data/backup/test-hostnametoken-old.tar.gz'
pass "Skipped all backends while pruning."
docker compose down --volumes

29
test/s3/run.sh Executable file → Normal file
View File

@ -6,11 +6,9 @@ cd "$(dirname "$0")"
. ../util.sh . ../util.sh
current_test=$(basename $(pwd)) current_test=$(basename $(pwd))
docker compose up -d docker compose up -d --quiet-pull
sleep 5 sleep 5
# A symlink for a known file in the volume is created so the test can check
# whether symlinks are preserved on backup.
docker compose exec backup backup docker compose exec backup backup
sleep 5 sleep 5
@ -26,7 +24,6 @@ pass "Found relevant files in untared remote backups."
# The second part of this test checks if backups get deleted when the retention # The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted) # is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5 sleep 5
@ -39,4 +36,28 @@ docker run --rm \
pass "Remote backups have not been deleted." pass "Remote backups have not been deleted."
# The third part of this test checks if old backups get deleted when the retention
# is set to 7 days (which it should)
BACKUP_RETENTION_DAYS="7" docker compose up -d
sleep 5
info "Create first backup with no prune"
docker compose exec backup backup
docker run --rm \
-v minio_backup_data:/minio_data \
alpine \
ash -c 'touch -d@$(( $(date +%s) - 1209600 )) /minio_data/backup/test-hostnametoken-old.tar.gz'
info "Create second backup and prune"
docker compose exec backup backup
docker run --rm \
-v minio_backup_data:/minio_data \
alpine \
ash -c 'test ! -f /minio_data/backup/test-hostnametoken-old.tar.gz && test -f /minio_data/backup/test-hostnametoken.tar.gz'
pass "Old remote backup has been pruned, new one is still present."
docker compose down --volumes docker compose down --volumes

29
test/ssh/run.sh Executable file → Normal file
View File

@ -8,7 +8,7 @@ current_test=$(basename $(pwd))
ssh-keygen -t rsa -m pem -b 4096 -N "test1234" -f id_rsa -C "docker-volume-backup@local" ssh-keygen -t rsa -m pem -b 4096 -N "test1234" -f id_rsa -C "docker-volume-backup@local"
docker compose up -d docker compose up -d --quiet-pull
sleep 5 sleep 5
docker compose exec backup backup docker compose exec backup backup
@ -26,7 +26,6 @@ pass "Found relevant files in decrypted and untared remote backups."
# The second part of this test checks if backups get deleted when the retention # The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted) # is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5 sleep 5
@ -39,5 +38,31 @@ docker run --rm \
pass "Remote backups have not been deleted." pass "Remote backups have not been deleted."
# The third part of this test checks if old backups get deleted when the retention
# is set to 7 days (which it should)
BACKUP_RETENTION_DAYS="7" docker compose up -d
sleep 5
info "Create first backup with no prune"
docker compose exec backup backup
# Set the modification date of the old backup to 14 days ago
docker run --rm \
-v ssh_backup_data:/ssh_data \
--user 1000 \
alpine \
ash -c 'touch -d@$(( $(date +%s) - 1209600 )) /ssh_data/test-hostnametoken-old.tar.gz'
info "Create second backup and prune"
docker compose exec backup backup
docker run --rm \
-v ssh_backup_data:/ssh_data \
alpine \
ash -c 'test ! -f /ssh_data/test-hostnametoken-old.tar.gz && test -f /ssh_data/test-hostnametoken.tar.gz'
pass "Old remote backup has been pruned, new one is still present."
docker compose down --volumes docker compose down --volumes
rm -f id_rsa id_rsa.pub rm -f id_rsa id_rsa.pub

View File

@ -6,7 +6,7 @@ cd $(dirname $0)
. ../util.sh . ../util.sh
current_test=$(basename $(pwd)) current_test=$(basename $(pwd))
docker compose up -d docker compose up -d --quiet-pull
user_name=testuser user_name=testuser
docker exec user-alpine-1 adduser --disabled-password "$user_name" docker exec user-alpine-1 adduser --disabled-password "$user_name"

29
test/webdav/run.sh Executable file → Normal file
View File

@ -6,7 +6,7 @@ cd "$(dirname "$0")"
. ../util.sh . ../util.sh
current_test=$(basename $(pwd)) current_test=$(basename $(pwd))
docker compose up -d docker compose up -d --quiet-pull
sleep 5 sleep 5
docker compose exec backup backup docker compose exec backup backup
@ -24,7 +24,6 @@ pass "Found relevant files in untared remote backup."
# The second part of this test checks if backups get deleted when the retention # The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted) # is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5 sleep 5
@ -37,4 +36,30 @@ docker run --rm \
pass "Remote backups have not been deleted." pass "Remote backups have not been deleted."
# The third part of this test checks if old backups get deleted when the retention
# is set to 7 days (which it should)
BACKUP_RETENTION_DAYS="7" docker compose up -d
sleep 5
info "Create first backup with no prune"
docker compose exec backup backup
# Set the modification date of the old backup to 14 days ago
docker run --rm \
-v webdav_backup_data:/webdav_data \
--user 82 \
alpine \
ash -c 'touch -d@$(( $(date +%s) - 1209600 )) /webdav_data/data/my/new/path/test-hostnametoken-old.tar.gz'
info "Create second backup and prune"
docker compose exec backup backup
docker run --rm \
-v webdav_backup_data:/webdav_data \
alpine \
ash -c 'test ! -f /webdav_data/data/my/new/path/test-hostnametoken-old.tar.gz && test -f /webdav_data/data/my/new/path/test-hostnametoken.tar.gz'
pass "Old remote backup has been pruned, new one is still present."
docker compose down --volumes docker compose down --volumes

View File

@ -11,7 +11,7 @@ docker volume create app_data
mkdir -p local mkdir -p local
docker run -d \ docker run -d -q \
--name offen \ --name offen \
--network test_network \ --network test_network \
-v app_data:/var/opt/offen/ \ -v app_data:/var/opt/offen/ \
@ -19,7 +19,7 @@ docker run -d \
sleep 10 sleep 10
docker run --rm \ docker run --rm -q \
--network test_network \ --network test_network \
-v app_data:/backup/app_data \ -v app_data:/backup/app_data \
-v ./local:/archive \ -v ./local:/archive \