diff --git a/README.md b/README.md
index cb20830..418393f 100644
--- a/README.md
+++ b/README.md
@@ -205,6 +205,15 @@ You can populate below template according to your requirements and use it as you
 
 # BACKUP_EXCLUDE_REGEXP="\.log$"
 
+# Exclude one or many storage backends from the pruning process.
+# E.g. with one backend excluded: BACKUP_SKIP_BACKENDS_FROM_PRUNE=s3
+# E.g. with multiple backends excluded: BACKUP_SKIP_BACKENDS_FROM_PRUNE=s3,webdav
+# Available backends are: S3, WebDAV, SSH, Local, Dropbox, Azure
+# Note: Backend names are matched case-insensitively.
+# Default: All backends get pruned.
+
+# BACKUP_SKIP_BACKENDS_FROM_PRUNE=
+
 ########### BACKUP STORAGE
 
 # The name of the remote bucket that should be used for storing backups. If
diff --git a/cmd/backup/config.go b/cmd/backup/config.go
index 857b6ea..d6805f9 100644
--- a/cmd/backup/config.go
+++ b/cmd/backup/config.go
@@ -41,6 +41,7 @@ type Config struct {
 	BackupStopContainerLabel string        `split_words:"true" default:"true"`
 	BackupFromSnapshot       bool          `split_words:"true"`
 	BackupExcludeRegexp      RegexpDecoder `split_words:"true"`
+	BackupSkipBackendsFromPrune []string   `split_words:"true"`
 	GpgPassphrase            string        `split_words:"true"`
 	NotificationURLs         []string      `envconfig:"NOTIFICATION_URLS"`
 	NotificationLevel        string        `split_words:"true" default:"error"`
diff --git a/cmd/backup/script.go b/cmd/backup/script.go
index 62d80ed..5a455c3 100644
--- a/cmd/backup/script.go
+++ b/cmd/backup/script.go
@@ -14,6 +14,8 @@ import (
 	"os"
 	"path"
 	"path/filepath"
+	"slices"
+	"strings"
 	"text/template"
 	"time"
 
@@ -591,6 +593,12 @@ func (s *script) pruneBackups() error {
 	for _, backend := range s.storages {
 		b := backend
 		eg.Go(func() error {
+			if skipPrune(b.Name(), s.c.BackupSkipBackendsFromPrune) {
+				s.logger.Info(
+					fmt.Sprintf("Skipping pruning for backend `%s`.", b.Name()),
+				)
+				return nil
+			}
 			stats, err := b.Prune(deadline, s.c.BackupPruningPrefix)
 			if err != nil {
 				return err
@@ -622,3 +630,14 @@ func (s *script) must(err error) {
 		panic(err)
 	}
 }
+
+// skipPrune returns true if the given backend name is contained in the
+// list of skipped backends.
+func skipPrune(name string, skippedBackends []string) bool {
+	return slices.ContainsFunc(
+		skippedBackends,
+		func(b string) bool {
+			return strings.EqualFold(b, name) // ignore case on both sides
+		},
+	)
+}
diff --git a/test/azure/run.sh b/test/azure/run.sh
index 54de328..f645535 100644
--- a/test/azure/run.sh
+++ b/test/azure/run.sh
@@ -6,35 +6,61 @@ cd "$(dirname "$0")"
 . ../util.sh
 current_test=$(basename $(pwd))
 
-docker compose up -d
+download_az () {
+  docker compose run --rm az_cli \
+    az storage blob download -f /dump/$1.tar.gz -c test-container -n path/to/backup/$1.tar.gz
+}
+
+docker compose up -d --quiet-pull
 sleep 5
 
-# A symlink for a known file in the volume is created so the test can check
-# whether symlinks are preserved on backup.
 docker compose exec backup backup
 
 sleep 5
 
 expect_running_containers "3"
 
-docker compose run --rm az_cli \
-  az storage blob download -f /dump/test.tar.gz -c test-container -n path/to/backup/test.tar.gz
+download_az "test"
 
 tar -xvf ./local/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
 
 pass "Found relevant files in untared remote backups."
 
 # The second part of this test checks if backups get deleted when the retention
 # is set to 0 days (which it should not as it would mean all backups get deleted)
-# TODO: find out if we can test actual deletion without having to wait for a day
 BACKUP_RETENTION_DAYS="0" docker compose up -d
 sleep 5
 
 docker compose exec backup backup
 
-docker compose run --rm az_cli \
-  az storage blob download -f /dump/test.tar.gz -c test-container -n path/to/backup/test.tar.gz
+download_az "test"
 
 test -f ./local/test.tar.gz
 
 pass "Remote backups have not been deleted."
 
+# The third part of this test checks if old backups get deleted when the retention
+# is set to 7 days (which it should)
+
+BACKUP_RETENTION_DAYS="7" docker compose up -d
+sleep 5
+
+info "Create first backup with no prune"
+docker compose exec backup backup
+
+sudo date --set="14 days ago"
+
+docker compose run --rm az_cli \
+  az storage blob upload -f /dump/test.tar.gz -c test-container -n path/to/backup/test-old.tar.gz
+
+sudo date --set="14 days"
+
+info "Create second backup and prune"
+docker compose exec backup backup
+
+info "Download first backup which should be pruned"
+download_az "test-old" || true
+test ! -f ./local/test-old.tar.gz
+test -f ./local/test.tar.gz
+
+pass "Old remote backup has been pruned, new one is still present."
+
 docker compose down --volumes
diff --git a/test/certs/run.sh b/test/certs/run.sh
index f8446d5..9f5e2d0 100644
--- a/test/certs/run.sh
+++ b/test/certs/run.sh
@@ -24,7 +24,7 @@ openssl x509 -req -passin pass:test \
 
 openssl x509 -in minio.crt -noout -text
 
-docker compose up -d
+docker compose up -d --quiet-pull
 sleep 5
 
 docker compose exec backup backup
diff --git a/test/cli/run.sh b/test/cli/run.sh
index 07b1906..a7f5d6e 100755
--- a/test/cli/run.sh
+++ b/test/cli/run.sh
@@ -13,7 +13,7 @@ docker volume create app_data
 # correctly. It is not supposed to hold any data.
 docker volume create empty_data
 
-docker run -d \
+docker run -d -q \
   --name minio \
   --network test_network \
   --env MINIO_ROOT_USER=test \
@@ -25,7 +25,7 @@ docker run -d \
 
 docker exec minio mkdir -p /data/backup
 
-docker run -d \
+docker run -d -q \
   --name offen \
   --network test_network \
   -v app_data:/var/opt/offen/ \
@@ -33,7 +33,7 @@ docker run -d \
 
 sleep 10
 
-docker run --rm \
+docker run --rm -q \
   --network test_network \
   -v app_data:/backup/app_data \
   -v empty_data:/backup/empty_data \
@@ -48,7 +48,7 @@ docker run --rm \
   --entrypoint backup \
   offen/docker-volume-backup:${TEST_VERSION:-canary}
 
-docker run --rm \
+docker run --rm -q \
   -v backup_data:/data alpine \
   ash -c 'tar -xvf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db && test -d /backup/empty_data'
 
diff --git a/test/commands/run.sh b/test/commands/run.sh
index aa33e1f..15006ae 100644
--- a/test/commands/run.sh
+++ b/test/commands/run.sh
@@ -8,7 +8,7 @@ current_test=$(basename $(pwd))
 
 mkdir -p ./local
 
-docker compose up -d
+docker compose up -d --quiet-pull
 sleep 30 # mariadb likes to take a bit before responding
 
 docker compose exec backup backup
diff --git a/test/confd/run.sh b/test/confd/run.sh
index ed2f911..3db626d 100755
--- a/test/confd/run.sh
+++ b/test/confd/run.sh
@@ -8,7 +8,7 @@ current_test=$(basename $(pwd))
 
 mkdir -p local
 
-docker compose up -d
+docker compose up -d --quiet-pull
 
 # sleep until a backup is guaranteed to have happened on the 1 minute schedule
 sleep 100
diff --git a/test/dropbox/.gitignore b/test/dropbox/.gitignore
new file mode 100644
index 0000000..9724724
--- /dev/null
+++ b/test/dropbox/.gitignore
@@ -0,0 +1 @@
+user_v2_ready.yaml
diff --git a/test/dropbox/docker-compose.yml b/test/dropbox/docker-compose.yml
index 0b3e581..8961456 100644
--- a/test/dropbox/docker-compose.yml
+++ b/test/dropbox/docker-compose.yml
@@ -9,7 +9,7 @@ services:
     ports:
       - 8080:8080
     volumes:
-      - ./user_v2.yaml:/etc/openapi/user_v2.yaml
+      - ./user_v2_ready.yaml:/etc/openapi/user_v2.yaml
 
   oauth2_mock:
     image: ghcr.io/navikt/mock-oauth2-server:1.0.0
diff --git a/test/dropbox/run.sh b/test/dropbox/run.sh
index e581c12..de208e8 100644
--- a/test/dropbox/run.sh
+++ b/test/dropbox/run.sh
@@ -6,7 +6,11 @@ cd "$(dirname "$0")"
 . ../util.sh
 current_test=$(basename $(pwd))
 
-docker compose up -d
+cp user_v2.yaml user_v2_ready.yaml
+sudo sed -i 's/SERVER_MODIFIED_1/'"$(date "+%Y-%m-%dT%H:%M:%SZ")/g" user_v2_ready.yaml
+sudo sed -i 's/SERVER_MODIFIED_2/'"$(date "+%Y-%m-%dT%H:%M:%SZ" -d "14 days ago")/g" user_v2_ready.yaml
+
+docker compose up -d --quiet-pull
 sleep 5
 
 logs=$(docker compose exec -T backup backup)
@@ -17,14 +21,13 @@ expect_running_containers "4"
 
 echo "$logs"
 if echo "$logs" | grep -q "ERROR"; then
-  fail "Backup failed, errors reported: $dvb_logs"
+  fail "Backup failed, errors reported: $logs"
 else
   pass "Backup succeeded, no errors reported."
 fi
 
 # The second part of this test checks if backups get deleted when the retention
 # is set to 0 days (which it should not as it would mean all backups get deleted)
-# TODO: find out if we can test actual deletion without having to wait for a day
 BACKUP_RETENTION_DAYS="0" docker compose up -d
 sleep 5
 
@@ -34,7 +37,29 @@ echo "$logs"
 if echo "$logs" | grep -q "Refusing to do so, please check your configuration"; then
   pass "Remote backups have not been deleted."
else - fail "Remote backups would have been deleted: $dvb_logs" + fail "Remote backups would have been deleted: $logs" fi +# The third part of this test checks if old backups get deleted when the retention +# is set to 7 days (which it should) + +BACKUP_RETENTION_DAYS="7" docker compose up -d +sleep 5 + +info "Create second backup and prune" +logs=$(docker compose exec -T backup backup) + +echo "$logs" +if echo "$logs" | grep -q "Pruned 1 out of 2 backups as their age exceeded the configured retention period"; then + pass "Old remote backup has been pruned, new one is still present." +elif echo "$logs" | grep -q "ERROR"; then + fail "Pruning failed, errors reported: $logs" +elif echo "$logs" | grep -q "None of 1 existing backups were pruned"; then + fail "Pruning failed, old backup has not been pruned: $logs" +else + fail "Pruning failed, unknown result: $logs" +fi + + docker compose down --volumes +rm user_v2_ready.yaml diff --git a/test/dropbox/user_v2.yaml b/test/dropbox/user_v2.yaml index ca9edd7..fd70e1e 100644 --- a/test/dropbox/user_v2.yaml +++ b/test/dropbox/user_v2.yaml @@ -1618,7 +1618,7 @@ paths: $ref: '#/components/schemas/ListFolderResult' examples: Testexample: - value: { "cursor": "ZtkX9_EHj3x7PMkVuFIhwKYXEpwpLwyxp9vMKomUhllil9q7eWiAu", "entries": [ { ".tag": "file", "client_modified": "2015-05-12T15:50:38Z", "content_hash": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "file_lock_info": { "created": "2015-05-12T15:50:38Z", "is_lockholder": true, "lockholder_name": "Imaginary User" }, "has_explicit_shared_members": false, "id": "id:a4ayc_80_OEAAAAAAAAAXw", "is_downloadable": true, "name": "test-2021-08-29T04-00-00.tar.gz", "path_display": "/somepath/test-2021-08-29T04-00-00.tar.gz", "path_lower": "/somepath/test-2021-08-29T04-00-00.tar.gz", "property_groups": [ { "fields": [ { "name": "Security Policy", "value": "Confidential" } ], "template_id": "ptid:1a5n2i6d3OYEAAAAAAAAAYa" } ], "rev": "a1c10ce0dd78", "server_modified": "2015-05-12T15:50:38Z", "sharing_info": { "modified_by": "dbid:AAH4f99T0taONIb-OurWxbNQ6ywGRopQngc", "parent_shared_folder_id": "84528192421", "read_only": true }, "size": 7212 }, { ".tag": "folder", "id": "id:a4ayc_80_OEAAAAAAAAAXz", "name": "math", "path_display": "/Homework/math", "path_lower": "/homework/math", "property_groups": [ { "fields": [ { "name": "Security Policy", "value": "Confidential" } ], "template_id": "ptid:1a5n2i6d3OYEAAAAAAAAAYa" } ], "sharing_info": { "no_access": false, "parent_shared_folder_id": "84528192421", "read_only": false, "traverse_only": false } } ], "has_more": true } + value: { "cursor": "ZtkX9_EHj3x7PMkVuFIhwKYXEpwpLwyxp9vMKomUhllil9q7eWiAu", "entries": [ { ".tag": "file", "client_modified": "2015-05-12T15:50:38Z", "content_hash": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "file_lock_info": { "created": "2015-05-12T15:50:38Z", "is_lockholder": true, "lockholder_name": "Imaginary User" }, "has_explicit_shared_members": false, "id": "id:a4ayc_80_OEAAAAAAAAAXw", "is_downloadable": true, "name": "test-2021-08-29T04-00-00.tar.gz", "path_display": "/somepath/test-2021-08-29T04-00-00.tar.gz", "path_lower": "/somepath/test-2021-08-29T04-00-00.tar.gz", "property_groups": [ { "fields": [ { "name": "Security Policy", "value": "Confidential" } ], "template_id": "ptid:1a5n2i6d3OYEAAAAAAAAAYa" } ], "rev": "a1c10ce0dd78", "server_modified": "SERVER_MODIFIED_1", "sharing_info": { "modified_by": "dbid:AAH4f99T0taONIb-OurWxbNQ6ywGRopQngc", "parent_shared_folder_id": "84528192421", 
"read_only": true }, "size": 7212 }, { ".tag": "folder", "id": "id:a4ayc_80_OEAAAAAAAAAXz", "name": "math", "path_display": "/Homework/math", "path_lower": "/homework/math", "property_groups": [ { "fields": [ { "name": "Security Policy", "value": "Confidential" } ], "template_id": "ptid:1a5n2i6d3OYEAAAAAAAAAYa" } ], "sharing_info": { "no_access": false, "parent_shared_folder_id": "84528192421", "read_only": false, "traverse_only": false } } ], "has_more": true } default: description: Error content: @@ -1749,7 +1749,7 @@ paths: $ref: '#/components/schemas/ListFolderResult' examples: Testexample: - value: { "cursor": "ZtkX9_EHj3x7PMkVuFIhwKYXEpwpLwyxp9vMKomUhllil9q7eWiAu", "entries": [ { ".tag": "file", "client_modified": "2015-05-12T15:50:38Z", "content_hash": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "file_lock_info": { "created": "2015-05-12T12:50:38Z", "is_lockholder": true, "lockholder_name": "Imaginary User" }, "has_explicit_shared_members": false, "id": "id:a4ayc_80_OEAAAAAAAAAXw", "is_downloadable": true, "name": "test-2021-08-29T02-00-00.tar.gz", "path_display": "/somepath/test-2021-08-29T02-00-00.tar.gz", "path_lower": "/somepath/test-2021-08-29T02-00-00.tar.gz", "property_groups": [ { "fields": [ { "name": "Security Policy", "value": "Confidential" } ], "template_id": "ptid:1a5n2i6d3OYEAAAAAAAAAYa" } ], "rev": "a1c10ce0dd78", "server_modified": "2015-05-12T12:50:38Z", "sharing_info": { "modified_by": "dbid:AAH4f99T0taONIb-OurWxbNQ6ywGRopQngc", "parent_shared_folder_id": "84528192421", "read_only": true }, "size": 7212 } ], "has_more": false } + value: { "cursor": "ZtkX9_EHj3x7PMkVuFIhwKYXEpwpLwyxp9vMKomUhllil9q7eWiAu", "entries": [ { ".tag": "file", "client_modified": "2015-05-12T15:50:38Z", "content_hash": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "file_lock_info": { "created": "2015-05-12T12:50:38Z", "is_lockholder": true, "lockholder_name": "Imaginary User" }, "has_explicit_shared_members": false, "id": "id:a4ayc_80_OEAAAAAAAAAXw", "is_downloadable": true, "name": "test-2021-08-29T02-00-00.tar.gz", "path_display": "/somepath/test-2021-08-29T02-00-00.tar.gz", "path_lower": "/somepath/test-2021-08-29T02-00-00.tar.gz", "property_groups": [ { "fields": [ { "name": "Security Policy", "value": "Confidential" } ], "template_id": "ptid:1a5n2i6d3OYEAAAAAAAAAYa" } ], "rev": "a1c10ce0dd78", "server_modified": "SERVER_MODIFIED_2", "sharing_info": { "modified_by": "dbid:AAH4f99T0taONIb-OurWxbNQ6ywGRopQngc", "parent_shared_folder_id": "84528192421", "read_only": true }, "size": 7212 } ], "has_more": false } default: description: Error content: diff --git a/test/extend/run.sh b/test/extend/run.sh index b083e17..9794630 100644 --- a/test/extend/run.sh +++ b/test/extend/run.sh @@ -13,7 +13,7 @@ export TEST_VERSION="${TEST_VERSION:-canary}-with-rsync" docker build . 
 
-docker compose up -d
+docker compose up -d --quiet-pull
 sleep 5
 
 docker compose exec backup backup
diff --git a/test/gpg/run.sh b/test/gpg/run.sh
index 1efaa6b..c4d0d7b 100755
--- a/test/gpg/run.sh
+++ b/test/gpg/run.sh
@@ -8,7 +8,7 @@ current_test=$(basename $(pwd))
 
 mkdir -p local
 
-docker compose up -d
+docker compose up -d --quiet-pull
 sleep 5
 
 docker compose exec backup backup
diff --git a/test/ignore/run.sh b/test/ignore/run.sh
index b5699da..e36e145 100644
--- a/test/ignore/run.sh
+++ b/test/ignore/run.sh
@@ -8,7 +8,7 @@ current_test=$(basename $(pwd))
 
 mkdir -p local
 
-docker compose up -d
+docker compose up -d --quiet-pull
 sleep 5
 
 docker compose exec backup backup
diff --git a/test/local/run.sh b/test/local/run.sh
index 713929c..c55bff6 100755
--- a/test/local/run.sh
+++ b/test/local/run.sh
@@ -8,7 +8,7 @@ current_test=$(basename $(pwd))
 
 mkdir -p local
 
-docker compose up -d
+docker compose up -d --quiet-pull
 sleep 5
 
 # A symlink for a known file in the volume is created so the test can check
@@ -41,7 +41,6 @@ pass "Found symlink to latest version in local backup."
 
 # The second part of this test checks if backups get deleted when the retention
 # is set to 0 days (which it should not as it would mean all backups get deleted)
-# TODO: find out if we can test actual deletion without having to wait for a day
 BACKUP_RETENTION_DAYS="0" docker compose up -d
 sleep 5
 
@@ -52,4 +51,23 @@ if [ "$(find ./local -type f | wc -l)" != "1" ]; then
 fi
 
 pass "Local backups have not been deleted."
 
+# The third part of this test checks if old backups get deleted when the retention
+# is set to 7 days (which it should)
+
+BACKUP_RETENTION_DAYS="7" docker compose up -d
+sleep 5
+
+info "Create first backup with no prune"
+docker compose exec backup backup
+
+touch -r ./local/test-hostnametoken.tar.gz -d "14 days ago" ./local/test-hostnametoken-old.tar.gz
+
+info "Create second backup and prune"
+docker compose exec backup backup
+
+test ! -f ./local/test-hostnametoken-old.tar.gz
+test -f ./local/test-hostnametoken.tar.gz
+
+pass "Old local backup has been pruned, new one is still present."
+
 docker compose down --volumes
diff --git a/test/notifications/run.sh b/test/notifications/run.sh
index 68efcf7..9142ff2 100755
--- a/test/notifications/run.sh
+++ b/test/notifications/run.sh
@@ -8,7 +8,7 @@ current_test=$(basename $(pwd))
 
 mkdir -p local
 
-docker compose up -d
+docker compose up -d --quiet-pull
 sleep 5
 
 GOTIFY_TOKEN=$(curl -sSLX POST -H 'Content-Type: application/json' -d '{"name":"test"}' http://admin:custom@localhost:8080/application | jq -r '.token')
diff --git a/test/ownership/run.sh b/test/ownership/run.sh
index f2a1873..7582cde 100644
--- a/test/ownership/run.sh
+++ b/test/ownership/run.sh
@@ -9,7 +9,7 @@ current_test=$(basename $(pwd))
 
 mkdir -p local
 
-docker compose up -d
+docker compose up -d --quiet-pull
 sleep 5
 
 docker compose exec backup backup
diff --git a/test/pruning/docker-compose.yml b/test/pruning/docker-compose.yml
new file mode 100644
index 0000000..c54a998
--- /dev/null
+++ b/test/pruning/docker-compose.yml
@@ -0,0 +1,50 @@
+version: '3'
+
+services:
+  minio:
+    image: minio/minio:RELEASE.2020-08-04T23-10-51Z
+    environment:
+      MINIO_ROOT_USER: test
+      MINIO_ROOT_PASSWORD: test
+      MINIO_ACCESS_KEY: test
+      MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
+    entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
+    volumes:
+      - minio_backup_data:/data
+
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
+    hostname: hostnametoken
+    depends_on:
+      - minio
+    restart: always
+    environment:
+      AWS_ACCESS_KEY_ID: test
+      AWS_SECRET_ACCESS_KEY: GMusLtUmILge2by+z890kQ
+      AWS_ENDPOINT: minio:9000
+      AWS_ENDPOINT_PROTO: http
+      AWS_S3_BUCKET_NAME: backup
+      BACKUP_FILENAME_EXPAND: 'true'
+      BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
+      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
+      BACKUP_RETENTION_DAYS: 7
+      BACKUP_PRUNING_LEEWAY: 5s
+      BACKUP_PRUNING_PREFIX: test
+      BACKUP_LATEST_SYMLINK: test-$$HOSTNAME.latest.tar.gz
+      BACKUP_SKIP_BACKENDS_FROM_PRUNE: 's3'
+    volumes:
+      - app_data:/backup/app_data:ro
+      - /var/run/docker.sock:/var/run/docker.sock
+      - ./local:/archive
+
+  offen:
+    image: offen/offen:latest
+    labels:
+      - docker-volume-backup.stop-during-backup=true
+    volumes:
+      - app_data:/var/opt/offen
+
+volumes:
+  app_data:
+  minio_backup_data:
+    name: minio_backup_data
diff --git a/test/pruning/run.sh b/test/pruning/run.sh
new file mode 100644
index 0000000..2909fd3
--- /dev/null
+++ b/test/pruning/run.sh
@@ -0,0 +1,70 @@
+#!/bin/sh
+
+# Tests prune-skipping with multiple backends (local, s3)
+# Pruning itself is tested individually for each storage backend
+
+set -e
+
+cd "$(dirname "$0")"
+. ../util.sh
+current_test=$(basename $(pwd))
+
+mkdir -p local
+
+docker compose up -d --quiet-pull
+sleep 5
+
+docker compose exec backup backup
+
+sleep 5
+
+expect_running_containers "3"
+
+touch -r ./local/test-hostnametoken.tar.gz -d "14 days ago" ./local/test-hostnametoken-old.tar.gz
+
+docker run --rm \
+  -v minio_backup_data:/minio_data \
+  alpine \
+  ash -c 'touch -d@$(( $(date +%s) - 1209600 )) /minio_data/backup/test-hostnametoken-old.tar.gz'
+
+# Skip s3 backend from prune
+
+docker compose up -d
+sleep 5
+
+info "Create backup with no prune for s3 backend"
+docker compose exec backup backup
+
+info "Check if old backup has been pruned (local)"
+test ! -f ./local/test-hostnametoken-old.tar.gz
+
+info "Check if old backup has NOT been pruned (s3)"
+docker run --rm \
+  -v minio_backup_data:/minio_data \
+  alpine \
+  ash -c 'test -f /minio_data/backup/test-hostnametoken-old.tar.gz'
+
+pass "Old local backup has been pruned, skipped S3 backend is untouched."
+
+# Skip local and s3 backend from prune (all backends)
+
+touch -r ./local/test-hostnametoken.tar.gz -d "14 days ago" ./local/test-hostnametoken-old.tar.gz
+
+docker compose up -d
+sleep 5
+
+info "Create backup with no prune for both backends"
+docker compose exec -e BACKUP_SKIP_BACKENDS_FROM_PRUNE="s3,local" backup backup
+
+info "Check if old backup has NOT been pruned (local)"
+test -f ./local/test-hostnametoken-old.tar.gz
+
+info "Check if old backup has NOT been pruned (s3)"
+docker run --rm \
+  -v minio_backup_data:/minio_data \
+  alpine \
+  ash -c 'test -f /minio_data/backup/test-hostnametoken-old.tar.gz'
+
+pass "Skipped all backends while pruning."
+
+docker compose down --volumes
diff --git a/test/s3/run.sh b/test/s3/run.sh
old mode 100755
new mode 100644
index 4022ae4..cc93818
--- a/test/s3/run.sh
+++ b/test/s3/run.sh
@@ -6,11 +6,9 @@ cd "$(dirname "$0")"
 . ../util.sh
 current_test=$(basename $(pwd))
 
-docker compose up -d
+docker compose up -d --quiet-pull
 sleep 5
 
-# A symlink for a known file in the volume is created so the test can check
-# whether symlinks are preserved on backup.
 docker compose exec backup backup
 
 sleep 5
@@ -26,7 +24,6 @@ pass "Found relevant files in untared remote backups."
 
 # The second part of this test checks if backups get deleted when the retention
 # is set to 0 days (which it should not as it would mean all backups get deleted)
-# TODO: find out if we can test actual deletion without having to wait for a day
 BACKUP_RETENTION_DAYS="0" docker compose up -d
 sleep 5
 
@@ -39,4 +36,28 @@ docker run --rm \
 
 pass "Remote backups have not been deleted."
 
+# The third part of this test checks if old backups get deleted when the retention
+# is set to 7 days (which it should)
+
+BACKUP_RETENTION_DAYS="7" docker compose up -d
+sleep 5
+
+info "Create first backup with no prune"
+docker compose exec backup backup
+
+docker run --rm \
+  -v minio_backup_data:/minio_data \
+  alpine \
+  ash -c 'touch -d@$(( $(date +%s) - 1209600 )) /minio_data/backup/test-hostnametoken-old.tar.gz'
+
+info "Create second backup and prune"
+docker compose exec backup backup
+
+docker run --rm \
+  -v minio_backup_data:/minio_data \
+  alpine \
+  ash -c 'test ! -f /minio_data/backup/test-hostnametoken-old.tar.gz && test -f /minio_data/backup/test-hostnametoken.tar.gz'
+
+pass "Old remote backup has been pruned, new one is still present."
+
 docker compose down --volumes
diff --git a/test/ssh/run.sh b/test/ssh/run.sh
old mode 100755
new mode 100644
index c86fbd8..c8bc1a9
--- a/test/ssh/run.sh
+++ b/test/ssh/run.sh
@@ -8,7 +8,7 @@ current_test=$(basename $(pwd))
 
 ssh-keygen -t rsa -m pem -b 4096 -N "test1234" -f id_rsa -C "docker-volume-backup@local"
 
-docker compose up -d
+docker compose up -d --quiet-pull
 sleep 5
 
 docker compose exec backup backup
@@ -26,7 +26,6 @@ pass "Found relevant files in decrypted and untared remote backups."
 
 # The second part of this test checks if backups get deleted when the retention
 # is set to 0 days (which it should not as it would mean all backups get deleted)
-# TODO: find out if we can test actual deletion without having to wait for a day
 BACKUP_RETENTION_DAYS="0" docker compose up -d
 sleep 5
 
@@ -39,5 +38,31 @@ docker run --rm \
 
 pass "Remote backups have not been deleted."
 
+# The third part of this test checks if old backups get deleted when the retention
+# is set to 7 days (which it should)
+
+BACKUP_RETENTION_DAYS="7" docker compose up -d
+sleep 5
+
+info "Create first backup with no prune"
+docker compose exec backup backup
+
+# Set the modification date of the old backup to 14 days ago
+docker run --rm \
+  -v ssh_backup_data:/ssh_data \
+  --user 1000 \
+  alpine \
+  ash -c 'touch -d@$(( $(date +%s) - 1209600 )) /ssh_data/test-hostnametoken-old.tar.gz'
+
+info "Create second backup and prune"
+docker compose exec backup backup
+
+docker run --rm \
+  -v ssh_backup_data:/ssh_data \
+  alpine \
+  ash -c 'test ! -f /ssh_data/test-hostnametoken-old.tar.gz && test -f /ssh_data/test-hostnametoken.tar.gz'
+
+pass "Old remote backup has been pruned, new one is still present."
+
 docker compose down --volumes
 rm -f id_rsa id_rsa.pub
diff --git a/test/user/run.sh b/test/user/run.sh
index 26ce8e5..75be98d 100644
--- a/test/user/run.sh
+++ b/test/user/run.sh
@@ -6,7 +6,7 @@ cd $(dirname $0)
 . ../util.sh
 current_test=$(basename $(pwd))
 
-docker compose up -d
+docker compose up -d --quiet-pull
 
 user_name=testuser
 docker exec user-alpine-1 adduser --disabled-password "$user_name"
diff --git a/test/webdav/run.sh b/test/webdav/run.sh
old mode 100755
new mode 100644
index 9a4282d..579e354
--- a/test/webdav/run.sh
+++ b/test/webdav/run.sh
@@ -6,7 +6,7 @@ cd "$(dirname "$0")"
 . ../util.sh
 current_test=$(basename $(pwd))
 
-docker compose up -d
+docker compose up -d --quiet-pull
 sleep 5
 
 docker compose exec backup backup
@@ -24,7 +24,6 @@ pass "Found relevant files in untared remote backup."
 
 # The second part of this test checks if backups get deleted when the retention
 # is set to 0 days (which it should not as it would mean all backups get deleted)
-# TODO: find out if we can test actual deletion without having to wait for a day
 BACKUP_RETENTION_DAYS="0" docker compose up -d
 sleep 5
 
@@ -37,4 +36,30 @@ docker run --rm \
 
 pass "Remote backups have not been deleted."
 
+# The third part of this test checks if old backups get deleted when the retention
+# is set to 7 days (which it should)
+
+BACKUP_RETENTION_DAYS="7" docker compose up -d
+sleep 5
+
+info "Create first backup with no prune"
+docker compose exec backup backup
+
+# Set the modification date of the old backup to 14 days ago
+docker run --rm \
+  -v webdav_backup_data:/webdav_data \
+  --user 82 \
+  alpine \
+  ash -c 'touch -d@$(( $(date +%s) - 1209600 )) /webdav_data/data/my/new/path/test-hostnametoken-old.tar.gz'
+
+info "Create second backup and prune"
+docker compose exec backup backup
+
+docker run --rm \
+  -v webdav_backup_data:/webdav_data \
+  alpine \
+  ash -c 'test ! -f /webdav_data/data/my/new/path/test-hostnametoken-old.tar.gz && test -f /webdav_data/data/my/new/path/test-hostnametoken.tar.gz'
+
+pass "Old remote backup has been pruned, new one is still present."
+
 docker compose down --volumes
diff --git a/test/zstd/run.sh b/test/zstd/run.sh
index 9c65df9..53da830 100755
--- a/test/zstd/run.sh
+++ b/test/zstd/run.sh
@@ -11,7 +11,7 @@ docker volume create app_data
 
 mkdir -p local
 
-docker run -d \
+docker run -d -q \
   --name offen \
   --network test_network \
   -v app_data:/var/opt/offen/ \
@@ -19,7 +19,7 @@ docker run -d \
 
 sleep 10
 
-docker run --rm \
+docker run --rm -q \
   --network test_network \
   -v app_data:/backup/app_data \
  -v ./local:/archive \
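
Aside from the diff itself, the following standalone Go sketch illustrates the behaviour the new BACKUP_SKIP_BACKENDS_FROM_PRUNE option and the skipPrune helper are expected to have: names from the comma-separated list are matched against backend names case-insensitively. The manual strings.Split call and the example backend names are assumptions for illustration only; in the actual patch the list is decoded by envconfig in cmd/backup/config.go. This sketch is not part of the patch.

// skip_prune_example.go - illustrative sketch, not part of the patch above.
package main

import (
	"fmt"
	"slices"
	"strings"
)

// skipPrune mirrors the helper added in cmd/backup/script.go: a backend is
// skipped when its name appears in the configured list, ignoring case.
func skipPrune(name string, skippedBackends []string) bool {
	return slices.ContainsFunc(skippedBackends, func(b string) bool {
		return strings.EqualFold(b, name)
	})
}

func main() {
	// Hypothetical value as it could arrive via BACKUP_SKIP_BACKENDS_FROM_PRUNE.
	raw := "S3,WebDAV"
	skipped := strings.Split(raw, ",")

	for _, backend := range []string{"s3", "webdav", "local", "azure"} {
		fmt.Printf("prune %-6s -> skipped: %v\n", backend, skipPrune(backend, skipped))
	}
	// Expected output:
	// prune s3     -> skipped: true
	// prune webdav -> skipped: true
	// prune local  -> skipped: false
	// prune azure  -> skipped: false
}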