From a253fdfbecc1c474f1f9c247549f15d2dd19fc77 Mon Sep 17 00:00:00 2001 From: Frederik Ring Date: Fri, 23 Dec 2022 10:31:25 +0100 Subject: [PATCH] Set up automated testing for Azure Storage --- cmd/backup/config.go | 2 +- cmd/backup/script.go | 1 + internal/storage/azure/azure.go | 6 ++-- test/azure/docker-compose.yml | 57 +++++++++++++++++++++++++++++++++ test/azure/run.sh | 40 +++++++++++++++++++++++ 5 files changed, 103 insertions(+), 3 deletions(-) create mode 100644 test/azure/docker-compose.yml create mode 100644 test/azure/run.sh diff --git a/cmd/backup/config.go b/cmd/backup/config.go index f293bdc..09ef3d2 100644 --- a/cmd/backup/config.go +++ b/cmd/backup/config.go @@ -66,7 +66,7 @@ type Config struct { AzureStorageAccountName string `split_words:"true"` AzureStoragePrimaryAccountKey string `split_words:"true"` AzureStorageContainerName string `split_words:"true"` - AzureStorageEndpoint string `split_words:"true" default:""https://%s.blob.core.windows.net/""` + AzureStorageEndpoint string `split_words:"true" default:"https://%s.blob.core.windows.net/"` } func (c *Config) resolveSecret(envVar string, secretPath string) (string, error) { diff --git a/cmd/backup/script.go b/cmd/backup/script.go index 6ea5dd4..379df28 100644 --- a/cmd/backup/script.go +++ b/cmd/backup/script.go @@ -196,6 +196,7 @@ func newScript() (*script, error) { ContainerName: s.c.AzureStorageContainerName, AccountName: s.c.AzureStorageAccountName, PrimaryAccountKey: s.c.AzureStoragePrimaryAccountKey, + Endpoint: s.c.AzureStorageEndpoint, } azureBackend, err := azure.NewStorageBackend(azureConfig, logFunc) if err != nil { diff --git a/internal/storage/azure/azure.go b/internal/storage/azure/azure.go index 1034076..82165ce 100644 --- a/internal/storage/azure/azure.go +++ b/internal/storage/azure/azure.go @@ -7,6 +7,7 @@ import ( "context" "fmt" "os" + "path" "time" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" @@ -55,9 +56,10 @@ func (b *azureBlobStorage) Copy(file string) error { 
if err != nil { return fmt.Errorf("(*azureBlobStorage).Copy: error opening file %s: %w", file, err) } + _, err = b.client.UploadStream(context.TODO(), b.containerName, - file, + path.Base(file), fileReader, nil, ) @@ -69,5 +71,5 @@ func (b *azureBlobStorage) Copy(file string) error { // Prune rotates away backups according to the configuration and provided deadline for the S3/Minio storage backend. func (b *azureBlobStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) { - return nil, nil + return &storage.PruneStats{}, nil } diff --git a/test/azure/docker-compose.yml b/test/azure/docker-compose.yml new file mode 100644 index 0000000..5e3ff01 --- /dev/null +++ b/test/azure/docker-compose.yml @@ -0,0 +1,57 @@ +version: '3' + +services: + storage: + image: mcr.microsoft.com/azure-storage/azurite + volumes: + - ./foo:/data + command: azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --location /data + healthcheck: + test: nc 127.0.0.1 10000 -z + interval: 1s + retries: 30 + + az_cli: + image: mcr.microsoft.com/azure-cli + volumes: + - ./local:/dump + command: + - /bin/sh + - -c + - | + az storage container create --name test-container + depends_on: + storage: + condition: service_healthy + environment: + AZURE_STORAGE_CONNECTION_STRING: DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://storage:10000/devstoreaccount1; + + backup: + image: offen/docker-volume-backup:${TEST_VERSION:-canary} + hostname: hostnametoken + restart: always + environment: + AZURE_STORAGE_ACCOUNT_NAME: devstoreaccount1 + AZURE_STORAGE_PRIMARY_ACCOUNT_KEY: Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw== + AZURE_STORAGE_CONTAINER_NAME: test-container + AZURE_STORAGE_ENDPOINT: http://storage:10000/%s/ + BACKUP_FILENAME: test.tar.gz + BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ? 
+ BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7} + BACKUP_PRUNING_LEEWAY: 5s + BACKUP_PRUNING_PREFIX: test + volumes: + - app_data:/backup/app_data:ro + - /var/run/docker.sock:/var/run/docker.sock + + offen: + image: offen/offen:latest + labels: + - docker-volume-backup.stop-during-backup=true + volumes: + - app_data:/var/opt/offen + +volumes: + azurite_backup_data: + name: azurite_backup_data + app_data: diff --git a/test/azure/run.sh b/test/azure/run.sh new file mode 100644 index 0000000..f380ab9 --- /dev/null +++ b/test/azure/run.sh @@ -0,0 +1,40 @@ +#!/bin/sh + +set -e + +cd "$(dirname "$0")" +. ../util.sh +current_test=$(basename $(pwd)) + +docker-compose up -d +sleep 5 + +# Trigger a manual backup run so the resulting archive can be downloaded +# from the Azurite blob storage and verified below. +docker-compose exec backup backup + +sleep 5 + +expect_running_containers "3" + +docker-compose run --rm az_cli \ + az storage blob download -f /dump/test.tar.gz -c test-container -n test.tar.gz +tar -xvf ./local/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db + +pass "Found relevant files in untared remote backups." + +# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
+BACKUP_RETENTION_DAYS="0" docker-compose up -d +sleep 5 + +docker-compose exec backup backup + +docker-compose run --rm az_cli \ + az storage blob download -f /dump/test.tar.gz -c test-container -n test.tar.gz +test -f ./local/test.tar.gz + +pass "Remote backups have not been deleted." + +docker-compose down --volumes