Set up automated testing for Azure Storage

Frederik Ring 2022-12-23 10:31:25 +01:00
parent 7aa2166aee
commit a253fdfbec
5 changed files with 103 additions and 3 deletions


@@ -66,7 +66,7 @@ type Config struct {
     AzureStorageAccountName       string `split_words:"true"`
     AzureStoragePrimaryAccountKey string `split_words:"true"`
     AzureStorageContainerName     string `split_words:"true"`
-    AzureStorageEndpoint          string `split_words:"true" default:""https://%s.blob.core.windows.net/""`
+    AzureStorageEndpoint          string `split_words:"true" default:"https://%s.blob.core.windows.net/"`
 }

 func (c *Config) resolveSecret(envVar string, secretPath string) (string, error) {
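A note on the fix above: Go struct tags are a space-separated list of key:"value" pairs, so the doubled quotes in the removed line terminate the default value at an empty string and the URL template is silently dropped. A minimal sketch with reflect.StructTag shows the difference (envconfig reads the default tag through the same mechanism):

    package main

    import (
        "fmt"
        "reflect"
    )

    func main() {
        broken := reflect.StructTag(`split_words:"true" default:""https://%s.blob.core.windows.net/""`)
        fixed := reflect.StructTag(`split_words:"true" default:"https://%s.blob.core.windows.net/"`)

        fmt.Printf("broken: %q\n", broken.Get("default")) // -> "" (value ends at the doubled quote)
        fmt.Printf("fixed:  %q\n", fixed.Get("default"))  // -> "https://%s.blob.core.windows.net/"
    }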


@@ -196,6 +196,7 @@ func newScript() (*script, error) {
             ContainerName:     s.c.AzureStorageContainerName,
             AccountName:       s.c.AzureStorageAccountName,
             PrimaryAccountKey: s.c.AzureStoragePrimaryAccountKey,
+            Endpoint:          s.c.AzureStorageEndpoint,
         }
         azureBackend, err := azure.NewStorageBackend(azureConfig, logFunc)
         if err != nil {
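The new Endpoint value is threaded from the top-level configuration into the Azure backend. The backend's constructor is not part of this diff; the following is a minimal sketch of how the template is presumably turned into a service URL, assuming the azblob Client API visible elsewhere in this diff and a Config struct matching the field names at the call site:

    package azure

    import (
        "fmt"

        "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
    )

    // Config mirrors the fields assigned at the call site above; the real
    // definition is not part of this diff.
    type Config struct {
        AccountName       string
        ContainerName     string
        PrimaryAccountKey string
        Endpoint          string
    }

    // newClient shows the presumed expansion: the Endpoint value is a
    // template whose %s placeholder receives the account name, yielding
    // e.g. https://devstoreaccount1.blob.core.windows.net/ in production
    // or http://storage:10000/devstoreaccount1/ against Azurite.
    func newClient(c Config) (*azblob.Client, error) {
        cred, err := azblob.NewSharedKeyCredential(c.AccountName, c.PrimaryAccountKey)
        if err != nil {
            return nil, fmt.Errorf("newClient: error creating shared key credential: %w", err)
        }
        return azblob.NewClientWithSharedKeyCredential(
            fmt.Sprintf(c.Endpoint, c.AccountName), cred, nil,
        )
    }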


@@ -7,6 +7,7 @@ import (
     "context"
     "fmt"
     "os"
+    "path"
     "time"

     "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
@@ -55,9 +56,10 @@ func (b *azureBlobStorage) Copy(file string) error {
     if err != nil {
         return fmt.Errorf("(*azureBlobStorage).Copy: error opening file %s: %w", file, err)
     }
+
     _, err = b.client.UploadStream(context.TODO(),
         b.containerName,
-        file,
+        path.Base(file),
         fileReader,
         nil,
     )
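Before this change the blob was named after the full local path of the archive, so Azure would expose each path segment as a virtual directory inside the container and a flat lookup by file name (as the test below does with az storage blob download -n test.tar.gz) would miss it. path.Base keeps only the final path element; a worked example with a hypothetical local path:

    package main

    import (
        "fmt"
        "path"
    )

    func main() {
        // Hypothetical local path to a tarred backup inside the container.
        file := "/tmp/backup/test.tar.gz"

        // Uploading under the full path would create virtual directories
        // tmp/backup/ inside the storage container; path.Base keeps only
        // the file name.
        fmt.Println(path.Base(file)) // test.tar.gz
    }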
@@ -69,5 +71,5 @@ func (b *azureBlobStorage) Copy(file string) error {

 // Prune rotates away backups according to the configuration and provided deadline for the Azure Blob Storage backend.
 func (b *azureBlobStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
-    return nil, nil
+    return &storage.PruneStats{}, nil
 }
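Pruning is still unimplemented for this backend, but returning a zero-valued *storage.PruneStats instead of nil protects callers that read fields off the result. A minimal sketch of the hazard, assuming a caller along these lines (the storage.Backend interface, its import path, and the PruneStats field names are assumptions, not shown in this diff):

    package example

    import (
        "log"
        "time"

        // Import path assumed from the module layout implied by this diff.
        "github.com/offen/docker-volume-backup/internal/storage"
    )

    // logPruneResult is a hypothetical caller used only for illustration.
    func logPruneResult(b storage.Backend, deadline time.Time, prefix string) error {
        stats, err := b.Prune(deadline, prefix)
        if err != nil {
            return err
        }
        // With the old `return nil, nil`, reading a field here would panic
        // with a nil pointer dereference; the empty struct yields zero values.
        log.Printf("pruned %d of %d backup(s)", stats.Pruned, stats.Total)
        return nil
    }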


@@ -0,0 +1,57 @@
+version: '3'
+
+services:
+  storage:
+    image: mcr.microsoft.com/azure-storage/azurite
+    volumes:
+      - ./foo:/data
+    command: azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --location /data
+    healthcheck:
+      test: nc 127.0.0.1 10000 -z
+      interval: 1s
+      retries: 30
+
+  az_cli:
+    image: mcr.microsoft.com/azure-cli
+    volumes:
+      - ./local:/dump
+    command:
+      - /bin/sh
+      - -c
+      - |
+        az storage container create --name test-container
+    depends_on:
+      storage:
+        condition: service_healthy
+    environment:
+      AZURE_STORAGE_CONNECTION_STRING: DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://storage:10000/devstoreaccount1;
+
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
+    hostname: hostnametoken
+    restart: always
+    environment:
+      AZURE_STORAGE_ACCOUNT_NAME: devstoreaccount1
+      AZURE_STORAGE_PRIMARY_ACCOUNT_KEY: Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==
+      AZURE_STORAGE_CONTAINER_NAME: test-container
+      AZURE_STORAGE_ENDPOINT: http://storage:10000/%s/
+      BACKUP_FILENAME: test.tar.gz
+      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
+      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
+      BACKUP_PRUNING_LEEWAY: 5s
+      BACKUP_PRUNING_PREFIX: test
+    volumes:
+      - app_data:/backup/app_data:ro
+      - /var/run/docker.sock:/var/run/docker.sock
+
+  offen:
+    image: offen/offen:latest
+    labels:
+      - docker-volume-backup.stop-during-backup=true
+    volumes:
+      - app_data:/var/opt/offen
+
+volumes:
+  azurite_backup_data:
+    name: azurite_backup_data
+  app_data:
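The test points the tool at Azurite's well-known development account (devstoreaccount1 with its published account key) and swaps the endpoint template for a path-style one, because the emulator serves every account from a single host and port rather than from per-account subdomains. Expanding both templates side by side, using values from this file and the config default:

    package main

    import "fmt"

    func main() {
        const account = "devstoreaccount1"

        // Production default from the Config struct: account name as subdomain.
        fmt.Printf("https://%s.blob.core.windows.net/\n", account)
        // -> https://devstoreaccount1.blob.core.windows.net/

        // Override from this compose file: path-style addressing for Azurite.
        fmt.Printf("http://storage:10000/%s/\n", account)
        // -> http://storage:10000/devstoreaccount1/
    }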

test/azure/run.sh

@@ -0,0 +1,40 @@
+#!/bin/sh
+
+set -e
+
+cd "$(dirname "$0")"
+. ../util.sh
+current_test=$(basename $(pwd))
+
+docker-compose up -d
+sleep 5
+
+docker-compose exec backup backup
+
+sleep 5
+
+expect_running_containers "3"
+
+docker-compose run --rm az_cli \
+  az storage blob download -f /dump/test.tar.gz -c test-container -n test.tar.gz
+tar -xvf ./local/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
+
+pass "Found relevant files in untarred remote backups."
+
+# The second part of this test checks that backups do not get deleted when
+# the retention is set to 0 days (which would otherwise remove all backups).
+# TODO: find out if we can test actual deletion without having to wait for a day
+
+BACKUP_RETENTION_DAYS="0" docker-compose up -d
+sleep 5
+
+docker-compose exec backup backup
+
+docker-compose run --rm az_cli \
+  az storage blob download -f /dump/test.tar.gz -c test-container -n test.tar.gz
+test -f ./local/test.tar.gz
+
+pass "Remote backups have not been deleted."
+
+docker-compose down --volumes