diff --git a/README.md b/README.md
index b781994..c655c77 100644
--- a/README.md
+++ b/README.md
@@ -7,7 +7,7 @@ Backup Docker volumes locally or to any S3 compatible storage.
 
 The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a lightweight (below 15MB) sidecar container to an existing Docker setup.
-It handles __recurring or one-off backups of Docker volumes__ to a __local directory__ or __any S3 compatible storage__ (or both), and __rotates away old backups__ if configured. It also supports __encrypting your backups using GPG__ and __sending notifications for failed backup runs__.
+It handles __recurring or one-off backups of Docker volumes__ to a __local directory__, __any S3 or WebDAV compatible storage__ (or any combination), and __rotates away old backups__ if configured. It also supports __encrypting your backups using GPG__ and __sending notifications for failed backup runs__.
 
@@ -28,6 +28,7 @@ It handles __recurring or one-off backups of Docker volumes__ to a __local direc
 - [Recipes](#recipes)
   - [Backing up to AWS S3](#backing-up-to-aws-s3)
   - [Backing up to MinIO](#backing-up-to-minio)
+  - [Backing up to WebDAV](#backing-up-to-webdav)
   - [Backing up locally](#backing-up-locally)
   - [Backing up to AWS S3 as well as locally](#backing-up-to-aws-s3-as-well-as-locally)
   - [Running on a custom cron schedule](#running-on-a-custom-cron-schedule)
@@ -189,6 +190,24 @@ You can populate below template according to your requirements and use it as you
 
 # AWS_ENDPOINT_INSECURE="true"
 
+# In addition, you can also back up files to any WebDAV server.
+# The URL of the remote WebDAV server
+
+# WEBDAV_URL="https://webdav.example.com"
+
+# The directory on the WebDAV server to place the backups in.
+# If the path is not present on the server, it will be created.
+
+# WEBDAV_PATH="/my/directory/"
+
+# The username for the WebDAV server
+
+# WEBDAV_USERNAME="user"
+
+# The password for the WebDAV server
+
+# WEBDAV_PASSWORD="password"
+
 # In addition to storing backups remotely, you can also keep local copies.
 # Pass a container-local path to store your backups if needed. You also need to
 # mount a local folder or Docker volume into that location (`/archive`
@@ -530,6 +549,28 @@ volumes:
   data:
 ```
 
+### Backing up to WebDAV
+
+```yml
+version: '3'
+
+services:
+  # ... define other services using the `data` volume here
+  backup:
+    image: offen/docker-volume-backup:latest
+    environment:
+      WEBDAV_URL: https://webdav.mydomain.me
+      WEBDAV_PATH: /my/directory/
+      WEBDAV_USERNAME: user
+      WEBDAV_PASSWORD: password
+    volumes:
+      - data:/backup/my-app-backup:ro
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+
+volumes:
+  data:
+```
+
 ### Backing up locally
 
 ```yml
diff --git a/cmd/backup/main.go b/cmd/backup/main.go
index 300c213..e938631 100644
--- a/cmd/backup/main.go
+++ b/cmd/backup/main.go
@@ -9,6 +9,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"io/fs"
 	"os"
 	"path"
 	"path/filepath"
@@ -32,6 +33,7 @@ import (
 	"github.com/otiai10/copy"
 	"github.com/sirupsen/logrus"
+	"github.com/studio-b12/gowebdav"
 	"golang.org/x/crypto/openpgp"
 )
 
 func main() {
@@ -86,12 +88,13 @@ func main() {
 // script holds all the stateful information required to orchestrate a
 // single backup run.
 type script struct {
-	cli       *client.Client
-	mc        *minio.Client
-	logger    *logrus.Logger
-	sender    *router.ServiceRouter
-	hooks     []hook
-	hookLevel hookLevel
+	cli          *client.Client
+	mc           *minio.Client
+	webdavClient *gowebdav.Client
+	logger       *logrus.Logger
+	sender       *router.ServiceRouter
+	hooks        []hook
+	hookLevel    hookLevel
 
 	start time.Time
 	file  string
@@ -127,6 +130,10 @@ type config struct {
 	EmailSMTPPort              int    `envconfig:"EMAIL_SMTP_PORT" default:"587"`
 	EmailSMTPUsername          string `envconfig:"EMAIL_SMTP_USERNAME"`
 	EmailSMTPPassword          string `envconfig:"EMAIL_SMTP_PASSWORD"`
+	WebdavUrl                  string `split_words:"true"`
+	WebdavPath                 string `split_words:"true" default:"/"`
+	WebdavUsername             string `split_words:"true"`
+	WebdavPassword             string `split_words:"true"`
 }
 
 var msgBackupFailed = "backup run failed"
@@ -209,6 +216,17 @@ func newScript() (*script, error) {
 		s.mc = mc
 	}
 
+	// If a WEBDAV_URL is given, check the WebDAV credentials
+	// and instantiate a WebDAV client.
+	if s.c.WebdavUrl != "" {
+		if s.c.WebdavUsername == "" || s.c.WebdavPassword == "" {
+			return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided")
+		} else {
+			webdavClient := gowebdav.NewClient(s.c.WebdavUrl, s.c.WebdavUsername, s.c.WebdavPassword)
+			s.webdavClient = webdavClient
+		}
+	}
+
 	if s.c.EmailNotificationRecipient != "" {
 		emailURL := fmt.Sprintf(
 			"smtp://%s:%s@%s:%d/?from=%s&to=%s",
@@ -517,6 +535,21 @@ func (s *script) copyBackup() error {
 		s.logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`.", s.file, s.c.AwsS3BucketName)
 	}
 
+	// Upload the backup file to the WebDAV server if a client is configured.
+	if s.webdavClient != nil {
+		bytes, err := os.ReadFile(s.file)
+		if err != nil {
+			return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err)
+		}
+		if err := s.webdavClient.MkdirAll(s.c.WebdavPath, 0755); err != nil {
+			return fmt.Errorf("copyBackup: error creating directory '%s' on WebDAV server: %w", s.c.WebdavPath, err)
+		}
+		if err := s.webdavClient.Write(filepath.Join(s.c.WebdavPath, name), bytes, 0644); err != nil {
+			return fmt.Errorf("copyBackup: error uploading the file to WebDAV server: %w", err)
+		}
+		s.logger.Infof("Uploaded a copy of backup `%s` to WebDAV URL '%s' at path '%s'.", s.file, s.c.WebdavUrl, s.c.WebdavPath)
+	}
+
 	if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
 		if err := copyFile(s.file, path.Join(s.c.BackupArchive, name)); err != nil {
 			return fmt.Errorf("copyBackup: error copying file to local archive: %w", err)
@@ -551,6 +584,7 @@ func (s *script) pruneOldBackups() error {
 
 	deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays))
 
+	// Prune MinIO/S3 backups
 	if s.mc != nil {
 		candidates := s.mc.ListObjects(context.Background(), s.c.AwsS3BucketName, minio.ListObjectsOptions{
 			WithMetadata: true,
@@ -612,6 +646,38 @@ func (s *script) pruneOldBackups() error {
 		}
 	}
 
+	// Prune WebDAV backups
+	if s.webdavClient != nil {
+		candidates, err := s.webdavClient.ReadDir(s.c.WebdavPath)
+		if err != nil {
+			return fmt.Errorf("pruneOldBackups: error looking up candidates from remote storage: %w", err)
+		}
+		var matches []fs.FileInfo
+		var lenCandidates int
+		for _, candidate := range candidates {
+			lenCandidates++
+			if candidate.ModTime().Before(deadline) {
+				matches = append(matches, candidate)
+			}
+		}
+
+		if len(matches) != 0 && len(matches) != lenCandidates {
+			for _, match := range matches {
+				if err := s.webdavClient.Remove(filepath.Join(s.c.WebdavPath, match.Name())); err != nil {
+					return fmt.Errorf("pruneOldBackups: error removing a file from remote storage: %w", err)
+				}
+				s.logger.Infof("Pruned %s from WebDAV: %s", match.Name(), filepath.Join(s.c.WebdavUrl, s.c.WebdavPath))
+			}
+			s.logger.Infof("Pruned %d out of %d remote backup(s) as their age exceeded the configured retention period of %d days.", len(matches), lenCandidates, s.c.BackupRetentionDays)
+		} else if len(matches) != 0 && len(matches) == lenCandidates {
+			s.logger.Warnf("The current configuration would delete all %d remote backup copies.", len(matches))
+			s.logger.Warn("Refusing to do so, please check your configuration.")
+		} else {
+			s.logger.Infof("None of %d remote backup(s) were pruned.", lenCandidates)
+		}
+	}
+
+	// Prune local backups
 	if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
 		globPattern := path.Join(
 			s.c.BackupArchive,
diff --git a/go.mod b/go.mod
index 370eaae..f7adcc0 100644
--- a/go.mod
+++ b/go.mod
@@ -45,6 +45,7 @@ require (
 	github.com/opencontainers/image-spec v1.0.1 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/rs/xid v1.3.0 // indirect
+	github.com/studio-b12/gowebdav v0.0.0-20211109083228-3f8721cd4b6f // indirect
 	golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 // indirect
 	golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 // indirect
 	golang.org/x/text v0.3.6 // indirect
diff --git a/go.sum b/go.sum
index fec4db0..6f1cf35 100644
--- a/go.sum
+++ b/go.sum
@@ -659,6 +659,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/studio-b12/gowebdav v0.0.0-20211109083228-3f8721cd4b6f h1:L2NE7BXnSlSLoNYZ0lCwZDjdnYjCNYC71k9ClZUTFTs=
+github.com/studio-b12/gowebdav v0.0.0-20211109083228-3f8721cd4b6f/go.mod h1:bHA7t77X/QFExdeAnDzK6vKM34kEZAcE1OX4MfiwjkE=
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
diff --git a/test/compose/docker-compose.yml b/test/compose/docker-compose.yml
index 25db200..7ee40f8 100644
--- a/test/compose/docker-compose.yml
+++ b/test/compose/docker-compose.yml
@@ -10,13 +10,23 @@ services:
       MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
     entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
     volumes:
-      - backup_data:/data
+      - minio_backup_data:/data
+
+  webdav:
+    image: bytemark/webdav:2.4
+    environment:
+      AUTH_TYPE: Digest
+      USERNAME: test
+      PASSWORD: test
+    volumes:
+      - webdav_backup_data:/var/lib/dav
 
   backup: &default_backup_service
     image: offen/docker-volume-backup:${TEST_VERSION}
     hostname: hostnametoken
     depends_on:
       - minio
+      - webdav
     restart: always
     environment:
       AWS_ACCESS_KEY_ID: test
@@ -32,6 +42,10 @@ services:
      BACKUP_PRUNING_LEEWAY: 5s
      BACKUP_PRUNING_PREFIX: test
      GPG_PASSPHRASE: 1234secret
+      WEBDAV_URL: http://webdav/
+      WEBDAV_PATH: /my/new/path/
+      WEBDAV_USERNAME: test
+      WEBDAV_PASSWORD: test
     volumes:
       - ./local:/archive
       - app_data:/backup/app_data:ro
@@ -45,5 +59,6 @@ services:
       - app_data:/var/opt/offen
 
 volumes:
-  backup_data:
+  minio_backup_data:
+  webdav_backup_data:
   app_data:
diff --git a/test/compose/run.sh b/test/compose/run.sh
index cca19b2..0d6a226 100755
--- a/test/compose/run.sh
+++ b/test/compose/run.sh
@@ -13,10 +13,13 @@ docker-compose exec offen ln -s
   /var/opt/offen/offen.db /var/opt/offen/db.link
 docker-compose exec backup backup
 docker run --rm -it \
-  -v compose_backup_data:/data alpine \
-  ash -c 'apk add gnupg && echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /data/backup/test-hostnametoken.tar.gz.gpg > /tmp/test-hostnametoken.tar.gz && tar -xf /tmp/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
+  -v compose_minio_backup_data:/minio_data \
+  -v compose_webdav_backup_data:/webdav_data alpine \
+  ash -c 'apk add gnupg && \
+    echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /minio_data/backup/test-hostnametoken.tar.gz.gpg > /tmp/test-hostnametoken.tar.gz && tar -xf /tmp/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db && \
+    echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /webdav_data/data/my/new/path/test-hostnametoken.tar.gz.gpg > /tmp/test-hostnametoken.tar.gz && tar -xf /tmp/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
 
-echo "[TEST:PASS] Found relevant files in untared remote backup."
+echo "[TEST:PASS] Found relevant files in untared remote backups."
 
 test -L ./local/test-hostnametoken.latest.tar.gz.gpg
 echo 1234secret | gpg -d --yes --passphrase-fd 0 ./local/test-hostnametoken.tar.gz.gpg > ./local/decrypted.tar.gz
@@ -26,7 +29,7 @@ test -L /tmp/backup/app_data/db.link
 
 echo "[TEST:PASS] Found relevant files in untared local backup."
 
-if [ "$(docker-compose ps -q | wc -l)" != "3" ]; then
+if [ "$(docker-compose ps -q | wc -l)" != "4" ]; then
   echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
   docker-compose ps
   exit 1
@@ -43,8 +46,10 @@ sleep 5
 
 docker-compose exec backup backup
 
 docker run --rm -it \
-  -v compose_backup_data:/data alpine \
-  ash -c '[ $(find /data/backup/ -type f | wc -l) = "1" ]'
+  -v compose_minio_backup_data:/minio_data \
+  -v compose_webdav_backup_data:/webdav_data alpine \
+  ash -c '[ $(find /minio_data/backup/ -type f | wc -l) = "1" ] && \
+    [ $(find /webdav_data/data/my/new/path/ -type f | wc -l) = "1" ]'
 
 echo "[TEST:PASS] Remote backups have not been deleted."
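For anyone who wants to exercise the new WebDAV storage path outside of the sidecar container, below is a minimal standalone sketch of the gowebdav calls this patch relies on (`NewClient`, `MkdirAll`, `Write`, `ReadDir`, `Remove`). The server URL, credentials, archive name and the 7-day retention deadline are placeholders rather than values taken from the change, and the sketch deliberately omits the safeguard in `pruneOldBackups` that refuses to delete every remaining remote copy.

```go
package main

import (
	"log"
	"os"
	"path/filepath"
	"time"

	"github.com/studio-b12/gowebdav"
)

func main() {
	// Placeholder connection details -- replace with a real WebDAV endpoint.
	client := gowebdav.NewClient("https://webdav.example.com", "user", "password")
	remotePath := "/my/directory/"

	// Upload: read a local archive and write it below remotePath,
	// creating the remote directory first if it does not exist yet.
	data, err := os.ReadFile("backup.tar.gz")
	if err != nil {
		log.Fatalf("reading local archive: %v", err)
	}
	if err := client.MkdirAll(remotePath, 0755); err != nil {
		log.Fatalf("creating remote directory: %v", err)
	}
	if err := client.Write(filepath.Join(remotePath, "backup.tar.gz"), data, 0644); err != nil {
		log.Fatalf("uploading archive: %v", err)
	}

	// Prune: list the remote directory and remove every entry whose
	// modification time lies before the retention deadline (7 days here).
	deadline := time.Now().AddDate(0, 0, -7)
	entries, err := client.ReadDir(remotePath)
	if err != nil {
		log.Fatalf("listing remote directory: %v", err)
	}
	for _, entry := range entries {
		if entry.ModTime().Before(deadline) {
			if err := client.Remove(filepath.Join(remotePath, entry.Name())); err != nil {
				log.Fatalf("removing %s: %v", entry.Name(), err)
			}
			log.Printf("pruned %s", entry.Name())
		}
	}
}
```

Pointed at a scratch WebDAV server such as the `bytemark/webdav` container used in the test setup, this walks roughly the same upload-then-prune sequence that `test/compose/run.sh` verifies end to end.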