Added abstract helper interface for all storage backends (#135)

* Added abstract helper interface and implemented it for all storage backends

* Moved storage client initialization into the helper classes as well

* Fixed ssh init issue

* Moved script parameter to helper struct to simplify script init.

* Created submodules. Enhanced the abstract implementation.

* Fixed config issue

* Fixed declaration issues. Added config to interface.

* Added StorageProviders to unify all backends.

* Cleanup, optimizations, comments.

* Applied discussed changes. See description.

Moved modules to internal packages.
Replaced StoragePool with slice.
Moved conditional for init of storage backends back to script.

* Fix docker build issue

* Restored the accidentally removed local copy condition.

* Delete .gitignore

* Renaming/changes according to review

Renamed Init functions and interface.
Replaced config object with specific config values.
Init func returns interface instead of struct.
Removed custom import names where possible.

* Fixed auto-complete error.

* Combined copy instructions into one layer.

* Added logging func for storages.

* Introduced logging func for errors too.

* Missed an error message

* Moved config back to main. Optimized prune stats handling.

* Move stats back to main package

* Code documentation updates

* Apply changes from #136

* Replace name field with function.

* Changed receiver names from stg to b.

* Renamed LogFuncDef to Log

* Removed redundant package name.

* Renamed storagePool to storages.

* Simplified creation of new storage backend.

* Added initialization for storage stats map.

* Invert .dockerignore patterns.

* Fix package typo
MaxJa4 2022-08-18 08:52:09 +02:00 committed by Frederik Ring
parent 4ec88d14dd
commit 279844ccfb
14 changed files with 740 additions and 438 deletions


@ -1 +1,19 @@
test # Ignore everything
*
# Exceptions:
# Note: Wildcards for directories like * or ** don't work (yet) with exclamation marks!
!cmd/backup/*.go
!cmd/backup/*.tmpl
!internal/storage/*.go
!internal/storage/local/*.go
!internal/storage/s3/*.go
!internal/storage/ssh/*.go
!internal/storage/webdav/*.go
!internal/utilities/*.go
!Dockerfile
!entrypoint.sh
!go.*


@ -4,9 +4,8 @@
FROM golang:1.18-alpine as builder FROM golang:1.18-alpine as builder
WORKDIR /app WORKDIR /app
COPY go.mod go.sum ./ COPY . .
RUN go mod download RUN go mod download
COPY cmd/backup ./cmd/backup/
WORKDIR /app/cmd/backup WORKDIR /app/cmd/backup
RUN go build -o backup . RUN go build -o backup .


@ -12,6 +12,15 @@ import (
// Config holds all configuration values that are expected to be set // Config holds all configuration values that are expected to be set
// by users. // by users.
type Config struct { type Config struct {
AwsS3BucketName string `split_words:"true"`
AwsS3Path string `split_words:"true"`
AwsEndpoint string `split_words:"true" default:"s3.amazonaws.com"`
AwsEndpointProto string `split_words:"true" default:"https"`
AwsEndpointInsecure bool `split_words:"true"`
AwsStorageClass string `split_words:"true"`
AwsAccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID"`
AwsSecretAccessKey string `split_words:"true"`
AwsIamRoleEndpoint string `split_words:"true"`
BackupSources string `split_words:"true" default:"/backup"` BackupSources string `split_words:"true" default:"/backup"`
BackupFilename string `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.tar.gz"` BackupFilename string `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.tar.gz"`
BackupFilenameExpand bool `split_words:"true"` BackupFilenameExpand bool `split_words:"true"`
@ -23,15 +32,6 @@ type Config struct {
BackupStopContainerLabel string `split_words:"true" default:"true"` BackupStopContainerLabel string `split_words:"true" default:"true"`
BackupFromSnapshot bool `split_words:"true"` BackupFromSnapshot bool `split_words:"true"`
BackupExcludeRegexp RegexpDecoder `split_words:"true"` BackupExcludeRegexp RegexpDecoder `split_words:"true"`
AwsS3BucketName string `split_words:"true"`
AwsS3Path string `split_words:"true"`
AwsEndpoint string `split_words:"true" default:"s3.amazonaws.com"`
AwsEndpointProto string `split_words:"true" default:"https"`
AwsEndpointInsecure bool `split_words:"true"`
AwsStorageClass string `split_words:"true"`
AwsAccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID"`
AwsSecretAccessKey string `split_words:"true"`
AwsIamRoleEndpoint string `split_words:"true"`
GpgPassphrase string `split_words:"true"` GpgPassphrase string `split_words:"true"`
NotificationURLs []string `envconfig:"NOTIFICATION_URLS"` NotificationURLs []string `envconfig:"NOTIFICATION_URLS"`
NotificationLevel string `split_words:"true" default:"error"` NotificationLevel string `split_words:"true" default:"error"`

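The Config struct above is decoded with github.com/kelseyhightower/envconfig (imported further down in script.go), so a tag like `split_words:"true"` maps AwsS3BucketName to the AWS_S3_BUCKET_NAME environment variable. A minimal sketch of that mechanism, illustrative only and not part of the diff:

package main

import (
	"log"

	"github.com/kelseyhightower/envconfig"
)

// exampleConfig mirrors a small subset of the real Config struct.
// With split_words, AwsS3BucketName is read from AWS_S3_BUCKET_NAME.
type exampleConfig struct {
	AwsS3BucketName string `split_words:"true"`
	AwsEndpoint     string `split_words:"true" default:"s3.amazonaws.com"`
}

func main() {
	var c exampleConfig
	// Process reads matching environment variables and applies defaults.
	if err := envconfig.Process("", &c); err != nil {
		log.Fatal(err)
	}
	log.Printf("bucket=%q endpoint=%q", c.AwsS3BucketName, c.AwsEndpoint)
}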

@ -6,6 +6,8 @@ package main
import ( import (
"fmt" "fmt"
"sort" "sort"
"github.com/offen/docker-volume-backup/internal/utilities"
) )
// hook contains a queued action that can be trigger them when the script // hook contains a queued action that can be trigger them when the script
@ -50,7 +52,7 @@ func (s *script) runHooks(err error) error {
} }
} }
if len(actionErrors) != 0 { if len(actionErrors) != 0 {
return join(actionErrors...) return utilities.Join(actionErrors...)
} }
return nil return nil
} }


@ -9,6 +9,7 @@ import (
"time" "time"
"github.com/gofrs/flock" "github.com/gofrs/flock"
"github.com/offen/docker-volume-backup/internal/utilities"
) )
// lock opens a lockfile at the given location, keeping it locked until the // lock opens a lockfile at the given location, keeping it locked until the
@ -31,7 +32,7 @@ func (s *script) lock(lockfile string) (func() error, error) {
for { for {
acquired, err := fileLock.TryLock() acquired, err := fileLock.TryLock()
if err != nil { if err != nil {
return noop, fmt.Errorf("lock: error trying lock: %w", err) return utilities.Noop, fmt.Errorf("lock: error trying lock: %w", err)
} }
if acquired { if acquired {
if s.encounteredLock { if s.encounteredLock {
@ -52,7 +53,7 @@ func (s *script) lock(lockfile string) (func() error, error) {
case <-retry.C: case <-retry.C:
continue continue
case <-deadline.C: case <-deadline.C:
return noop, errors.New("lock: timed out waiting for lockfile to become available") return utilities.Noop, errors.New("lock: timed out waiting for lockfile to become available")
} }
} }
} }


@ -12,6 +12,7 @@ import (
"time" "time"
sTypes "github.com/containrrr/shoutrrr/pkg/types" sTypes "github.com/containrrr/shoutrrr/pkg/types"
"github.com/offen/docker-volume-backup/internal/utilities"
) )
//go:embed notifications.tmpl //go:embed notifications.tmpl
@ -68,7 +69,7 @@ func (s *script) sendNotification(title, body string) error {
} }
} }
if len(errs) != 0 { if len(errs) != 0 {
return fmt.Errorf("sendNotification: error sending message: %w", join(errs...)) return fmt.Errorf("sendNotification: error sending message: %w", utilities.Join(errs...))
} }
return nil return nil
} }


@ -5,19 +5,22 @@ package main
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"io" "io"
"io/fs" "io/fs"
"io/ioutil"
"net/http"
"os" "os"
"path" "path"
"path/filepath" "path/filepath"
"strings"
"text/template" "text/template"
"time" "time"
"github.com/offen/docker-volume-backup/internal/storage"
"github.com/offen/docker-volume-backup/internal/storage/local"
"github.com/offen/docker-volume-backup/internal/storage/s3"
"github.com/offen/docker-volume-backup/internal/storage/ssh"
"github.com/offen/docker-volume-backup/internal/storage/webdav"
"github.com/offen/docker-volume-backup/internal/utilities"
"github.com/containrrr/shoutrrr" "github.com/containrrr/shoutrrr"
"github.com/containrrr/shoutrrr/pkg/router" "github.com/containrrr/shoutrrr/pkg/router"
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types"
@ -26,29 +29,21 @@ import (
"github.com/docker/docker/client" "github.com/docker/docker/client"
"github.com/kelseyhightower/envconfig" "github.com/kelseyhightower/envconfig"
"github.com/leekchan/timeutil" "github.com/leekchan/timeutil"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/otiai10/copy" "github.com/otiai10/copy"
"github.com/pkg/sftp"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/studio-b12/gowebdav"
"golang.org/x/crypto/openpgp" "golang.org/x/crypto/openpgp"
"golang.org/x/crypto/ssh"
) )
// script holds all the stateful information required to orchestrate a // script holds all the stateful information required to orchestrate a
// single backup run. // single backup run.
type script struct { type script struct {
cli *client.Client cli *client.Client
minioClient *minio.Client storages []storage.Backend
webdavClient *gowebdav.Client logger *logrus.Logger
sshClient *ssh.Client sender *router.ServiceRouter
sftpClient *sftp.Client template *template.Template
logger *logrus.Logger hooks []hook
sender *router.ServiceRouter hookLevel hookLevel
template *template.Template
hooks []hook
hookLevel hookLevel
file string file string
stats *Stats stats *Stats
@ -63,7 +58,7 @@ type script struct {
// reading from env vars or other configuration sources is expected to happen // reading from env vars or other configuration sources is expected to happen
// in this method. // in this method.
func newScript() (*script, error) { func newScript() (*script, error) {
stdOut, logBuffer := buffer(os.Stdout) stdOut, logBuffer := utilities.Buffer(os.Stdout)
s := &script{ s := &script{
c: &Config{}, c: &Config{},
logger: &logrus.Logger{ logger: &logrus.Logger{
@ -75,7 +70,12 @@ func newScript() (*script, error) {
stats: &Stats{ stats: &Stats{
StartTime: time.Now(), StartTime: time.Now(),
LogOutput: logBuffer, LogOutput: logBuffer,
Storages: StoragesStats{}, Storages: map[string]StorageStats{
"S3": {},
"WebDav": {},
"SSH": {},
"Local": {},
},
}, },
} }
@ -107,114 +107,56 @@ func newScript() (*script, error) {
s.cli = cli s.cli = cli
} }
logFunc := func(logType storage.LogType, context string, msg string, params ...interface{}) error {
var allParams []interface{}
allParams = append(allParams, context)
allParams = append(allParams, params...)
switch logType {
case storage.INFO:
s.logger.Infof("[%s] "+msg, allParams...)
return nil
case storage.WARNING:
s.logger.Warnf("[%s] "+msg, allParams...)
return nil
case storage.ERROR:
return fmt.Errorf("[%s] "+msg, allParams...)
default:
s.logger.Warnf("[%s] "+msg, allParams...)
return nil
}
}
if s.c.AwsS3BucketName != "" { if s.c.AwsS3BucketName != "" {
var creds *credentials.Credentials if s3Backend, err := s3.NewStorageBackend(s.c.AwsEndpoint, s.c.AwsAccessKeyID, s.c.AwsSecretAccessKey, s.c.AwsIamRoleEndpoint,
if s.c.AwsAccessKeyID != "" && s.c.AwsSecretAccessKey != "" { s.c.AwsEndpointProto, s.c.AwsEndpointInsecure, s.c.AwsS3Path, s.c.AwsS3BucketName, s.c.AwsStorageClass, logFunc); err != nil {
creds = credentials.NewStaticV4( return nil, err
s.c.AwsAccessKeyID,
s.c.AwsSecretAccessKey,
"",
)
} else if s.c.AwsIamRoleEndpoint != "" {
creds = credentials.NewIAM(s.c.AwsIamRoleEndpoint)
} else { } else {
return nil, errors.New("newScript: AWS_S3_BUCKET_NAME is defined, but no credentials were provided") s.storages = append(s.storages, s3Backend)
} }
options := minio.Options{
Creds: creds,
Secure: s.c.AwsEndpointProto == "https",
}
if s.c.AwsEndpointInsecure {
if !options.Secure {
return nil, errors.New("newScript: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
}
transport, err := minio.DefaultTransport(true)
if err != nil {
return nil, fmt.Errorf("newScript: failed to create default minio transport")
}
transport.TLSClientConfig.InsecureSkipVerify = true
options.Transport = transport
}
mc, err := minio.New(s.c.AwsEndpoint, &options)
if err != nil {
return nil, fmt.Errorf("newScript: error setting up minio client: %w", err)
}
s.minioClient = mc
} }
if s.c.WebdavUrl != "" { if s.c.WebdavUrl != "" {
if s.c.WebdavUsername == "" || s.c.WebdavPassword == "" { if webdavBackend, err := webdav.NewStorageBackend(s.c.WebdavUrl, s.c.WebdavPath, s.c.WebdavUsername, s.c.WebdavPassword,
return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided") s.c.WebdavUrlInsecure, logFunc); err != nil {
return nil, err
} else { } else {
webdavClient := gowebdav.NewClient(s.c.WebdavUrl, s.c.WebdavUsername, s.c.WebdavPassword) s.storages = append(s.storages, webdavBackend)
s.webdavClient = webdavClient
if s.c.WebdavUrlInsecure {
defaultTransport, ok := http.DefaultTransport.(*http.Transport)
if !ok {
return nil, errors.New("newScript: unexpected error when asserting type for http.DefaultTransport")
}
webdavTransport := defaultTransport.Clone()
webdavTransport.TLSClientConfig.InsecureSkipVerify = s.c.WebdavUrlInsecure
s.webdavClient.SetTransport(webdavTransport)
}
} }
} }
if s.c.SSHHostName != "" { if s.c.SSHHostName != "" {
var authMethods []ssh.AuthMethod if sshBackend, err := ssh.NewStorageBackend(s.c.SSHHostName, s.c.SSHPort, s.c.SSHUser, s.c.SSHPassword, s.c.SSHIdentityFile,
s.c.SSHIdentityPassphrase, s.c.SSHRemotePath, logFunc); err != nil {
if s.c.SSHPassword != "" {
authMethods = append(authMethods, ssh.Password(s.c.SSHPassword))
}
if _, err := os.Stat(s.c.SSHIdentityFile); err == nil {
key, err := ioutil.ReadFile(s.c.SSHIdentityFile)
if err != nil {
return nil, errors.New("newScript: error reading the private key")
}
var signer ssh.Signer
if s.c.SSHIdentityPassphrase != "" {
signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(s.c.SSHIdentityPassphrase))
if err != nil {
return nil, errors.New("newScript: error parsing the encrypted private key")
}
authMethods = append(authMethods, ssh.PublicKeys(signer))
} else {
signer, err = ssh.ParsePrivateKey(key)
if err != nil {
return nil, errors.New("newScript: error parsing the private key")
}
authMethods = append(authMethods, ssh.PublicKeys(signer))
}
}
sshClientConfig := &ssh.ClientConfig{
User: s.c.SSHUser,
Auth: authMethods,
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", s.c.SSHHostName, s.c.SSHPort), sshClientConfig)
s.sshClient = sshClient
if err != nil {
return nil, fmt.Errorf("newScript: error creating ssh client: %w", err)
}
_, _, err = s.sshClient.SendRequest("keepalive", false, nil)
if err != nil {
return nil, err return nil, err
} } else {
s.storages = append(s.storages, sshBackend)
sftpClient, err := sftp.NewClient(sshClient)
s.sftpClient = sftpClient
if err != nil {
return nil, fmt.Errorf("newScript: error creating sftp client: %w", err)
} }
} }
localBackend := local.NewStorageBackend(s.c.BackupArchive, s.c.BackupLatestSymlink, logFunc)
s.storages = append(s.storages, localBackend)
if s.c.EmailNotificationRecipient != "" { if s.c.EmailNotificationRecipient != "" {
emailURL := fmt.Sprintf( emailURL := fmt.Sprintf(
"smtp://%s:%s@%s:%d/?from=%s&to=%s", "smtp://%s:%s@%s:%d/?from=%s&to=%s",
@ -286,14 +228,14 @@ func newScript() (*script, error) {
// restart everything that has been stopped. // restart everything that has been stopped.
func (s *script) stopContainers() (func() error, error) { func (s *script) stopContainers() (func() error, error) {
if s.cli == nil { if s.cli == nil {
return noop, nil return utilities.Noop, nil
} }
allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{ allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
Quiet: true, Quiet: true,
}) })
if err != nil { if err != nil {
return noop, fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err) return utilities.Noop, fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err)
} }
containerLabel := fmt.Sprintf( containerLabel := fmt.Sprintf(
@ -309,11 +251,11 @@ func (s *script) stopContainers() (func() error, error) {
}) })
if err != nil { if err != nil {
return noop, fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err) return utilities.Noop, fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err)
} }
if len(containersToStop) == 0 { if len(containersToStop) == 0 {
return noop, nil return utilities.Noop, nil
} }
s.logger.Infof( s.logger.Infof(
@ -338,7 +280,7 @@ func (s *script) stopContainers() (func() error, error) {
stopError = fmt.Errorf( stopError = fmt.Errorf(
"stopContainersAndRun: %d error(s) stopping containers: %w", "stopContainersAndRun: %d error(s) stopping containers: %w",
len(stopErrors), len(stopErrors),
join(stopErrors...), utilities.Join(stopErrors...),
) )
} }
@ -389,7 +331,7 @@ func (s *script) stopContainers() (func() error, error) {
return fmt.Errorf( return fmt.Errorf(
"stopContainersAndRun: %d error(s) restarting containers and services: %w", "stopContainersAndRun: %d error(s) restarting containers and services: %w",
len(restartErrors), len(restartErrors),
join(restartErrors...), utilities.Join(restartErrors...),
) )
} }
s.logger.Infof( s.logger.Infof(
@ -415,7 +357,7 @@ func (s *script) createArchive() error {
backupSources = filepath.Join("/tmp", s.c.BackupSources) backupSources = filepath.Join("/tmp", s.c.BackupSources)
// copy before compressing guard against a situation where backup folder's content are still growing. // copy before compressing guard against a situation where backup folder's content are still growing.
s.registerHook(hookLevelPlumbing, func(error) error { s.registerHook(hookLevelPlumbing, func(error) error {
if err := remove(backupSources); err != nil { if err := utilities.Remove(backupSources); err != nil {
return fmt.Errorf("takeBackup: error removing snapshot: %w", err) return fmt.Errorf("takeBackup: error removing snapshot: %w", err)
} }
s.logger.Infof("Removed snapshot `%s`.", backupSources) s.logger.Infof("Removed snapshot `%s`.", backupSources)
@ -432,7 +374,7 @@ func (s *script) createArchive() error {
tarFile := s.file tarFile := s.file
s.registerHook(hookLevelPlumbing, func(error) error { s.registerHook(hookLevelPlumbing, func(error) error {
if err := remove(tarFile); err != nil { if err := utilities.Remove(tarFile); err != nil {
return fmt.Errorf("takeBackup: error removing tar file: %w", err) return fmt.Errorf("takeBackup: error removing tar file: %w", err)
} }
s.logger.Infof("Removed tar file `%s`.", tarFile) s.logger.Infof("Removed tar file `%s`.", tarFile)
@ -477,7 +419,7 @@ func (s *script) encryptArchive() error {
gpgFile := fmt.Sprintf("%s.gpg", s.file) gpgFile := fmt.Sprintf("%s.gpg", s.file)
s.registerHook(hookLevelPlumbing, func(error) error { s.registerHook(hookLevelPlumbing, func(error) error {
if err := remove(gpgFile); err != nil { if err := utilities.Remove(gpgFile); err != nil {
return fmt.Errorf("encryptBackup: error removing gpg file: %w", err) return fmt.Errorf("encryptBackup: error removing gpg file: %w", err)
} }
s.logger.Infof("Removed GPG file `%s`.", gpgFile) s.logger.Infof("Removed GPG file `%s`.", gpgFile)
@ -485,20 +427,20 @@ func (s *script) encryptArchive() error {
}) })
outFile, err := os.Create(gpgFile) outFile, err := os.Create(gpgFile)
defer outFile.Close()
if err != nil { if err != nil {
return fmt.Errorf("encryptBackup: error opening out file: %w", err) return fmt.Errorf("encryptBackup: error opening out file: %w", err)
} }
defer outFile.Close()
_, name := path.Split(s.file) _, name := path.Split(s.file)
dst, err := openpgp.SymmetricallyEncrypt(outFile, []byte(s.c.GpgPassphrase), &openpgp.FileHints{ dst, err := openpgp.SymmetricallyEncrypt(outFile, []byte(s.c.GpgPassphrase), &openpgp.FileHints{
IsBinary: true, IsBinary: true,
FileName: name, FileName: name,
}, nil) }, nil)
defer dst.Close()
if err != nil { if err != nil {
return fmt.Errorf("encryptBackup: error encrypting backup file: %w", err) return fmt.Errorf("encryptBackup: error encrypting backup file: %w", err)
} }
defer dst.Close()
src, err := os.Open(s.file) src, err := os.Open(s.file)
if err != nil { if err != nil {
@ -529,93 +471,12 @@ func (s *script) copyArchive() error {
} }
} }
if s.minioClient != nil { for _, backend := range s.storages {
if _, err := s.minioClient.FPutObject(context.Background(), s.c.AwsS3BucketName, filepath.Join(s.c.AwsS3Path, name), s.file, minio.PutObjectOptions{ if err := backend.Copy(s.file); err != nil {
ContentType: "application/tar+gzip", return err
StorageClass: s.c.AwsStorageClass,
}); err != nil {
errResp := minio.ToErrorResponse(err)
return fmt.Errorf("copyBackup: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode)
}
s.logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`.", s.file, s.c.AwsS3BucketName)
}
if s.webdavClient != nil {
bytes, err := os.ReadFile(s.file)
if err != nil {
return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err)
}
if err := s.webdavClient.MkdirAll(s.c.WebdavPath, 0644); err != nil {
return fmt.Errorf("copyBackup: error creating directory '%s' on WebDAV server: %w", s.c.WebdavPath, err)
}
if err := s.webdavClient.Write(filepath.Join(s.c.WebdavPath, name), bytes, 0644); err != nil {
return fmt.Errorf("copyBackup: error uploading the file to WebDAV server: %w", err)
}
s.logger.Infof("Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", s.file, s.c.WebdavUrl, s.c.WebdavPath)
}
if s.sshClient != nil {
source, err := os.Open(s.file)
if err != nil {
return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err)
}
defer source.Close()
destination, err := s.sftpClient.Create(filepath.Join(s.c.SSHRemotePath, name))
if err != nil {
return fmt.Errorf("copyBackup: error creating file on SSH storage: %w", err)
}
defer destination.Close()
chunk := make([]byte, 1000000)
for {
num, err := source.Read(chunk)
if err == io.EOF {
tot, err := destination.Write(chunk[:num])
if err != nil {
return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err)
}
if tot != len(chunk[:num]) {
return fmt.Errorf("sshClient: failed to write stream")
}
break
}
if err != nil {
return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err)
}
tot, err := destination.Write(chunk[:num])
if err != nil {
return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err)
}
if tot != len(chunk[:num]) {
return fmt.Errorf("sshClient: failed to write stream")
}
}
s.logger.Infof("Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", s.file, s.c.SSHHostName, s.c.SSHRemotePath)
}
if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
if err := copyFile(s.file, path.Join(s.c.BackupArchive, name)); err != nil {
return fmt.Errorf("copyBackup: error copying file to local archive: %w", err)
}
s.logger.Infof("Stored copy of backup `%s` in local archive `%s`.", s.file, s.c.BackupArchive)
if s.c.BackupLatestSymlink != "" {
symlink := path.Join(s.c.BackupArchive, s.c.BackupLatestSymlink)
if _, err := os.Lstat(symlink); err == nil {
os.Remove(symlink)
}
if err := os.Symlink(name, symlink); err != nil {
return fmt.Errorf("copyBackup: error creating latest symlink: %w", err)
}
s.logger.Infof("Created/Updated symlink `%s` for latest backup.", s.c.BackupLatestSymlink)
} }
} }
return nil return nil
} }
@ -629,208 +490,18 @@ func (s *script) pruneBackups() error {
deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays)).Add(s.c.BackupPruningLeeway) deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays)).Add(s.c.BackupPruningLeeway)
// doPrune holds general control flow that applies to any kind of storage. for _, backend := range s.storages {
// Callers can pass in a thunk that performs the actual deletion of files. if stats, err := backend.Prune(deadline, s.c.BackupPruningPrefix); err == nil {
var doPrune = func(lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error { s.stats.Storages[backend.Name()] = StorageStats{
if lenMatches != 0 && lenMatches != lenCandidates { Total: stats.Total,
if err := doRemoveFiles(); err != nil { Pruned: stats.Pruned,
return err
} }
s.logger.Infof(
"Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.",
lenMatches,
lenCandidates,
description,
s.c.BackupRetentionDays,
)
} else if lenMatches != 0 && lenMatches == lenCandidates {
s.logger.Warnf("The current configuration would delete all %d existing %s.", lenMatches, description)
s.logger.Warn("Refusing to do so, please check your configuration.")
} else { } else {
s.logger.Infof("None of %d existing %s were pruned.", lenCandidates, description) return err
} }
return nil
} }
if s.minioClient != nil {
candidates := s.minioClient.ListObjects(context.Background(), s.c.AwsS3BucketName, minio.ListObjectsOptions{
WithMetadata: true,
Prefix: filepath.Join(s.c.AwsS3Path, s.c.BackupPruningPrefix),
Recursive: true,
})
var matches []minio.ObjectInfo
var lenCandidates int
for candidate := range candidates {
lenCandidates++
if candidate.Err != nil {
return fmt.Errorf(
"pruneBackups: error looking up candidates from remote storage: %w",
candidate.Err,
)
}
if candidate.LastModified.Before(deadline) {
matches = append(matches, candidate)
}
}
s.stats.Storages.S3 = StorageStats{
Total: uint(lenCandidates),
Pruned: uint(len(matches)),
}
doPrune(len(matches), lenCandidates, "remote backup(s)", func() error {
objectsCh := make(chan minio.ObjectInfo)
go func() {
for _, match := range matches {
objectsCh <- match
}
close(objectsCh)
}()
errChan := s.minioClient.RemoveObjects(context.Background(), s.c.AwsS3BucketName, objectsCh, minio.RemoveObjectsOptions{})
var removeErrors []error
for result := range errChan {
if result.Err != nil {
removeErrors = append(removeErrors, result.Err)
}
}
if len(removeErrors) != 0 {
return join(removeErrors...)
}
return nil
})
}
if s.webdavClient != nil {
candidates, err := s.webdavClient.ReadDir(s.c.WebdavPath)
if err != nil {
return fmt.Errorf("pruneBackups: error looking up candidates from remote storage: %w", err)
}
var matches []fs.FileInfo
var lenCandidates int
for _, candidate := range candidates {
if !strings.HasPrefix(candidate.Name(), s.c.BackupPruningPrefix) {
continue
}
lenCandidates++
if candidate.ModTime().Before(deadline) {
matches = append(matches, candidate)
}
}
s.stats.Storages.WebDAV = StorageStats{
Total: uint(lenCandidates),
Pruned: uint(len(matches)),
}
doPrune(len(matches), lenCandidates, "WebDAV backup(s)", func() error {
for _, match := range matches {
if err := s.webdavClient.Remove(filepath.Join(s.c.WebdavPath, match.Name())); err != nil {
return fmt.Errorf("pruneBackups: error removing file from WebDAV storage: %w", err)
}
}
return nil
})
}
if s.sshClient != nil {
candidates, err := s.sftpClient.ReadDir(s.c.SSHRemotePath)
if err != nil {
return fmt.Errorf("pruneBackups: error reading directory from SSH storage: %w", err)
}
var matches []string
for _, candidate := range candidates {
if !strings.HasPrefix(candidate.Name(), s.c.BackupPruningPrefix) {
continue
}
if candidate.ModTime().Before(deadline) {
matches = append(matches, candidate.Name())
}
}
s.stats.Storages.SSH = StorageStats{
Total: uint(len(candidates)),
Pruned: uint(len(matches)),
}
doPrune(len(matches), len(candidates), "SSH backup(s)", func() error {
for _, match := range matches {
if err := s.sftpClient.Remove(filepath.Join(s.c.SSHRemotePath, match)); err != nil {
return fmt.Errorf("pruneBackups: error removing file from SSH storage: %w", err)
}
}
return nil
})
}
if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
globPattern := path.Join(
s.c.BackupArchive,
fmt.Sprintf("%s*", s.c.BackupPruningPrefix),
)
globMatches, err := filepath.Glob(globPattern)
if err != nil {
return fmt.Errorf(
"pruneBackups: error looking up matching files using pattern %s: %w",
globPattern,
err,
)
}
var candidates []string
for _, candidate := range globMatches {
fi, err := os.Lstat(candidate)
if err != nil {
return fmt.Errorf(
"pruneBackups: error calling Lstat on file %s: %w",
candidate,
err,
)
}
if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
candidates = append(candidates, candidate)
}
}
var matches []string
for _, candidate := range candidates {
fi, err := os.Stat(candidate)
if err != nil {
return fmt.Errorf(
"pruneBackups: error calling stat on file %s: %w",
candidate,
err,
)
}
if fi.ModTime().Before(deadline) {
matches = append(matches, candidate)
}
}
s.stats.Storages.Local = StorageStats{
Total: uint(len(candidates)),
Pruned: uint(len(matches)),
}
doPrune(len(matches), len(candidates), "local backup(s)", func() error {
var removeErrors []error
for _, match := range matches {
if err := os.Remove(match); err != nil {
removeErrors = append(removeErrors, err)
}
}
if len(removeErrors) != 0 {
return fmt.Errorf(
"pruneBackups: %d error(s) deleting local files, starting with: %w",
len(removeErrors),
join(removeErrors...),
)
}
return nil
})
}
return nil return nil
} }

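The net effect of the changes above is that copyArchive and pruneBackups no longer contain any backend-specific code; they only loop over the storages slice. Condensed into two free-standing helpers (hypothetical names, assuming the internal/storage package introduced by this PR), the pattern is:

package main

import (
	"time"

	"github.com/offen/docker-volume-backup/internal/storage"
)

// copyToAll mirrors the loop in copyArchive: every configured backend
// receives the archive, and the first failure aborts the run.
func copyToAll(backends []storage.Backend, file string) error {
	for _, b := range backends {
		if err := b.Copy(file); err != nil {
			return err
		}
	}
	return nil
}

// pruneAll mirrors pruneBackups: each backend prunes independently and
// reports PruneStats, collected here per backend name.
func pruneAll(backends []storage.Backend, deadline time.Time, prefix string) (map[string]*storage.PruneStats, error) {
	results := map[string]*storage.PruneStats{}
	for _, b := range backends {
		stats, err := b.Prune(deadline, prefix)
		if err != nil {
			return nil, err
		}
		results[b.Name()] = stats
	}
	return results, nil
}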

@ -30,14 +30,6 @@ type StorageStats struct {
PruneErrors uint PruneErrors uint
} }
// StoragesStats stats about each possible archival location (Local, WebDAV, SSH, S3)
type StoragesStats struct {
Local StorageStats
WebDAV StorageStats
SSH StorageStats
S3 StorageStats
}
// Stats global stats regarding script execution // Stats global stats regarding script execution
type Stats struct { type Stats struct {
StartTime time.Time StartTime time.Time
@ -47,5 +39,5 @@ type Stats struct {
LogOutput *bytes.Buffer LogOutput *bytes.Buffer
Containers ContainersStats Containers ContainersStats
BackupFile BackupFileStats BackupFile BackupFileStats
Storages StoragesStats Storages map[string]StorageStats
} }

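Because Storages is now a map keyed by the value each backend returns from Name(), reporting code can treat all backends uniformly. A small illustrative helper, not part of the commit:

package main

import (
	"fmt"
	"strings"
)

// summarizeStorages is illustrative only: given the per-backend stats map
// (the new Stats.Storages field), it renders one line per backend.
func summarizeStorages(storages map[string]StorageStats) string {
	var sb strings.Builder
	for name, s := range storages {
		fmt.Fprintf(&sb, "%s: pruned %d of %d backup(s)\n", name, s.Pruned, s.Total)
	}
	return sb.String()
}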

@ -0,0 +1,131 @@
package local
import (
"fmt"
"os"
"path"
"path/filepath"
"time"
"github.com/offen/docker-volume-backup/internal/storage"
"github.com/offen/docker-volume-backup/internal/utilities"
)
type localStorage struct {
*storage.StorageBackend
latestSymlink string
}
// NewStorageBackend creates and initializes a new local storage backend.
func NewStorageBackend(archivePath string, latestSymlink string, logFunc storage.Log) storage.Backend {
return &localStorage{
StorageBackend: &storage.StorageBackend{
DestinationPath: archivePath,
Log: logFunc,
},
latestSymlink: latestSymlink,
}
}
// Name returns the name of the storage backend
func (b *localStorage) Name() string {
return "Local"
}
// Copy copies the given file to the local storage backend.
func (b *localStorage) Copy(file string) error {
if _, err := os.Stat(b.DestinationPath); os.IsNotExist(err) {
return nil
}
_, name := path.Split(file)
if err := utilities.CopyFile(file, path.Join(b.DestinationPath, name)); err != nil {
return b.Log(storage.ERROR, b.Name(), "Copy: Error copying file to local archive! %w", err)
}
b.Log(storage.INFO, b.Name(), "Stored copy of backup `%s` in local archive `%s`.", file, b.DestinationPath)
if b.latestSymlink != "" {
symlink := path.Join(b.DestinationPath, b.latestSymlink)
if _, err := os.Lstat(symlink); err == nil {
os.Remove(symlink)
}
if err := os.Symlink(name, symlink); err != nil {
return b.Log(storage.ERROR, b.Name(), "Copy: error creating latest symlink! %w", err)
}
b.Log(storage.INFO, b.Name(), "Created/Updated symlink `%s` for latest backup.", b.latestSymlink)
}
return nil
}
// Prune rotates away backups according to the configuration and provided deadline for the local storage backend.
func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
globPattern := path.Join(
b.DestinationPath,
fmt.Sprintf("%s*", pruningPrefix),
)
globMatches, err := filepath.Glob(globPattern)
if err != nil {
return nil, b.Log(storage.ERROR, b.Name(),
"Prune: Error looking up matching files using pattern %s! %w",
globPattern,
err,
)
}
var candidates []string
for _, candidate := range globMatches {
fi, err := os.Lstat(candidate)
if err != nil {
return nil, b.Log(storage.ERROR, b.Name(),
"Prune: Error calling Lstat on file %s! %w",
candidate,
err,
)
}
if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
candidates = append(candidates, candidate)
}
}
var matches []string
for _, candidate := range candidates {
fi, err := os.Stat(candidate)
if err != nil {
return nil, b.Log(storage.ERROR, b.Name(),
"Prune: Error calling stat on file %s! %w",
candidate,
err,
)
}
if fi.ModTime().Before(deadline) {
matches = append(matches, candidate)
}
}
stats := &storage.PruneStats{
Total: uint(len(candidates)),
Pruned: uint(len(matches)),
}
b.DoPrune(b.Name(), len(matches), len(candidates), "local backup(s)", func() error {
var removeErrors []error
for _, match := range matches {
if err := os.Remove(match); err != nil {
removeErrors = append(removeErrors, err)
}
}
if len(removeErrors) != 0 {
return b.Log(storage.ERROR, b.Name(),
"Prune: %d error(s) deleting local files, starting with: %w",
len(removeErrors),
utilities.Join(removeErrors...),
)
}
return nil
})
return stats, nil
}

internal/storage/s3/s3.go (new file, 145 lines)

@ -0,0 +1,145 @@
package s3
import (
"context"
"errors"
"path"
"path/filepath"
"time"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/offen/docker-volume-backup/internal/storage"
"github.com/offen/docker-volume-backup/internal/utilities"
)
type s3Storage struct {
*storage.StorageBackend
client *minio.Client
bucket string
storageClass string
}
// NewStorageBackend creates and initializes a new S3/Minio storage backend.
func NewStorageBackend(endpoint string, accessKeyId string, secretAccessKey string, iamRoleEndpoint string, endpointProto string, endpointInsecure bool,
remotePath string, bucket string, storageClass string, logFunc storage.Log) (storage.Backend, error) {
var creds *credentials.Credentials
if accessKeyId != "" && secretAccessKey != "" {
creds = credentials.NewStaticV4(
accessKeyId,
secretAccessKey,
"",
)
} else if iamRoleEndpoint != "" {
creds = credentials.NewIAM(iamRoleEndpoint)
} else {
return nil, errors.New("newScript: AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
}
options := minio.Options{
Creds: creds,
Secure: endpointProto == "https",
}
if endpointInsecure {
if !options.Secure {
return nil, errors.New("newScript: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
}
transport, err := minio.DefaultTransport(true)
if err != nil {
return nil, logFunc(storage.ERROR, "S3", "NewScript: failed to create default minio transport")
}
transport.TLSClientConfig.InsecureSkipVerify = true
options.Transport = transport
}
mc, err := minio.New(endpoint, &options)
if err != nil {
return nil, logFunc(storage.ERROR, "S3", "NewScript: error setting up minio client: %w", err)
}
return &s3Storage{
StorageBackend: &storage.StorageBackend{
DestinationPath: remotePath,
Log: logFunc,
},
client: mc,
bucket: bucket,
storageClass: storageClass,
}, nil
}
// Name returns the name of the storage backend
func (v *s3Storage) Name() string {
return "S3"
}
// Copy copies the given file to the S3/Minio storage backend.
func (b *s3Storage) Copy(file string) error {
_, name := path.Split(file)
if _, err := b.client.FPutObject(context.Background(), b.bucket, filepath.Join(b.DestinationPath, name), file, minio.PutObjectOptions{
ContentType: "application/tar+gzip",
StorageClass: b.storageClass,
}); err != nil {
errResp := minio.ToErrorResponse(err)
return b.Log(storage.ERROR, b.Name(), "Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode)
}
b.Log(storage.INFO, b.Name(), "Uploaded a copy of backup `%s` to bucket `%s`.", file, b.bucket)
return nil
}
// Prune rotates away backups according to the configuration and provided deadline for the S3/Minio storage backend.
func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
candidates := b.client.ListObjects(context.Background(), b.bucket, minio.ListObjectsOptions{
WithMetadata: true,
Prefix: filepath.Join(b.DestinationPath, pruningPrefix),
Recursive: true,
})
var matches []minio.ObjectInfo
var lenCandidates int
for candidate := range candidates {
lenCandidates++
if candidate.Err != nil {
return nil, b.Log(storage.ERROR, b.Name(),
"Prune: Error looking up candidates from remote storage! %w",
candidate.Err,
)
}
if candidate.LastModified.Before(deadline) {
matches = append(matches, candidate)
}
}
stats := &storage.PruneStats{
Total: uint(lenCandidates),
Pruned: uint(len(matches)),
}
b.DoPrune(b.Name(), len(matches), lenCandidates, "remote backup(s)", func() error {
objectsCh := make(chan minio.ObjectInfo)
go func() {
for _, match := range matches {
objectsCh <- match
}
close(objectsCh)
}()
errChan := b.client.RemoveObjects(context.Background(), b.bucket, objectsCh, minio.RemoveObjectsOptions{})
var removeErrors []error
for result := range errChan {
if result.Err != nil {
removeErrors = append(removeErrors, result.Err)
}
}
if len(removeErrors) != 0 {
return utilities.Join(removeErrors...)
}
return nil
})
return stats, nil
}

internal/storage/ssh/ssh.go (new file, 176 lines)

@ -0,0 +1,176 @@
package ssh
import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/offen/docker-volume-backup/internal/storage"
"github.com/pkg/sftp"
"golang.org/x/crypto/ssh"
)
type sshStorage struct {
*storage.StorageBackend
client *ssh.Client
sftpClient *sftp.Client
hostName string
}
// NewStorageBackend creates and initializes a new SSH storage backend.
func NewStorageBackend(hostName string, port string, user string, password string, identityFile string, identityPassphrase string, remotePath string,
logFunc storage.Log) (storage.Backend, error) {
var authMethods []ssh.AuthMethod
if password != "" {
authMethods = append(authMethods, ssh.Password(password))
}
if _, err := os.Stat(identityFile); err == nil {
key, err := ioutil.ReadFile(identityFile)
if err != nil {
return nil, errors.New("newScript: error reading the private key")
}
var signer ssh.Signer
if identityPassphrase != "" {
signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(identityPassphrase))
if err != nil {
return nil, errors.New("newScript: error parsing the encrypted private key")
}
authMethods = append(authMethods, ssh.PublicKeys(signer))
} else {
signer, err = ssh.ParsePrivateKey(key)
if err != nil {
return nil, errors.New("newScript: error parsing the private key")
}
authMethods = append(authMethods, ssh.PublicKeys(signer))
}
}
sshClientConfig := &ssh.ClientConfig{
User: user,
Auth: authMethods,
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", hostName, port), sshClientConfig)
if err != nil {
return nil, logFunc(storage.ERROR, "SSH", "NewScript: Error creating ssh client! %w", err)
}
_, _, err = sshClient.SendRequest("keepalive", false, nil)
if err != nil {
return nil, err
}
sftpClient, err := sftp.NewClient(sshClient)
if err != nil {
return nil, logFunc(storage.ERROR, "SSH", "NewScript: error creating sftp client! %w", err)
}
return &sshStorage{
StorageBackend: &storage.StorageBackend{
DestinationPath: remotePath,
Log: logFunc,
},
client: sshClient,
sftpClient: sftpClient,
hostName: hostName,
}, nil
}
// Name returns the name of the storage backend
func (b *sshStorage) Name() string {
return "SSH"
}
// Copy copies the given file to the SSH storage backend.
func (b *sshStorage) Copy(file string) error {
source, err := os.Open(file)
_, name := path.Split(file)
if err != nil {
return b.Log(storage.ERROR, b.Name(), "Copy: Error reading the file to be uploaded! %w", err)
}
defer source.Close()
destination, err := b.sftpClient.Create(filepath.Join(b.DestinationPath, name))
if err != nil {
return b.Log(storage.ERROR, b.Name(), "Copy: Error creating file on SSH storage! %w", err)
}
defer destination.Close()
chunk := make([]byte, 1000000)
for {
num, err := source.Read(chunk)
if err == io.EOF {
tot, err := destination.Write(chunk[:num])
if err != nil {
return b.Log(storage.ERROR, b.Name(), "Copy: Error uploading the file to SSH storage! %w", err)
}
if tot != len(chunk[:num]) {
return b.Log(storage.ERROR, b.Name(), "sshClient: failed to write stream")
}
break
}
if err != nil {
return b.Log(storage.ERROR, b.Name(), "Copy: Error uploading the file to SSH storage! %w", err)
}
tot, err := destination.Write(chunk[:num])
if err != nil {
return b.Log(storage.ERROR, b.Name(), "Copy: Error uploading the file to SSH storage! %w", err)
}
if tot != len(chunk[:num]) {
return b.Log(storage.ERROR, b.Name(), "sshClient: failed to write stream")
}
}
b.Log(storage.INFO, b.Name(), "Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, b.hostName, b.DestinationPath)
return nil
}
// Prune rotates away backups according to the configuration and provided deadline for the SSH storage backend.
func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
candidates, err := b.sftpClient.ReadDir(b.DestinationPath)
if err != nil {
return nil, b.Log(storage.ERROR, b.Name(), "Prune: Error reading directory from SSH storage! %w", err)
}
var matches []string
for _, candidate := range candidates {
if !strings.HasPrefix(candidate.Name(), pruningPrefix) {
continue
}
if candidate.ModTime().Before(deadline) {
matches = append(matches, candidate.Name())
}
}
stats := &storage.PruneStats{
Total: uint(len(candidates)),
Pruned: uint(len(matches)),
}
b.DoPrune(b.Name(), len(matches), len(candidates), "SSH backup(s)", func() error {
for _, match := range matches {
if err := b.sftpClient.Remove(filepath.Join(b.DestinationPath, match)); err != nil {
return b.Log(storage.ERROR, b.Name(), "Prune: Error removing file from SSH storage! %w", err)
}
}
return nil
})
return stats, nil
}


@ -0,0 +1,58 @@
package storage
import (
"time"
)
// Backend is an interface for defining functions which all storage providers support.
type Backend interface {
Copy(file string) error
Prune(deadline time.Time, pruningPrefix string) (*PruneStats, error)
Name() string
}
// StorageBackend is a generic type of storage. It holds the properties common to all storage types.
type StorageBackend struct {
DestinationPath string
RetentionDays int
Log Log
}
type LogType string
const (
INFO LogType = "INFO"
WARNING LogType = "WARNING"
ERROR LogType = "ERROR"
)
type Log func(logType LogType, context string, msg string, params ...interface{}) error
// PruneStats is a wrapper struct for returning stats after pruning
type PruneStats struct {
Total uint
Pruned uint
}
// DoPrune holds general control flow that applies to any kind of storage.
// Callers can pass in a thunk that performs the actual deletion of files.
func (b *StorageBackend) DoPrune(context string, lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error {
if lenMatches != 0 && lenMatches != lenCandidates {
if err := doRemoveFiles(); err != nil {
return err
}
b.Log(INFO, context,
"Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.",
lenMatches,
lenCandidates,
description,
b.RetentionDays,
)
} else if lenMatches != 0 && lenMatches == lenCandidates {
b.Log(WARNING, context, "The current configuration would delete all %d existing %s.", lenMatches, description)
b.Log(WARNING, context, "Refusing to do so, please check your configuration.")
} else {
b.Log(INFO, context, "None of %d existing %s were pruned.", lenCandidates, description)
}
return nil
}

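For illustration, implementing this interface for a new target only requires the three methods plus an embedded *storage.StorageBackend for Log and DoPrune. The following backend is hypothetical (a `memory` package that is not part of this commit) and only sketches the contract:

package memory

import (
	"strings"
	"time"

	"github.com/offen/docker-volume-backup/internal/storage"
)

// memoryStorage is a made-up backend that only records file names in a map.
type memoryStorage struct {
	*storage.StorageBackend
	files map[string]time.Time
}

// NewStorageBackend mirrors the constructor shape of the real backends.
func NewStorageBackend(logFunc storage.Log) storage.Backend {
	return &memoryStorage{
		StorageBackend: &storage.StorageBackend{Log: logFunc},
		files:          map[string]time.Time{},
	}
}

// Name returns the name of the storage backend.
func (b *memoryStorage) Name() string {
	return "Memory"
}

// Copy records the given file instead of uploading it anywhere.
func (b *memoryStorage) Copy(file string) error {
	b.files[file] = time.Now()
	b.Log(storage.INFO, b.Name(), "Recorded backup `%s` in memory.", file)
	return nil
}

// Prune drops recorded files older than the deadline, reusing DoPrune
// for the shared "refuse to delete everything" control flow.
func (b *memoryStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
	var candidates, matches []string
	for name, added := range b.files {
		if !strings.HasPrefix(name, pruningPrefix) {
			continue
		}
		candidates = append(candidates, name)
		if added.Before(deadline) {
			matches = append(matches, name)
		}
	}
	stats := &storage.PruneStats{
		Total:  uint(len(candidates)),
		Pruned: uint(len(matches)),
	}
	b.DoPrune(b.Name(), len(matches), len(candidates), "in-memory backup(s)", func() error {
		for _, name := range matches {
			delete(b.files, name)
		}
		return nil
	})
	return stats, nil
}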

@ -0,0 +1,108 @@
package webdav
import (
"errors"
"io/fs"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/offen/docker-volume-backup/internal/storage"
"github.com/studio-b12/gowebdav"
)
type webDavStorage struct {
*storage.StorageBackend
client *gowebdav.Client
url string
}
// NewStorageBackend creates and initializes a new WebDav storage backend.
func NewStorageBackend(url string, remotePath string, username string, password string, urlInsecure bool,
logFunc storage.Log) (storage.Backend, error) {
if username == "" || password == "" {
return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided")
} else {
webdavClient := gowebdav.NewClient(url, username, password)
if urlInsecure {
defaultTransport, ok := http.DefaultTransport.(*http.Transport)
if !ok {
return nil, errors.New("newScript: unexpected error when asserting type for http.DefaultTransport")
}
webdavTransport := defaultTransport.Clone()
webdavTransport.TLSClientConfig.InsecureSkipVerify = urlInsecure
webdavClient.SetTransport(webdavTransport)
}
return &webDavStorage{
StorageBackend: &storage.StorageBackend{
DestinationPath: remotePath,
Log: logFunc,
},
client: webdavClient,
}, nil
}
}
// Name returns the name of the storage backend
func (b *webDavStorage) Name() string {
return "WebDav"
}
// Copy copies the given file to the WebDav storage backend.
func (b *webDavStorage) Copy(file string) error {
bytes, err := os.ReadFile(file)
_, name := path.Split(file)
if err != nil {
return b.Log(storage.ERROR, b.Name(), "Copy: Error reading the file to be uploaded! %w", err)
}
if err := b.client.MkdirAll(b.DestinationPath, 0644); err != nil {
return b.Log(storage.ERROR, b.Name(), "Copy: Error creating directory '%s' on WebDAV server! %w", b.DestinationPath, err)
}
if err := b.client.Write(filepath.Join(b.DestinationPath, name), bytes, 0644); err != nil {
return b.Log(storage.ERROR, b.Name(), "Copy: Error uploading the file to WebDAV server! %w", err)
}
b.Log(storage.INFO, b.Name(), "Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", file, b.url, b.DestinationPath)
return nil
}
// Prune rotates away backups according to the configuration and provided deadline for the WebDav storage backend.
func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
candidates, err := b.client.ReadDir(b.DestinationPath)
if err != nil {
return nil, b.Log(storage.ERROR, b.Name(), "Prune: Error looking up candidates from remote storage! %w", err)
}
var matches []fs.FileInfo
var lenCandidates int
for _, candidate := range candidates {
if !strings.HasPrefix(candidate.Name(), pruningPrefix) {
continue
}
lenCandidates++
if candidate.ModTime().Before(deadline) {
matches = append(matches, candidate)
}
}
stats := &storage.PruneStats{
Total: uint(lenCandidates),
Pruned: uint(len(matches)),
}
b.DoPrune(b.Name(), len(matches), lenCandidates, "WebDAV backup(s)", func() error {
for _, match := range matches {
if err := b.client.Remove(filepath.Join(b.DestinationPath, match.Name())); err != nil {
return b.Log(storage.ERROR, b.Name(), "Prune: Error removing file from WebDAV storage! %w", err)
}
}
return nil
})
return stats, nil
}


@ -1,7 +1,7 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de> // Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0 // SPDX-License-Identifier: MPL-2.0
package main package utilities
import ( import (
"bytes" "bytes"
@ -12,10 +12,10 @@ import (
"strings" "strings"
) )
var noop = func() error { return nil } var Noop = func() error { return nil }
// copy creates a copy of the file located at `dst` at `src`. // copy creates a copy of the file located at `dst` at `src`.
func copyFile(src, dst string) error { func CopyFile(src, dst string) error {
in, err := os.Open(src) in, err := os.Open(src)
if err != nil { if err != nil {
return err return err
@ -36,7 +36,7 @@ func copyFile(src, dst string) error {
} }
// join takes a list of errors and joins them into a single error // join takes a list of errors and joins them into a single error
func join(errs ...error) error { func Join(errs ...error) error {
if len(errs) == 1 { if len(errs) == 1 {
return errs[0] return errs[0]
} }
@ -51,7 +51,7 @@ func join(errs ...error) error {
} }
// remove removes the given file or directory from disk. // remove removes the given file or directory from disk.
func remove(location string) error { func Remove(location string) error {
fi, err := os.Lstat(location) fi, err := os.Lstat(location)
if err != nil { if err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {
@ -72,7 +72,7 @@ func remove(location string) error {
// buffer takes an io.Writer and returns a wrapped version of the // buffer takes an io.Writer and returns a wrapped version of the
// writer that writes to both the original target as well as the returned buffer // writer that writes to both the original target as well as the returned buffer
func buffer(w io.Writer) (io.Writer, *bytes.Buffer) { func Buffer(w io.Writer) (io.Writer, *bytes.Buffer) {
buffering := &bufferingWriter{buf: bytes.Buffer{}, writer: w} buffering := &bufferingWriter{buf: bytes.Buffer{}, writer: w}
return buffering, &buffering.buf return buffering, &buffering.buf
} }