Fix WebDAV spelling, remove some inconsistencies (#143)

* Simplify logging, fix WebDAV spelling

* Define options types per package

* Move util functions that are not used cross package

* Add per file license headers

* Rename config type
Frederik Ring 2022-08-18 10:11:13 +02:00
parent 279844ccfb
commit b60c747448
10 changed files with 307 additions and 234 deletions
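
Before the per-file diffs, a minimal sketch (not itself part of the commit) of the construction style the "Define options types per package" change moves to: callers populate a per-package Config struct instead of passing a long list of positional arguments. Identifiers match the webdav diff below; the concrete values are hypothetical.

package main

import (
	"github.com/offen/docker-volume-backup/internal/storage"
	"github.com/offen/docker-volume-backup/internal/storage/webdav"
)

func newWebDAVBackend(logFunc storage.Log) (storage.Backend, error) {
	cfg := webdav.Config{
		URL:        "https://dav.example.com", // hypothetical endpoint
		Username:   "backup",                  // hypothetical credentials
		Password:   "secret",
		RemotePath: "/backups",
	}
	return webdav.NewStorageBackend(cfg, logFunc)
}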

.dockerignore

@@ -1,19 +1,7 @@
-# Ignore everything
-*
-# Exceptions:
-# Note: Wildcards for directories like * or ** don't work (yet) with exclamation marks!
-!cmd/backup/*.go
-!cmd/backup/*.tmpl
-!internal/storage/*.go
-!internal/storage/local/*.go
-!internal/storage/s3/*.go
-!internal/storage/ssh/*.go
-!internal/storage/webdav/*.go
-!internal/utilities/*.go
-!Dockerfile
-!entrypoint.sh
-!go.*
+test
+.github
+.circleci
+docs
+.editorconfig
+LICENSE
+README.md

cmd/backup/lock.go

@@ -9,7 +9,6 @@ import (
 	"time"
 
 	"github.com/gofrs/flock"
-	"github.com/offen/docker-volume-backup/internal/utilities"
 )
 
 // lock opens a lockfile at the given location, keeping it locked until the
@@ -32,7 +31,7 @@ func (s *script) lock(lockfile string) (func() error, error) {
 	for {
 		acquired, err := fileLock.TryLock()
 		if err != nil {
-			return utilities.Noop, fmt.Errorf("lock: error trying lock: %w", err)
+			return noop, fmt.Errorf("lock: error trying lock: %w", err)
 		}
 		if acquired {
 			if s.encounteredLock {
@@ -53,7 +52,7 @@ func (s *script) lock(lockfile string) (func() error, error) {
 		case <-retry.C:
 			continue
 		case <-deadline.C:
-			return utilities.Noop, errors.New("lock: timed out waiting for lockfile to become available")
+			return noop, errors.New("lock: timed out waiting for lockfile to become available")
 		}
 	}
 }
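
A rough sketch of the calling pattern lock supports after this change (assumed from the signature above, not code from the commit; the lockfile path is illustrative): because noop is returned alongside every error, the release function is always safe to defer.

func (s *script) withLock() error {
	unlock, err := s.lock("/var/lock/dockervolumebackup.lock") // illustrative path
	if err != nil {
		return err
	}
	defer unlock() // never nil: lock returns noop in its error branches
	// ... run the backup while holding the lock ...
	return nil
}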

cmd/backup/script.go

@@ -58,7 +58,7 @@ type script struct {
 // reading from env vars or other configuration sources is expected to happen
 // in this method.
 func newScript() (*script, error) {
-	stdOut, logBuffer := utilities.Buffer(os.Stdout)
+	stdOut, logBuffer := buffer(os.Stdout)
 	s := &script{
 		c: &Config{},
 		logger: &logrus.Logger{
@@ -72,7 +72,7 @@ func newScript() (*script, error) {
 		LogOutput: logBuffer,
 		Storages: map[string]StorageStats{
 			"S3":     {},
-			"WebDav": {},
+			"WebDAV": {},
 			"SSH":    {},
 			"Local":  {},
 		},
@@ -107,29 +107,31 @@ func newScript() (*script, error) {
 		s.cli = cli
 	}
 
-	logFunc := func(logType storage.LogType, context string, msg string, params ...interface{}) error {
-		var allParams []interface{}
-		allParams = append(allParams, context)
-		allParams = append(allParams, params...)
+	logFunc := func(logType storage.LogLevel, context string, msg string, params ...interface{}) {
 		switch logType {
-		case storage.INFO:
-			s.logger.Infof("[%s] "+msg, allParams...)
-			return nil
-		case storage.WARNING:
-			s.logger.Warnf("[%s] "+msg, allParams...)
-			return nil
-		case storage.ERROR:
-			return fmt.Errorf("[%s] "+msg, allParams...)
+		case storage.LogLevelWarning:
+			s.logger.Warnf("["+context+"] "+msg, params...)
+		case storage.LogLevelError:
+			s.logger.Errorf("["+context+"] "+msg, params...)
+		case storage.LogLevelInfo:
 		default:
-			s.logger.Warnf("[%s] "+msg, allParams...)
-			return nil
+			s.logger.Infof("["+context+"] "+msg, params...)
 		}
 	}
 
 	if s.c.AwsS3BucketName != "" {
-		if s3Backend, err := s3.NewStorageBackend(s.c.AwsEndpoint, s.c.AwsAccessKeyID, s.c.AwsSecretAccessKey, s.c.AwsIamRoleEndpoint,
-			s.c.AwsEndpointProto, s.c.AwsEndpointInsecure, s.c.AwsS3Path, s.c.AwsS3BucketName, s.c.AwsStorageClass, logFunc); err != nil {
+		s3Config := s3.Config{
+			Endpoint:         s.c.AwsEndpoint,
+			AccessKeyID:      s.c.AwsAccessKeyID,
+			SecretAccessKey:  s.c.AwsSecretAccessKey,
+			IamRoleEndpoint:  s.c.AwsIamRoleEndpoint,
+			EndpointProto:    s.c.AwsEndpointProto,
+			EndpointInsecure: s.c.AwsEndpointInsecure,
+			RemotePath:       s.c.AwsS3Path,
+			BucketName:       s.c.AwsS3BucketName,
+			StorageClass:     s.c.AwsStorageClass,
+		}
+		if s3Backend, err := s3.NewStorageBackend(s3Config, logFunc); err != nil {
 			return nil, err
 		} else {
 			s.storages = append(s.storages, s3Backend)
@@ -137,8 +139,14 @@ func newScript() (*script, error) {
 	}
 
 	if s.c.WebdavUrl != "" {
-		if webdavBackend, err := webdav.NewStorageBackend(s.c.WebdavUrl, s.c.WebdavPath, s.c.WebdavUsername, s.c.WebdavPassword,
-			s.c.WebdavUrlInsecure, logFunc); err != nil {
+		webDavConfig := webdav.Config{
+			URL:         s.c.WebdavUrl,
+			URLInsecure: s.c.WebdavUrlInsecure,
+			Username:    s.c.WebdavUsername,
+			Password:    s.c.WebdavPassword,
+			RemotePath:  s.c.WebdavPath,
+		}
+		if webdavBackend, err := webdav.NewStorageBackend(webDavConfig, logFunc); err != nil {
 			return nil, err
 		} else {
 			s.storages = append(s.storages, webdavBackend)
@@ -146,16 +154,30 @@ func newScript() (*script, error) {
 	}
 
 	if s.c.SSHHostName != "" {
-		if sshBackend, err := ssh.NewStorageBackend(s.c.SSHHostName, s.c.SSHPort, s.c.SSHUser, s.c.SSHPassword, s.c.SSHIdentityFile,
-			s.c.SSHIdentityPassphrase, s.c.SSHRemotePath, logFunc); err != nil {
+		sshConfig := ssh.Config{
+			HostName:           s.c.SSHHostName,
+			Port:               s.c.SSHPort,
+			User:               s.c.SSHUser,
+			Password:           s.c.SSHPassword,
+			IdentityFile:       s.c.SSHIdentityFile,
+			IdentityPassphrase: s.c.SSHIdentityPassphrase,
+			RemotePath:         s.c.SSHRemotePath,
+		}
+		if sshBackend, err := ssh.NewStorageBackend(sshConfig, logFunc); err != nil {
 			return nil, err
 		} else {
 			s.storages = append(s.storages, sshBackend)
 		}
 	}
 
-	localBackend := local.NewStorageBackend(s.c.BackupArchive, s.c.BackupLatestSymlink, logFunc)
-	s.storages = append(s.storages, localBackend)
+	if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
+		localConfig := local.Config{
+			ArchivePath:   s.c.BackupArchive,
+			LatestSymlink: s.c.BackupLatestSymlink,
+		}
+		localBackend := local.NewStorageBackend(localConfig, logFunc)
+		s.storages = append(s.storages, localBackend)
+	}
 
 	if s.c.EmailNotificationRecipient != "" {
 		emailURL := fmt.Sprintf(
@@ -228,14 +250,14 @@ func newScript() (*script, error) {
 // restart everything that has been stopped.
 func (s *script) stopContainers() (func() error, error) {
 	if s.cli == nil {
-		return utilities.Noop, nil
+		return noop, nil
 	}
 
 	allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
 		Quiet: true,
 	})
 	if err != nil {
-		return utilities.Noop, fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err)
+		return noop, fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err)
 	}
 
 	containerLabel := fmt.Sprintf(
@@ -251,11 +273,11 @@ func (s *script) stopContainers() (func() error, error) {
 	})
 	if err != nil {
-		return utilities.Noop, fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err)
+		return noop, fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err)
 	}
 
 	if len(containersToStop) == 0 {
-		return utilities.Noop, nil
+		return noop, nil
 	}
 
 	s.logger.Infof(
@@ -357,7 +379,7 @@ func (s *script) createArchive() error {
 		backupSources = filepath.Join("/tmp", s.c.BackupSources)
 		// copy before compressing guard against a situation where backup folder's content are still growing.
 		s.registerHook(hookLevelPlumbing, func(error) error {
-			if err := utilities.Remove(backupSources); err != nil {
+			if err := remove(backupSources); err != nil {
 				return fmt.Errorf("takeBackup: error removing snapshot: %w", err)
 			}
 			s.logger.Infof("Removed snapshot `%s`.", backupSources)
@@ -367,15 +389,15 @@ func (s *script) createArchive() error {
 			PreserveTimes: true,
 			PreserveOwner: true,
 		}); err != nil {
-			return fmt.Errorf("takeBackup: error creating snapshot: %w", err)
+			return fmt.Errorf("createArchive: error creating snapshot: %w", err)
 		}
 		s.logger.Infof("Created snapshot of `%s` at `%s`.", s.c.BackupSources, backupSources)
 	}
 
 	tarFile := s.file
 	s.registerHook(hookLevelPlumbing, func(error) error {
-		if err := utilities.Remove(tarFile); err != nil {
-			return fmt.Errorf("takeBackup: error removing tar file: %w", err)
+		if err := remove(tarFile); err != nil {
+			return fmt.Errorf("createArchive: error removing tar file: %w", err)
 		}
 		s.logger.Infof("Removed tar file `%s`.", tarFile)
 		return nil
@@ -383,7 +405,7 @@ func (s *script) createArchive() error {
 	backupPath, err := filepath.Abs(stripTrailingSlashes(backupSources))
 	if err != nil {
-		return fmt.Errorf("takeBackup: error getting absolute path: %w", err)
+		return fmt.Errorf("createArchive: error getting absolute path: %w", err)
 	}
 
 	var filesEligibleForBackup []string
@@ -398,11 +420,11 @@ func (s *script) createArchive() error {
 		filesEligibleForBackup = append(filesEligibleForBackup, path)
 		return nil
 	}); err != nil {
-		return fmt.Errorf("compress: error walking filesystem tree: %w", err)
+		return fmt.Errorf("createArchive: error walking filesystem tree: %w", err)
 	}
 
 	if err := createArchive(filesEligibleForBackup, backupSources, tarFile); err != nil {
-		return fmt.Errorf("takeBackup: error compressing backup folder: %w", err)
+		return fmt.Errorf("createArchive: error compressing backup folder: %w", err)
 	}
 
 	s.logger.Infof("Created backup of `%s` at `%s`.", backupSources, tarFile)
@@ -419,8 +441,8 @@ func (s *script) encryptArchive() error {
 	gpgFile := fmt.Sprintf("%s.gpg", s.file)
 	s.registerHook(hookLevelPlumbing, func(error) error {
-		if err := utilities.Remove(gpgFile); err != nil {
-			return fmt.Errorf("encryptBackup: error removing gpg file: %w", err)
+		if err := remove(gpgFile); err != nil {
+			return fmt.Errorf("encryptArchive: error removing gpg file: %w", err)
 		}
 		s.logger.Infof("Removed GPG file `%s`.", gpgFile)
 		return nil
@@ -428,7 +450,7 @@ func (s *script) encryptArchive() error {
 	outFile, err := os.Create(gpgFile)
 	if err != nil {
-		return fmt.Errorf("encryptBackup: error opening out file: %w", err)
+		return fmt.Errorf("encryptArchive: error opening out file: %w", err)
 	}
 	defer outFile.Close()
@@ -438,17 +460,17 @@ func (s *script) encryptArchive() error {
 		FileName: name,
 	}, nil)
 	if err != nil {
-		return fmt.Errorf("encryptBackup: error encrypting backup file: %w", err)
+		return fmt.Errorf("encryptArchive: error encrypting backup file: %w", err)
 	}
 	defer dst.Close()
 
 	src, err := os.Open(s.file)
 	if err != nil {
-		return fmt.Errorf("encryptBackup: error opening backup file `%s`: %w", s.file, err)
+		return fmt.Errorf("encryptArchive: error opening backup file `%s`: %w", s.file, err)
 	}
 
 	if _, err := io.Copy(dst, src); err != nil {
-		return fmt.Errorf("encryptBackup: error writing ciphertext to file: %w", err)
+		return fmt.Errorf("encryptArchive: error writing ciphertext to file: %w", err)
 	}
 
 	s.file = gpgFile
@@ -461,7 +483,7 @@ func (s *script) encryptArchive() error {
 func (s *script) copyArchive() error {
 	_, name := path.Split(s.file)
 	if stat, err := os.Stat(s.file); err != nil {
-		return fmt.Errorf("copyBackup: unable to stat backup file: %w", err)
+		return fmt.Errorf("copyArchive: unable to stat backup file: %w", err)
 	} else {
 		size := stat.Size()
 		s.stats.BackupFile = BackupFileStats{
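
The reworked logFunc above no longer returns an error: backends log via side effect and return errors separately. As an illustration of the new storage.Log contract, a hypothetical stand-in implementation (e.g. for tests), not code from this commit:

import (
	"log"

	"github.com/offen/docker-volume-backup/internal/storage"
)

var testLogFunc storage.Log = func(level storage.LogLevel, context string, msg string, params ...interface{}) {
	prefixes := map[storage.LogLevel]string{
		storage.LogLevelInfo:    "INFO",
		storage.LogLevelWarning: "WARN",
		storage.LogLevelError:   "ERROR",
	}
	// Prepend the level and the backend name (context), then defer to the
	// format verbs contained in msg.
	log.Printf("%s ["+context+"] "+msg, append([]interface{}{prefixes[level]}, params...)...)
}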

cmd/backup/util.go (new file, 52 lines)

@@ -0,0 +1,52 @@
+// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// SPDX-License-Identifier: MPL-2.0
+
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+)
+
+var noop = func() error { return nil }
+
+// remove removes the given file or directory from disk.
+func remove(location string) error {
+	fi, err := os.Lstat(location)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return fmt.Errorf("remove: error checking for existence of `%s`: %w", location, err)
+	}
+	if fi.IsDir() {
+		err = os.RemoveAll(location)
+	} else {
+		err = os.Remove(location)
+	}
+	if err != nil {
+		return fmt.Errorf("remove: error removing `%s`: %w", location, err)
+	}
+	return nil
+}
+
+// buffer takes an io.Writer and returns a wrapped version of the
+// writer that writes to both the original target as well as the returned buffer
+func buffer(w io.Writer) (io.Writer, *bytes.Buffer) {
+	buffering := &bufferingWriter{buf: bytes.Buffer{}, writer: w}
+	return buffering, &buffering.buf
+}
+
+type bufferingWriter struct {
+	buf    bytes.Buffer
+	writer io.Writer
+}
+
+func (b *bufferingWriter) Write(p []byte) (n int, err error) {
+	if n, err := b.buf.Write(p); err != nil {
+		return n, fmt.Errorf("(*bufferingWriter).Write: error writing to buffer: %w", err)
+	}
+	return b.writer.Write(p)
+}
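
A short usage sketch for the buffer helper above, mirroring (loosely, wiring details assumed) how newScript consumes it: everything written through the returned writer reaches os.Stdout and is simultaneously captured in the buffer, so notifications can later include the collected log output. Assumes github.com/sirupsen/logrus is imported, as it is in this package.

stdOut, logBuffer := buffer(os.Stdout)
logger := &logrus.Logger{
	Out:       stdOut, // writes hit os.Stdout and logBuffer at the same time
	Formatter: new(logrus.TextFormatter),
	Hooks:     make(logrus.LevelHooks),
	Level:     logrus.InfoLevel,
}
logger.Info("starting backup")
_ = logBuffer.String() // contains the formatted log line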

internal/storage/local/local.go

@@ -1,7 +1,11 @@
+// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// SPDX-License-Identifier: MPL-2.0
+
 package local
 
 import (
 	"fmt"
+	"io"
 	"os"
 	"path"
 	"path/filepath"
@@ -16,14 +20,20 @@ type localStorage struct {
 	latestSymlink string
 }
 
+// Config allows configuration of a local storage backend.
+type Config struct {
+	ArchivePath   string
+	LatestSymlink string
+}
+
 // NewStorageBackend creates and initializes a new local storage backend.
-func NewStorageBackend(archivePath string, latestSymlink string, logFunc storage.Log) storage.Backend {
+func NewStorageBackend(opts Config, logFunc storage.Log) storage.Backend {
 	return &localStorage{
 		StorageBackend: &storage.StorageBackend{
-			DestinationPath: archivePath,
+			DestinationPath: opts.ArchivePath,
 			Log:             logFunc,
 		},
-		latestSymlink: latestSymlink,
+		latestSymlink: opts.LatestSymlink,
 	}
 }
@@ -34,16 +44,12 @@ func (b *localStorage) Name() string {
 
 // Copy copies the given file to the local storage backend.
 func (b *localStorage) Copy(file string) error {
-	if _, err := os.Stat(b.DestinationPath); os.IsNotExist(err) {
-		return nil
-	}
-
 	_, name := path.Split(file)
 
-	if err := utilities.CopyFile(file, path.Join(b.DestinationPath, name)); err != nil {
-		return b.Log(storage.ERROR, b.Name(), "Copy: Error copying file to local archive! %w", err)
+	if err := copyFile(file, path.Join(b.DestinationPath, name)); err != nil {
+		return fmt.Errorf("(*localStorage).Copy: Error copying file to local archive! %w", err)
 	}
-	b.Log(storage.INFO, b.Name(), "Stored copy of backup `%s` in local archive `%s`.", file, b.DestinationPath)
+	b.Log(storage.LogLevelInfo, b.Name(), "Stored copy of backup `%s` in local archive `%s`.", file, b.DestinationPath)
 
 	if b.latestSymlink != "" {
 		symlink := path.Join(b.DestinationPath, b.latestSymlink)
@@ -51,9 +57,9 @@ func (b *localStorage) Copy(file string) error {
 			os.Remove(symlink)
 		}
 		if err := os.Symlink(name, symlink); err != nil {
-			return b.Log(storage.ERROR, b.Name(), "Copy: error creating latest symlink! %w", err)
+			return fmt.Errorf("(*localStorage).Copy: error creating latest symlink! %w", err)
 		}
-		b.Log(storage.INFO, b.Name(), "Created/Updated symlink `%s` for latest backup.", b.latestSymlink)
+		b.Log(storage.LogLevelInfo, b.Name(), "Created/Updated symlink `%s` for latest backup.", b.latestSymlink)
 	}
 
 	return nil
@@ -67,8 +73,8 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
 	)
 	globMatches, err := filepath.Glob(globPattern)
 	if err != nil {
-		return nil, b.Log(storage.ERROR, b.Name(),
-			"Prune: Error looking up matching files using pattern %s! %w",
+		return nil, fmt.Errorf(
+			"(*localStorage).Prune: Error looking up matching files using pattern %s: %w",
 			globPattern,
 			err,
 		)
@@ -78,8 +84,8 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
 	for _, candidate := range globMatches {
 		fi, err := os.Lstat(candidate)
 		if err != nil {
-			return nil, b.Log(storage.ERROR, b.Name(),
-				"Prune: Error calling Lstat on file %s! %w",
+			return nil, fmt.Errorf(
+				"(*localStorage).Prune: Error calling Lstat on file %s: %w",
 				candidate,
 				err,
 			)
@@ -94,8 +100,8 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
 	for _, candidate := range candidates {
 		fi, err := os.Stat(candidate)
 		if err != nil {
-			return nil, b.Log(storage.ERROR, b.Name(),
-				"Prune: Error calling stat on file %s! %w",
+			return nil, fmt.Errorf(
+				"(*localStorage).Prune: Error calling stat on file %s! %w",
 				candidate,
 				err,
 			)
@@ -110,7 +116,7 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
 		Pruned: uint(len(matches)),
 	}
 
-	b.DoPrune(b.Name(), len(matches), len(candidates), "local backup(s)", func() error {
+	if err := b.DoPrune(b.Name(), len(matches), len(candidates), "local backup(s)", func() error {
 		var removeErrors []error
 		for _, match := range matches {
 			if err := os.Remove(match); err != nil {
@@ -118,14 +124,37 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
 			}
 		}
 		if len(removeErrors) != 0 {
-			return b.Log(storage.ERROR, b.Name(),
-				"Prune: %d error(s) deleting local files, starting with: %w",
+			return fmt.Errorf(
+				"(*localStorage).Prune: %d error(s) deleting local files, starting with: %w",
 				len(removeErrors),
 				utilities.Join(removeErrors...),
 			)
 		}
 		return nil
-	})
+	}); err != nil {
+		return stats, err
+	}
 
 	return stats, nil
 }
+
+// copy creates a copy of the file located at `dst` at `src`.
+func copyFile(src, dst string) error {
+	in, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer in.Close()
+
+	out, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+	_, err = io.Copy(out, in)
+	if err != nil {
+		out.Close()
+		return err
+	}
+	return out.Close()
+}

internal/storage/s3/s3.go

@@ -1,8 +1,12 @@
+// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// SPDX-License-Identifier: MPL-2.0
+
 package s3
 
 import (
 	"context"
 	"errors"
+	"fmt"
 	"path"
 	"path/filepath"
 	"time"
@@ -20,54 +24,66 @@ type s3Storage struct {
 	storageClass string
 }
 
+// Config contains values that define the configuration of a S3 backend.
+type Config struct {
+	Endpoint         string
+	AccessKeyID      string
+	SecretAccessKey  string
+	IamRoleEndpoint  string
+	EndpointProto    string
+	EndpointInsecure bool
+	RemotePath       string
+	BucketName       string
+	StorageClass     string
+}
+
 // NewStorageBackend creates and initializes a new S3/Minio storage backend.
-func NewStorageBackend(endpoint string, accessKeyId string, secretAccessKey string, iamRoleEndpoint string, endpointProto string, endpointInsecure bool,
-	remotePath string, bucket string, storageClass string, logFunc storage.Log) (storage.Backend, error) {
+func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
 	var creds *credentials.Credentials
-	if accessKeyId != "" && secretAccessKey != "" {
+	if opts.AccessKeyID != "" && opts.SecretAccessKey != "" {
 		creds = credentials.NewStaticV4(
-			accessKeyId,
-			secretAccessKey,
+			opts.AccessKeyID,
+			opts.SecretAccessKey,
 			"",
 		)
-	} else if iamRoleEndpoint != "" {
-		creds = credentials.NewIAM(iamRoleEndpoint)
+	} else if opts.IamRoleEndpoint != "" {
+		creds = credentials.NewIAM(opts.IamRoleEndpoint)
 	} else {
-		return nil, errors.New("newScript: AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
+		return nil, errors.New("NewStorageBackend: AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
 	}
 
 	options := minio.Options{
 		Creds:  creds,
-		Secure: endpointProto == "https",
+		Secure: opts.EndpointProto == "https",
 	}
 
-	if endpointInsecure {
+	if opts.EndpointInsecure {
 		if !options.Secure {
-			return nil, errors.New("newScript: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
+			return nil, errors.New("NewStorageBackend: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
 		}
 
 		transport, err := minio.DefaultTransport(true)
 		if err != nil {
-			return nil, logFunc(storage.ERROR, "S3", "NewScript: failed to create default minio transport")
+			return nil, fmt.Errorf("NewStorageBackend: failed to create default minio transport")
 		}
 		transport.TLSClientConfig.InsecureSkipVerify = true
 		options.Transport = transport
 	}
 
-	mc, err := minio.New(endpoint, &options)
+	mc, err := minio.New(opts.Endpoint, &options)
 	if err != nil {
-		return nil, logFunc(storage.ERROR, "S3", "NewScript: error setting up minio client: %w", err)
+		return nil, fmt.Errorf("NewStorageBackend: error setting up minio client: %w", err)
 	}
 
 	return &s3Storage{
 		StorageBackend: &storage.StorageBackend{
-			DestinationPath: remotePath,
+			DestinationPath: opts.RemotePath,
 			Log:             logFunc,
 		},
 		client:       mc,
-		bucket:       bucket,
-		storageClass: storageClass,
+		bucket:       opts.BucketName,
+		storageClass: opts.StorageClass,
 	}, nil
 }
@@ -85,9 +101,9 @@ func (b *s3Storage) Copy(file string) error {
 		StorageClass: b.storageClass,
 	}); err != nil {
 		errResp := minio.ToErrorResponse(err)
-		return b.Log(storage.ERROR, b.Name(), "Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode)
+		return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode)
 	}
-	b.Log(storage.INFO, b.Name(), "Uploaded a copy of backup `%s` to bucket `%s`.", file, b.bucket)
+	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to bucket `%s`.", file, b.bucket)
 
 	return nil
 }
@@ -105,8 +121,8 @@ func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.Pr
 	for candidate := range candidates {
 		lenCandidates++
 		if candidate.Err != nil {
-			return nil, b.Log(storage.ERROR, b.Name(),
-				"Prune: Error looking up candidates from remote storage! %w",
+			return nil, fmt.Errorf(
+				"(*s3Storage).Prune: Error looking up candidates from remote storage! %w",
 				candidate.Err,
 			)
 		}
@@ -120,7 +136,7 @@ func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.Pr
 		Pruned: uint(len(matches)),
 	}
 
-	b.DoPrune(b.Name(), len(matches), lenCandidates, "remote backup(s)", func() error {
+	if err := b.DoPrune(b.Name(), len(matches), lenCandidates, "remote backup(s)", func() error {
 		objectsCh := make(chan minio.ObjectInfo)
 		go func() {
 			for _, match := range matches {
@@ -139,7 +155,9 @@ func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.Pr
 			return utilities.Join(removeErrors...)
 		}
 		return nil
-	})
+	}); err != nil {
+		return stats, err
+	}
 
 	return stats, nil
 }

internal/storage/ssh/ssh.go

@@ -1,3 +1,6 @@
+// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// SPDX-License-Identifier: MPL-2.0
+
 package ssh
 
 import (
@@ -23,47 +26,56 @@ type sshStorage struct {
 	hostName string
 }
 
-// NewStorageBackend creates and initializes a new SSH storage backend.
-func NewStorageBackend(hostName string, port string, user string, password string, identityFile string, identityPassphrase string, remotePath string,
-	logFunc storage.Log) (storage.Backend, error) {
+// Config allows to configure a SSH backend.
+type Config struct {
+	HostName           string
+	Port               string
+	User               string
+	Password           string
+	IdentityFile       string
+	IdentityPassphrase string
+	RemotePath         string
+}
+
+// NewStorageBackend creates and initializes a new SSH storage backend.
+func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
 	var authMethods []ssh.AuthMethod
 
-	if password != "" {
-		authMethods = append(authMethods, ssh.Password(password))
+	if opts.Password != "" {
+		authMethods = append(authMethods, ssh.Password(opts.Password))
 	}
 
-	if _, err := os.Stat(identityFile); err == nil {
-		key, err := ioutil.ReadFile(identityFile)
+	if _, err := os.Stat(opts.IdentityFile); err == nil {
+		key, err := ioutil.ReadFile(opts.IdentityFile)
 		if err != nil {
-			return nil, errors.New("newScript: error reading the private key")
+			return nil, errors.New("NewStorageBackend: error reading the private key")
 		}
 
 		var signer ssh.Signer
-		if identityPassphrase != "" {
-			signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(identityPassphrase))
+		if opts.IdentityPassphrase != "" {
+			signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(opts.IdentityPassphrase))
 			if err != nil {
-				return nil, errors.New("newScript: error parsing the encrypted private key")
+				return nil, errors.New("NewStorageBackend: error parsing the encrypted private key")
 			}
 			authMethods = append(authMethods, ssh.PublicKeys(signer))
 		} else {
 			signer, err = ssh.ParsePrivateKey(key)
 			if err != nil {
-				return nil, errors.New("newScript: error parsing the private key")
+				return nil, errors.New("NewStorageBackend: error parsing the private key")
 			}
 			authMethods = append(authMethods, ssh.PublicKeys(signer))
 		}
 	}
 
 	sshClientConfig := &ssh.ClientConfig{
-		User:            user,
+		User:            opts.User,
 		Auth:            authMethods,
 		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
 	}
-	sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", hostName, port), sshClientConfig)
+	sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", opts.HostName, opts.Port), sshClientConfig)
 	if err != nil {
-		return nil, logFunc(storage.ERROR, "SSH", "NewScript: Error creating ssh client! %w", err)
+		return nil, fmt.Errorf("NewStorageBackend: Error creating ssh client: %w", err)
 	}
 	_, _, err = sshClient.SendRequest("keepalive", false, nil)
 	if err != nil {
@@ -72,17 +84,17 @@ func NewStorageBackend(hostName string, port string, user string, password strin
 
 	sftpClient, err := sftp.NewClient(sshClient)
 	if err != nil {
-		return nil, logFunc(storage.ERROR, "SSH", "NewScript: error creating sftp client! %w", err)
+		return nil, fmt.Errorf("NewStorageBackend: error creating sftp client: %w", err)
 	}
 
 	return &sshStorage{
 		StorageBackend: &storage.StorageBackend{
-			DestinationPath: remotePath,
+			DestinationPath: opts.RemotePath,
 			Log:             logFunc,
 		},
 		client:     sshClient,
 		sftpClient: sftpClient,
-		hostName:   hostName,
+		hostName:   opts.HostName,
 	}, nil
 }
@@ -96,13 +108,13 @@ func (b *sshStorage) Copy(file string) error {
 	source, err := os.Open(file)
 	_, name := path.Split(file)
 	if err != nil {
-		return b.Log(storage.ERROR, b.Name(), "Copy: Error reading the file to be uploaded! %w", err)
+		return fmt.Errorf("(*sshStorage).Copy: Error reading the file to be uploaded! %w", err)
 	}
 	defer source.Close()
 
 	destination, err := b.sftpClient.Create(filepath.Join(b.DestinationPath, name))
 	if err != nil {
-		return b.Log(storage.ERROR, b.Name(), "Copy: Error creating file on SSH storage! %w", err)
+		return fmt.Errorf("(*sshStorage).Copy: Error creating file on SSH storage! %w", err)
 	}
 	defer destination.Close()
@@ -112,31 +124,31 @@ func (b *sshStorage) Copy(file string) error {
 		if err == io.EOF {
 			tot, err := destination.Write(chunk[:num])
 			if err != nil {
-				return b.Log(storage.ERROR, b.Name(), "Copy: Error uploading the file to SSH storage! %w", err)
+				return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage! %w", err)
 			}
 
 			if tot != len(chunk[:num]) {
-				return b.Log(storage.ERROR, b.Name(), "sshClient: failed to write stream")
+				return errors.New("(*sshStorage).Copy: failed to write stream")
 			}
 
 			break
 		}
 
 		if err != nil {
-			return b.Log(storage.ERROR, b.Name(), "Copy: Error uploading the file to SSH storage! %w", err)
+			return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage! %w", err)
 		}
 
 		tot, err := destination.Write(chunk[:num])
 		if err != nil {
-			return b.Log(storage.ERROR, b.Name(), "Copy: Error uploading the file to SSH storage! %w", err)
+			return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage! %w", err)
		}
 
 		if tot != len(chunk[:num]) {
-			return b.Log(storage.ERROR, b.Name(), "sshClient: failed to write stream")
+			return fmt.Errorf("(*sshStorage).Copy: failed to write stream")
 		}
 	}
 
-	b.Log(storage.INFO, b.Name(), "Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, b.hostName, b.DestinationPath)
+	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, b.hostName, b.DestinationPath)
 
 	return nil
 }
@@ -145,7 +157,7 @@ func (b *sshStorage) Copy(file string) error {
 func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
 	candidates, err := b.sftpClient.ReadDir(b.DestinationPath)
 	if err != nil {
-		return nil, b.Log(storage.ERROR, b.Name(), "Prune: Error reading directory from SSH storage! %w", err)
+		return nil, fmt.Errorf("(*sshStorage).Prune: Error reading directory from SSH storage! %w", err)
 	}
 
 	var matches []string
@@ -163,14 +175,16 @@ func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.P
 		Pruned: uint(len(matches)),
 	}
 
-	b.DoPrune(b.Name(), len(matches), len(candidates), "SSH backup(s)", func() error {
+	if err := b.DoPrune(b.Name(), len(matches), len(candidates), "SSH backup(s)", func() error {
 		for _, match := range matches {
 			if err := b.sftpClient.Remove(filepath.Join(b.DestinationPath, match)); err != nil {
-				return b.Log(storage.ERROR, b.Name(), "Prune: Error removing file from SSH storage! %w", err)
+				return fmt.Errorf("(*sshStorage).Prune: Error removing file from SSH storage! %w", err)
 			}
 		}
 		return nil
-	})
+	}); err != nil {
+		return stats, err
+	}
 
 	return stats, nil
 }

internal/storage/storage.go

@@ -1,3 +1,6 @@
+// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// SPDX-License-Identifier: MPL-2.0
+
 package storage
 
 import (
@@ -18,15 +21,15 @@ type StorageBackend struct {
 	Log Log
 }
 
-type LogType string
+type LogLevel int
 
 const (
-	INFO    LogType = "INFO"
-	WARNING LogType = "WARNING"
-	ERROR   LogType = "ERROR"
+	LogLevelInfo LogLevel = iota
+	LogLevelWarning
+	LogLevelError
 )
 
-type Log func(logType LogType, context string, msg string, params ...interface{}) error
+type Log func(logType LogLevel, context string, msg string, params ...interface{})
 
 // PruneStats is a wrapper struct for returning stats after pruning
 type PruneStats struct {
@@ -41,7 +44,7 @@ func (b *StorageBackend) DoPrune(context string, lenMatches, lenCandidates int,
 		if err := doRemoveFiles(); err != nil {
 			return err
 		}
-		b.Log(INFO, context,
+		b.Log(LogLevelInfo, context,
 			"Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.",
 			lenMatches,
 			lenCandidates,
@@ -49,10 +52,10 @@ func (b *StorageBackend) DoPrune(context string, lenMatches, lenCandidates int,
 			b.RetentionDays,
 		)
 	} else if lenMatches != 0 && lenMatches == lenCandidates {
-		b.Log(WARNING, context, "The current configuration would delete all %d existing %s.", lenMatches, description)
-		b.Log(WARNING, context, "Refusing to do so, please check your configuration.")
+		b.Log(LogLevelWarning, context, "The current configuration would delete all %d existing %s.", lenMatches, description)
+		b.Log(LogLevelWarning, context, "Refusing to do so, please check your configuration.")
 	} else {
-		b.Log(INFO, context, "None of %d existing %s were pruned.", lenCandidates, description)
+		b.Log(LogLevelInfo, context, "None of %d existing %s were pruned.", lenCandidates, description)
 	}
 
 	return nil
 }
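
Since LogLevel is now an int-backed enum (LogLevelInfo = 0, with Warning and Error following via iota), levels are ordered and can be compared numerically. A sketch of a Log implementation that filters by severity (a hypothetical helper, not part of this package):

func newFilteredLog(min storage.LogLevel) storage.Log {
	return func(level storage.LogLevel, context string, msg string, params ...interface{}) {
		if level < min {
			return // drop messages below the threshold
		}
		fmt.Printf("["+context+"] "+msg+"\n", params...)
	}
}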

internal/storage/webdav/webdav.go

@@ -1,7 +1,11 @@
+// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// SPDX-License-Identifier: MPL-2.0
+
 package webdav
 
 import (
 	"errors"
+	"fmt"
 	"io/fs"
 	"net/http"
 	"os"
@@ -20,28 +24,36 @@ type webDavStorage struct {
 	url string
 }
 
+// Config allows to configure a WebDAV storage backend.
+type Config struct {
+	URL         string
+	RemotePath  string
+	Username    string
+	Password    string
+	URLInsecure bool
+}
+
 // NewStorageBackend creates and initializes a new WebDav storage backend.
-func NewStorageBackend(url string, remotePath string, username string, password string, urlInsecure bool,
-	logFunc storage.Log) (storage.Backend, error) {
+func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
-	if username == "" || password == "" {
-		return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided")
+	if opts.Username == "" || opts.Password == "" {
+		return nil, errors.New("NewStorageBackend: WEBDAV_URL is defined, but no credentials were provided")
 	} else {
-		webdavClient := gowebdav.NewClient(url, username, password)
+		webdavClient := gowebdav.NewClient(opts.URL, opts.Username, opts.Password)
 
-		if urlInsecure {
+		if opts.URLInsecure {
 			defaultTransport, ok := http.DefaultTransport.(*http.Transport)
 			if !ok {
-				return nil, errors.New("newScript: unexpected error when asserting type for http.DefaultTransport")
+				return nil, errors.New("NewStorageBackend: unexpected error when asserting type for http.DefaultTransport")
 			}
 			webdavTransport := defaultTransport.Clone()
-			webdavTransport.TLSClientConfig.InsecureSkipVerify = urlInsecure
+			webdavTransport.TLSClientConfig.InsecureSkipVerify = opts.URLInsecure
 			webdavClient.SetTransport(webdavTransport)
 		}
 
 		return &webDavStorage{
 			StorageBackend: &storage.StorageBackend{
-				DestinationPath: remotePath,
+				DestinationPath: opts.RemotePath,
 				Log:             logFunc,
 			},
 			client: webdavClient,
@@ -51,7 +63,7 @@ func NewStorageBackend(url string, remotePath string, username string, password
 
 // Name returns the name of the storage backend
 func (b *webDavStorage) Name() string {
-	return "WebDav"
+	return "WebDAV"
 }
 
 // Copy copies the given file to the WebDav storage backend.
@@ -59,15 +71,15 @@ func (b *webDavStorage) Copy(file string) error {
 	bytes, err := os.ReadFile(file)
 	_, name := path.Split(file)
 	if err != nil {
-		return b.Log(storage.ERROR, b.Name(), "Copy: Error reading the file to be uploaded! %w", err)
+		return fmt.Errorf("(*webDavStorage).Copy: Error reading the file to be uploaded! %w", err)
 	}
 
 	if err := b.client.MkdirAll(b.DestinationPath, 0644); err != nil {
-		return b.Log(storage.ERROR, b.Name(), "Copy: Error creating directory '%s' on WebDAV server! %w", b.DestinationPath, err)
+		return fmt.Errorf("(*webDavStorage).Copy: Error creating directory '%s' on WebDAV server! %w", b.DestinationPath, err)
 	}
 	if err := b.client.Write(filepath.Join(b.DestinationPath, name), bytes, 0644); err != nil {
-		return b.Log(storage.ERROR, b.Name(), "Copy: Error uploading the file to WebDAV server! %w", err)
+		return fmt.Errorf("(*webDavStorage).Copy: Error uploading the file to WebDAV server! %w", err)
 	}
-	b.Log(storage.INFO, b.Name(), "Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", file, b.url, b.DestinationPath)
+	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", file, b.url, b.DestinationPath)
 
 	return nil
 }
@@ -76,7 +88,7 @@ func (b *webDavStorage) Copy(file string) error {
 func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
 	candidates, err := b.client.ReadDir(b.DestinationPath)
 	if err != nil {
-		return nil, b.Log(storage.ERROR, b.Name(), "Prune: Error looking up candidates from remote storage! %w", err)
+		return nil, fmt.Errorf("(*webDavStorage).Prune: Error looking up candidates from remote storage! %w", err)
 	}
 	var matches []fs.FileInfo
 	var lenCandidates int
@@ -95,14 +107,16 @@ func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storag
 		Pruned: uint(len(matches)),
 	}
 
-	b.DoPrune(b.Name(), len(matches), lenCandidates, "WebDAV backup(s)", func() error {
+	if err := b.DoPrune(b.Name(), len(matches), lenCandidates, "WebDAV backup(s)", func() error {
 		for _, match := range matches {
 			if err := b.client.Remove(filepath.Join(b.DestinationPath, match.Name())); err != nil {
-				return b.Log(storage.ERROR, b.Name(), "Prune: Error removing file from WebDAV storage! %w", err)
+				return fmt.Errorf("(*webDavStorage).Prune: Error removing file from WebDAV storage! %w", err)
 			}
 		}
 		return nil
-	})
+	}); err != nil {
+		return stats, err
+	}
 
 	return stats, nil
 }

internal/utilities/util.go

@@ -4,38 +4,11 @@
 package utilities
 
 import (
-	"bytes"
 	"errors"
-	"fmt"
-	"io"
-	"os"
 	"strings"
 )
 
-var Noop = func() error { return nil }
-
-// copy creates a copy of the file located at `dst` at `src`.
-func CopyFile(src, dst string) error {
-	in, err := os.Open(src)
-	if err != nil {
-		return err
-	}
-	defer in.Close()
-
-	out, err := os.Create(dst)
-	if err != nil {
-		return err
-	}
-	_, err = io.Copy(out, in)
-	if err != nil {
-		out.Close()
-		return err
-	}
-	return out.Close()
-}
-
-// join takes a list of errors and joins them into a single error
+// Join takes a list of errors and joins them into a single error
 func Join(errs ...error) error {
 	if len(errs) == 1 {
 		return errs[0]
@@ -49,42 +22,3 @@ func Join(errs ...error) error {
 	}
 	return errors.New("[" + strings.Join(msgs, ", ") + "]")
 }
-
-// remove removes the given file or directory from disk.
-func Remove(location string) error {
-	fi, err := os.Lstat(location)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return nil
-		}
-		return fmt.Errorf("remove: error checking for existence of `%s`: %w", location, err)
-	}
-	if fi.IsDir() {
-		err = os.RemoveAll(location)
-	} else {
-		err = os.Remove(location)
-	}
-	if err != nil {
-		return fmt.Errorf("remove: error removing `%s`: %w", location, err)
-	}
-	return nil
-}
-
-// buffer takes an io.Writer and returns a wrapped version of the
-// writer that writes to both the original target as well as the returned buffer
-func Buffer(w io.Writer) (io.Writer, *bytes.Buffer) {
-	buffering := &bufferingWriter{buf: bytes.Buffer{}, writer: w}
-	return buffering, &buffering.buf
-}
-
-type bufferingWriter struct {
-	buf    bytes.Buffer
-	writer io.Writer
-}
-
-func (b *bufferingWriter) Write(p []byte) (n int, err error) {
-	if n, err := b.buf.Write(p); err != nil {
-		return n, fmt.Errorf("bufferingWriter: error writing to buffer: %w", err)
-	}
-	return b.writer.Write(p)
-}
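
For reference, roughly how the exported Join behaves per the implementation above: a single error is passed through unchanged, while multiple errors are flattened into one bracketed message (output shown is approximate).

err1 := utilities.Join(errors.New("only"))
fmt.Println(err1) // only

err2 := utilities.Join(errors.New("first"), errors.New("second"))
fmt.Println(err2) // approximately: [first, second]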