diff --git a/.dockerignore b/.dockerignore
index 246a56f..ab69334 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,19 +1,7 @@
-# Ignore everything
-*
-
-# Exceptions:
-# Note: Wildcards for directories like * or ** don't work (yet) with exclamation marks!
-
-!cmd/backup/*.go
-!cmd/backup/*.tmpl
-
-!internal/storage/*.go
-!internal/storage/local/*.go
-!internal/storage/s3/*.go
-!internal/storage/ssh/*.go
-!internal/storage/webdav/*.go
-!internal/utilities/*.go
-
-!Dockerfile
-!entrypoint.sh
-!go.*
\ No newline at end of file
+test
+.github
+.circleci
+docs
+.editorconfig
+LICENSE
+README.md
diff --git a/cmd/backup/lock.go b/cmd/backup/lock.go
index e333964..2bb5a79 100644
--- a/cmd/backup/lock.go
+++ b/cmd/backup/lock.go
@@ -9,7 +9,6 @@ import (
 	"time"
 
 	"github.com/gofrs/flock"
-	"github.com/offen/docker-volume-backup/internal/utilities"
 )
 
 // lock opens a lockfile at the given location, keeping it locked until the
@@ -32,7 +31,7 @@ func (s *script) lock(lockfile string) (func() error, error) {
 	for {
 		acquired, err := fileLock.TryLock()
 		if err != nil {
-			return utilities.Noop, fmt.Errorf("lock: error trying lock: %w", err)
+			return noop, fmt.Errorf("lock: error trying lock: %w", err)
 		}
 		if acquired {
 			if s.encounteredLock {
@@ -53,7 +52,7 @@
 		case <-retry.C:
 			continue
 		case <-deadline.C:
-			return utilities.Noop, errors.New("lock: timed out waiting for lockfile to become available")
+			return noop, errors.New("lock: timed out waiting for lockfile to become available")
 		}
 	}
 }
diff --git a/cmd/backup/script.go b/cmd/backup/script.go
index ba17ffe..8a33b60 100644
--- a/cmd/backup/script.go
+++ b/cmd/backup/script.go
@@ -58,7 +58,7 @@ type script struct {
 // reading from env vars or other configuration sources is expected to happen
 // in this method.
 func newScript() (*script, error) {
-	stdOut, logBuffer := utilities.Buffer(os.Stdout)
+	stdOut, logBuffer := buffer(os.Stdout)
 	s := &script{
 		c: &Config{},
 		logger: &logrus.Logger{
@@ -72,7 +72,7 @@ func newScript() (*script, error) {
 			LogOutput: logBuffer,
 			Storages: map[string]StorageStats{
 				"S3":     {},
-				"WebDav": {},
+				"WebDAV": {},
 				"SSH":    {},
 				"Local":  {},
 			},
@@ -107,29 +107,32 @@ func newScript() (*script, error) {
 		s.cli = cli
 	}
 
-	logFunc := func(logType storage.LogType, context string, msg string, params ...interface{}) error {
-		var allParams []interface{}
-		allParams = append(allParams, context)
-		allParams = append(allParams, params...)
-
+	logFunc := func(logType storage.LogLevel, context string, msg string, params ...interface{}) {
 		switch logType {
-		case storage.INFO:
-			s.logger.Infof("[%s] "+msg, allParams...)
-			return nil
-		case storage.WARNING:
-			s.logger.Warnf("[%s] "+msg, allParams...)
-			return nil
-		case storage.ERROR:
-			return fmt.Errorf("[%s] "+msg, allParams...)
+		case storage.LogLevelWarning:
+			s.logger.Warnf("["+context+"] "+msg, params...)
+		case storage.LogLevelError:
+			s.logger.Errorf("["+context+"] "+msg, params...)
+		case storage.LogLevelInfo:
+			fallthrough
 		default:
-			s.logger.Warnf("[%s] "+msg, allParams...)
-			return nil
+			s.logger.Infof("["+context+"] "+msg, params...)
 		}
 	}
 
 	if s.c.AwsS3BucketName != "" {
-		if s3Backend, err := s3.NewStorageBackend(s.c.AwsEndpoint, s.c.AwsAccessKeyID, s.c.AwsSecretAccessKey, s.c.AwsIamRoleEndpoint,
-			s.c.AwsEndpointProto, s.c.AwsEndpointInsecure, s.c.AwsS3Path, s.c.AwsS3BucketName, s.c.AwsStorageClass, logFunc); err != nil {
+		s3Config := s3.Config{
+			Endpoint:         s.c.AwsEndpoint,
+			AccessKeyID:      s.c.AwsAccessKeyID,
+			SecretAccessKey:  s.c.AwsSecretAccessKey,
+			IamRoleEndpoint:  s.c.AwsIamRoleEndpoint,
+			EndpointProto:    s.c.AwsEndpointProto,
+			EndpointInsecure: s.c.AwsEndpointInsecure,
+			RemotePath:       s.c.AwsS3Path,
+			BucketName:       s.c.AwsS3BucketName,
+			StorageClass:     s.c.AwsStorageClass,
+		}
+		if s3Backend, err := s3.NewStorageBackend(s3Config, logFunc); err != nil {
 			return nil, err
 		} else {
 			s.storages = append(s.storages, s3Backend)
@@ -137,8 +140,14 @@ func newScript() (*script, error) {
 	}
 
 	if s.c.WebdavUrl != "" {
-		if webdavBackend, err := webdav.NewStorageBackend(s.c.WebdavUrl, s.c.WebdavPath, s.c.WebdavUsername, s.c.WebdavPassword,
-			s.c.WebdavUrlInsecure, logFunc); err != nil {
+		webDavConfig := webdav.Config{
+			URL:         s.c.WebdavUrl,
+			URLInsecure: s.c.WebdavUrlInsecure,
+			Username:    s.c.WebdavUsername,
+			Password:    s.c.WebdavPassword,
+			RemotePath:  s.c.WebdavPath,
+		}
+		if webdavBackend, err := webdav.NewStorageBackend(webDavConfig, logFunc); err != nil {
 			return nil, err
 		} else {
 			s.storages = append(s.storages, webdavBackend)
@@ -146,16 +155,30 @@ func newScript() (*script, error) {
 	}
 
 	if s.c.SSHHostName != "" {
-		if sshBackend, err := ssh.NewStorageBackend(s.c.SSHHostName, s.c.SSHPort, s.c.SSHUser, s.c.SSHPassword, s.c.SSHIdentityFile,
-			s.c.SSHIdentityPassphrase, s.c.SSHRemotePath, logFunc); err != nil {
+		sshConfig := ssh.Config{
+			HostName:           s.c.SSHHostName,
+			Port:               s.c.SSHPort,
+			User:               s.c.SSHUser,
+			Password:           s.c.SSHPassword,
+			IdentityFile:       s.c.SSHIdentityFile,
+			IdentityPassphrase: s.c.SSHIdentityPassphrase,
+			RemotePath:         s.c.SSHRemotePath,
+		}
+		if sshBackend, err := ssh.NewStorageBackend(sshConfig, logFunc); err != nil {
 			return nil, err
 		} else {
 			s.storages = append(s.storages, sshBackend)
 		}
 	}
 
-	localBackend := local.NewStorageBackend(s.c.BackupArchive, s.c.BackupLatestSymlink, logFunc)
-	s.storages = append(s.storages, localBackend)
+	if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
+		localConfig := local.Config{
+			ArchivePath:   s.c.BackupArchive,
+			LatestSymlink: s.c.BackupLatestSymlink,
+		}
+		localBackend := local.NewStorageBackend(localConfig, logFunc)
+		s.storages = append(s.storages, localBackend)
+	}
 
 	if s.c.EmailNotificationRecipient != "" {
 		emailURL := fmt.Sprintf(
@@ -228,14 +251,14 @@
 // restart everything that has been stopped.
 func (s *script) stopContainers() (func() error, error) {
 	if s.cli == nil {
-		return utilities.Noop, nil
+		return noop, nil
 	}
 
 	allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
 		Quiet: true,
 	})
 	if err != nil {
-		return utilities.Noop, fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err)
+		return noop, fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err)
 	}
 
 	containerLabel := fmt.Sprintf(
@@ -251,11 +274,11 @@ func (s *script) stopContainers() (func() error, error) {
 	})
 
 	if err != nil {
-		return utilities.Noop, fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err)
+		return noop, fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err)
 	}
 
 	if len(containersToStop) == 0 {
-		return utilities.Noop, nil
+		return noop, nil
 	}
 
 	s.logger.Infof(
@@ -357,7 +380,7 @@ func (s *script) createArchive() error {
 		backupSources = filepath.Join("/tmp", s.c.BackupSources)
 		// copy before compressing guard against a situation where backup folder's content are still growing.
 		s.registerHook(hookLevelPlumbing, func(error) error {
-			if err := utilities.Remove(backupSources); err != nil {
+			if err := remove(backupSources); err != nil {
 				return fmt.Errorf("takeBackup: error removing snapshot: %w", err)
 			}
 			s.logger.Infof("Removed snapshot `%s`.", backupSources)
@@ -367,15 +390,15 @@
 			PreserveTimes: true,
 			PreserveOwner: true,
 		}); err != nil {
-			return fmt.Errorf("takeBackup: error creating snapshot: %w", err)
+			return fmt.Errorf("createArchive: error creating snapshot: %w", err)
 		}
 		s.logger.Infof("Created snapshot of `%s` at `%s`.", s.c.BackupSources, backupSources)
 	}
 
 	tarFile := s.file
 	s.registerHook(hookLevelPlumbing, func(error) error {
-		if err := utilities.Remove(tarFile); err != nil {
-			return fmt.Errorf("takeBackup: error removing tar file: %w", err)
+		if err := remove(tarFile); err != nil {
+			return fmt.Errorf("createArchive: error removing tar file: %w", err)
 		}
 		s.logger.Infof("Removed tar file `%s`.", tarFile)
 		return nil
@@ -383,7 +406,7 @@
 
 	backupPath, err := filepath.Abs(stripTrailingSlashes(backupSources))
 	if err != nil {
-		return fmt.Errorf("takeBackup: error getting absolute path: %w", err)
+		return fmt.Errorf("createArchive: error getting absolute path: %w", err)
 	}
 
 	var filesEligibleForBackup []string
@@ -398,11 +421,11 @@
 		filesEligibleForBackup = append(filesEligibleForBackup, path)
 		return nil
 	}); err != nil {
-		return fmt.Errorf("compress: error walking filesystem tree: %w", err)
+		return fmt.Errorf("createArchive: error walking filesystem tree: %w", err)
 	}
 
 	if err := createArchive(filesEligibleForBackup, backupSources, tarFile); err != nil {
-		return fmt.Errorf("takeBackup: error compressing backup folder: %w", err)
+		return fmt.Errorf("createArchive: error compressing backup folder: %w", err)
 	}
 
 	s.logger.Infof("Created backup of `%s` at `%s`.", backupSources, tarFile)
@@ -419,8 +442,8 @@
 
 	gpgFile := fmt.Sprintf("%s.gpg", s.file)
 	s.registerHook(hookLevelPlumbing, func(error) error {
-		if err := utilities.Remove(gpgFile); err != nil {
-			return fmt.Errorf("encryptBackup: error removing gpg file: %w", err)
+		if err := remove(gpgFile); err != nil {
+			return fmt.Errorf("encryptArchive: error removing gpg file: %w", err)
 		}
 		s.logger.Infof("Removed GPG file `%s`.", gpgFile)
 		return nil
@@ -428,7 +451,7 @@ func (s *script) encryptArchive() error {
 
 	outFile, err := os.Create(gpgFile)
 	if err != nil {
-		return fmt.Errorf("encryptBackup: error opening out file: %w", err)
+		return fmt.Errorf("encryptArchive: error opening out file: %w", err)
 	}
 	defer outFile.Close()
@@ -438,17 +461,17 @@ func (s *script) encryptArchive() error {
 		FileName: name,
 	}, nil)
 	if err != nil {
-		return fmt.Errorf("encryptBackup: error encrypting backup file: %w", err)
+		return fmt.Errorf("encryptArchive: error encrypting backup file: %w", err)
 	}
 	defer dst.Close()
 
 	src, err := os.Open(s.file)
 	if err != nil {
-		return fmt.Errorf("encryptBackup: error opening backup file `%s`: %w", s.file, err)
+		return fmt.Errorf("encryptArchive: error opening backup file `%s`: %w", s.file, err)
 	}
 
 	if _, err := io.Copy(dst, src); err != nil {
-		return fmt.Errorf("encryptBackup: error writing ciphertext to file: %w", err)
+		return fmt.Errorf("encryptArchive: error writing ciphertext to file: %w", err)
 	}
 
 	s.file = gpgFile
@@ -461,7 +484,7 @@ func (s *script) encryptArchive() error {
 func (s *script) copyArchive() error {
 	_, name := path.Split(s.file)
 	if stat, err := os.Stat(s.file); err != nil {
-		return fmt.Errorf("copyBackup: unable to stat backup file: %w", err)
+		return fmt.Errorf("copyArchive: unable to stat backup file: %w", err)
 	} else {
 		size := stat.Size()
 		s.stats.BackupFile = BackupFileStats{
diff --git a/cmd/backup/util.go b/cmd/backup/util.go
new file mode 100644
index 0000000..c349e7b
--- /dev/null
+++ b/cmd/backup/util.go
@@ -0,0 +1,52 @@
+// Copyright 2022 - Offen Authors
+// SPDX-License-Identifier: MPL-2.0
+
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+)
+
+var noop = func() error { return nil }
+
+// remove removes the given file or directory from disk.
+func remove(location string) error {
+	fi, err := os.Lstat(location)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return fmt.Errorf("remove: error checking for existence of `%s`: %w", location, err)
+	}
+	if fi.IsDir() {
+		err = os.RemoveAll(location)
+	} else {
+		err = os.Remove(location)
+	}
+	if err != nil {
+		return fmt.Errorf("remove: error removing `%s`: %w", location, err)
+	}
+	return nil
+}
+
+// buffer takes an io.Writer and returns a wrapped version of the
+// writer that writes to both the original target as well as the returned buffer
+func buffer(w io.Writer) (io.Writer, *bytes.Buffer) {
+	buffering := &bufferingWriter{buf: bytes.Buffer{}, writer: w}
+	return buffering, &buffering.buf
+}
+
+type bufferingWriter struct {
+	buf    bytes.Buffer
+	writer io.Writer
+}
+
+func (b *bufferingWriter) Write(p []byte) (n int, err error) {
+	if n, err := b.buf.Write(p); err != nil {
+		return n, fmt.Errorf("(*bufferingWriter).Write: error writing to buffer: %w", err)
+	}
+	return b.writer.Write(p)
+}
diff --git a/internal/storage/local/local.go b/internal/storage/local/local.go
index 5d9158b..3c33861 100644
--- a/internal/storage/local/local.go
+++ b/internal/storage/local/local.go
@@ -1,7 +1,11 @@
+// Copyright 2022 - Offen Authors
+// SPDX-License-Identifier: MPL-2.0
+
 package local
 
 import (
 	"fmt"
+	"io"
 	"os"
 	"path"
 	"path/filepath"
@@ -16,14 +20,20 @@ type localStorage struct {
 	latestSymlink string
 }
 
+// Config allows configuration of a local storage backend.
+type Config struct {
+	ArchivePath   string
+	LatestSymlink string
+}
+
 // NewStorageBackend creates and initializes a new local storage backend.
-func NewStorageBackend(archivePath string, latestSymlink string, logFunc storage.Log) storage.Backend {
+func NewStorageBackend(opts Config, logFunc storage.Log) storage.Backend {
 	return &localStorage{
 		StorageBackend: &storage.StorageBackend{
-			DestinationPath: archivePath,
+			DestinationPath: opts.ArchivePath,
 			Log:             logFunc,
 		},
-		latestSymlink: latestSymlink,
+		latestSymlink: opts.LatestSymlink,
 	}
 }
 
@@ -34,16 +44,12 @@ func (b *localStorage) Name() string {
 // Copy copies the given file to the local storage backend.
 func (b *localStorage) Copy(file string) error {
-	if _, err := os.Stat(b.DestinationPath); os.IsNotExist(err) {
-		return nil
-	}
-
 	_, name := path.Split(file)
-	if err := utilities.CopyFile(file, path.Join(b.DestinationPath, name)); err != nil {
-		return b.Log(storage.ERROR, b.Name(), "Copy: Error copying file to local archive! %w", err)
+	if err := copyFile(file, path.Join(b.DestinationPath, name)); err != nil {
+		return fmt.Errorf("(*localStorage).Copy: Error copying file to local archive! %w", err)
 	}
-	b.Log(storage.INFO, b.Name(), "Stored copy of backup `%s` in local archive `%s`.", file, b.DestinationPath)
+	b.Log(storage.LogLevelInfo, b.Name(), "Stored copy of backup `%s` in local archive `%s`.", file, b.DestinationPath)
 
 	if b.latestSymlink != "" {
 		symlink := path.Join(b.DestinationPath, b.latestSymlink)
@@ -51,9 +57,9 @@ func (b *localStorage) Copy(file string) error {
 			os.Remove(symlink)
 		}
 		if err := os.Symlink(name, symlink); err != nil {
-			return b.Log(storage.ERROR, b.Name(), "Copy: error creating latest symlink! %w", err)
+			return fmt.Errorf("(*localStorage).Copy: error creating latest symlink! %w", err)
 		}
-		b.Log(storage.INFO, b.Name(), "Created/Updated symlink `%s` for latest backup.", b.latestSymlink)
+		b.Log(storage.LogLevelInfo, b.Name(), "Created/Updated symlink `%s` for latest backup.", b.latestSymlink)
 	}
 
 	return nil
@@ -67,8 +73,8 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
 	)
 	globMatches, err := filepath.Glob(globPattern)
 	if err != nil {
-		return nil, b.Log(storage.ERROR, b.Name(),
-			"Prune: Error looking up matching files using pattern %s! %w",
+		return nil, fmt.Errorf(
+			"(*localStorage).Prune: Error looking up matching files using pattern %s: %w",
 			globPattern,
 			err,
 		)
@@ -78,8 +84,8 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
 	for _, candidate := range globMatches {
 		fi, err := os.Lstat(candidate)
 		if err != nil {
-			return nil, b.Log(storage.ERROR, b.Name(),
-				"Prune: Error calling Lstat on file %s! %w",
+			return nil, fmt.Errorf(
+				"(*localStorage).Prune: Error calling Lstat on file %s: %w",
 				candidate,
 				err,
 			)
@@ -94,8 +100,8 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
 	for _, candidate := range candidates {
 		fi, err := os.Stat(candidate)
 		if err != nil {
-			return nil, b.Log(storage.ERROR, b.Name(),
-				"Prune: Error calling stat on file %s! %w",
+			return nil, fmt.Errorf(
+				"(*localStorage).Prune: Error calling stat on file %s: %w",
%w", candidate, err, ) @@ -110,7 +116,7 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage Pruned: uint(len(matches)), } - b.DoPrune(b.Name(), len(matches), len(candidates), "local backup(s)", func() error { + if err := b.DoPrune(b.Name(), len(matches), len(candidates), "local backup(s)", func() error { var removeErrors []error for _, match := range matches { if err := os.Remove(match); err != nil { @@ -118,14 +124,37 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage } } if len(removeErrors) != 0 { - return b.Log(storage.ERROR, b.Name(), - "Prune: %d error(s) deleting local files, starting with: %w", + return fmt.Errorf( + "(*localStorage).Prune: %d error(s) deleting local files, starting with: %w", len(removeErrors), utilities.Join(removeErrors...), ) } return nil - }) + }); err != nil { + return stats, err + } return stats, nil } + +// copy creates a copy of the file located at `dst` at `src`. +func copyFile(src, dst string) error { + in, err := os.Open(src) + if err != nil { + return err + } + defer in.Close() + + out, err := os.Create(dst) + if err != nil { + return err + } + + _, err = io.Copy(out, in) + if err != nil { + out.Close() + return err + } + return out.Close() +} diff --git a/internal/storage/s3/s3.go b/internal/storage/s3/s3.go index cb162ea..76b1e38 100644 --- a/internal/storage/s3/s3.go +++ b/internal/storage/s3/s3.go @@ -1,8 +1,12 @@ +// Copyright 2022 - Offen Authors +// SPDX-License-Identifier: MPL-2.0 + package s3 import ( "context" "errors" + "fmt" "path" "path/filepath" "time" @@ -20,54 +24,66 @@ type s3Storage struct { storageClass string } +// Config contains values that define the configuration of a S3 backend. +type Config struct { + Endpoint string + AccessKeyID string + SecretAccessKey string + IamRoleEndpoint string + EndpointProto string + EndpointInsecure bool + RemotePath string + BucketName string + StorageClass string +} + // NewStorageBackend creates and initializes a new S3/Minio storage backend. 
-func NewStorageBackend(endpoint string, accessKeyId string, secretAccessKey string, iamRoleEndpoint string, endpointProto string, endpointInsecure bool,
-	remotePath string, bucket string, storageClass string, logFunc storage.Log) (storage.Backend, error) {
+func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
 	var creds *credentials.Credentials
-	if accessKeyId != "" && secretAccessKey != "" {
+	if opts.AccessKeyID != "" && opts.SecretAccessKey != "" {
 		creds = credentials.NewStaticV4(
-			accessKeyId,
-			secretAccessKey,
+			opts.AccessKeyID,
+			opts.SecretAccessKey,
 			"",
 		)
-	} else if iamRoleEndpoint != "" {
-		creds = credentials.NewIAM(iamRoleEndpoint)
+	} else if opts.IamRoleEndpoint != "" {
+		creds = credentials.NewIAM(opts.IamRoleEndpoint)
 	} else {
-		return nil, errors.New("newScript: AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
+		return nil, errors.New("NewStorageBackend: AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
 	}
 
 	options := minio.Options{
 		Creds:  creds,
-		Secure: endpointProto == "https",
+		Secure: opts.EndpointProto == "https",
 	}
 
-	if endpointInsecure {
+	if opts.EndpointInsecure {
 		if !options.Secure {
-			return nil, errors.New("newScript: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
+			return nil, errors.New("NewStorageBackend: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
 		}
 
 		transport, err := minio.DefaultTransport(true)
 		if err != nil {
-			return nil, logFunc(storage.ERROR, "S3", "NewScript: failed to create default minio transport")
+			return nil, fmt.Errorf("NewStorageBackend: failed to create default minio transport")
 		}
 		transport.TLSClientConfig.InsecureSkipVerify = true
 		options.Transport = transport
 	}
 
-	mc, err := minio.New(endpoint, &options)
+	mc, err := minio.New(opts.Endpoint, &options)
 	if err != nil {
-		return nil, logFunc(storage.ERROR, "S3", "NewScript: error setting up minio client: %w", err)
+		return nil, fmt.Errorf("NewStorageBackend: error setting up minio client: %w", err)
 	}
 
 	return &s3Storage{
 		StorageBackend: &storage.StorageBackend{
-			DestinationPath: remotePath,
+			DestinationPath: opts.RemotePath,
 			Log:             logFunc,
 		},
 		client:       mc,
-		bucket:       bucket,
-		storageClass: storageClass,
+		bucket:       opts.BucketName,
+		storageClass: opts.StorageClass,
 	}, nil
 }
@@ -85,9 +101,9 @@ func (b *s3Storage) Copy(file string) error {
 		StorageClass: b.storageClass,
 	}); err != nil {
 		errResp := minio.ToErrorResponse(err)
-		return b.Log(storage.ERROR, b.Name(), "Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode)
+		return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode)
 	}
-	b.Log(storage.INFO, b.Name(), "Uploaded a copy of backup `%s` to bucket `%s`.", file, b.bucket)
+	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to bucket `%s`.", file, b.bucket)
 
 	return nil
 }
@@ -105,8 +121,8 @@
 	for candidate := range candidates {
 		lenCandidates++
 		if candidate.Err != nil {
-			return nil, b.Log(storage.ERROR, b.Name(),
-				"Prune: Error looking up candidates from remote storage! %w",
+			return nil, fmt.Errorf(
+				"(*s3Storage).Prune: Error looking up candidates from remote storage! %w",
%w", candidate.Err, ) } @@ -120,7 +136,7 @@ func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.Pr Pruned: uint(len(matches)), } - b.DoPrune(b.Name(), len(matches), lenCandidates, "remote backup(s)", func() error { + if err := b.DoPrune(b.Name(), len(matches), lenCandidates, "remote backup(s)", func() error { objectsCh := make(chan minio.ObjectInfo) go func() { for _, match := range matches { @@ -139,7 +155,9 @@ func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.Pr return utilities.Join(removeErrors...) } return nil - }) + }); err != nil { + return stats, err + } return stats, nil } diff --git a/internal/storage/ssh/ssh.go b/internal/storage/ssh/ssh.go index c7dad70..9194528 100644 --- a/internal/storage/ssh/ssh.go +++ b/internal/storage/ssh/ssh.go @@ -1,3 +1,6 @@ +// Copyright 2022 - Offen Authors +// SPDX-License-Identifier: MPL-2.0 + package ssh import ( @@ -23,47 +26,56 @@ type sshStorage struct { hostName string } -// NewStorageBackend creates and initializes a new SSH storage backend. -func NewStorageBackend(hostName string, port string, user string, password string, identityFile string, identityPassphrase string, remotePath string, - logFunc storage.Log) (storage.Backend, error) { +// Config allows to configure a SSH backend. +type Config struct { + HostName string + Port string + User string + Password string + IdentityFile string + IdentityPassphrase string + RemotePath string +} +// NewStorageBackend creates and initializes a new SSH storage backend. +func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) { var authMethods []ssh.AuthMethod - if password != "" { - authMethods = append(authMethods, ssh.Password(password)) + if opts.Password != "" { + authMethods = append(authMethods, ssh.Password(opts.Password)) } - if _, err := os.Stat(identityFile); err == nil { - key, err := ioutil.ReadFile(identityFile) + if _, err := os.Stat(opts.IdentityFile); err == nil { + key, err := ioutil.ReadFile(opts.IdentityFile) if err != nil { - return nil, errors.New("newScript: error reading the private key") + return nil, errors.New("NewStorageBackend: error reading the private key") } var signer ssh.Signer - if identityPassphrase != "" { - signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(identityPassphrase)) + if opts.IdentityPassphrase != "" { + signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(opts.IdentityPassphrase)) if err != nil { - return nil, errors.New("newScript: error parsing the encrypted private key") + return nil, errors.New("NewStorageBackend: error parsing the encrypted private key") } authMethods = append(authMethods, ssh.PublicKeys(signer)) } else { signer, err = ssh.ParsePrivateKey(key) if err != nil { - return nil, errors.New("newScript: error parsing the private key") + return nil, errors.New("NewStorageBackend: error parsing the private key") } authMethods = append(authMethods, ssh.PublicKeys(signer)) } } sshClientConfig := &ssh.ClientConfig{ - User: user, + User: opts.User, Auth: authMethods, HostKeyCallback: ssh.InsecureIgnoreHostKey(), } - sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", hostName, port), sshClientConfig) + sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", opts.HostName, opts.Port), sshClientConfig) if err != nil { - return nil, logFunc(storage.ERROR, "SSH", "NewScript: Error creating ssh client! 
%w", err) + return nil, fmt.Errorf("NewStorageBackend: Error creating ssh client: %w", err) } _, _, err = sshClient.SendRequest("keepalive", false, nil) if err != nil { @@ -72,17 +84,17 @@ func NewStorageBackend(hostName string, port string, user string, password strin sftpClient, err := sftp.NewClient(sshClient) if err != nil { - return nil, logFunc(storage.ERROR, "SSH", "NewScript: error creating sftp client! %w", err) + return nil, fmt.Errorf("NewStorageBackend: error creating sftp client: %w", err) } return &sshStorage{ StorageBackend: &storage.StorageBackend{ - DestinationPath: remotePath, + DestinationPath: opts.RemotePath, Log: logFunc, }, client: sshClient, sftpClient: sftpClient, - hostName: hostName, + hostName: opts.HostName, }, nil } @@ -96,13 +108,13 @@ func (b *sshStorage) Copy(file string) error { source, err := os.Open(file) _, name := path.Split(file) if err != nil { - return b.Log(storage.ERROR, b.Name(), "Copy: Error reading the file to be uploaded! %w", err) + return fmt.Errorf("(*sshStorage).Copy: Error reading the file to be uploaded! %w", err) } defer source.Close() destination, err := b.sftpClient.Create(filepath.Join(b.DestinationPath, name)) if err != nil { - return b.Log(storage.ERROR, b.Name(), "Copy: Error creating file on SSH storage! %w", err) + return fmt.Errorf("(*sshStorage).Copy: Error creating file on SSH storage! %w", err) } defer destination.Close() @@ -112,31 +124,31 @@ func (b *sshStorage) Copy(file string) error { if err == io.EOF { tot, err := destination.Write(chunk[:num]) if err != nil { - return b.Log(storage.ERROR, b.Name(), "Copy: Error uploading the file to SSH storage! %w", err) + return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage! %w", err) } if tot != len(chunk[:num]) { - return b.Log(storage.ERROR, b.Name(), "sshClient: failed to write stream") + return errors.New("(*sshStorage).Copy: failed to write stream") } break } if err != nil { - return b.Log(storage.ERROR, b.Name(), "Copy: Error uploading the file to SSH storage! %w", err) + return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage! %w", err) } tot, err := destination.Write(chunk[:num]) if err != nil { - return b.Log(storage.ERROR, b.Name(), "Copy: Error uploading the file to SSH storage! %w", err) + return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage! %w", err) } if tot != len(chunk[:num]) { - return b.Log(storage.ERROR, b.Name(), "sshClient: failed to write stream") + return fmt.Errorf("(*sshStorage).Copy: failed to write stream") } } - b.Log(storage.INFO, b.Name(), "Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, b.hostName, b.DestinationPath) + b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, b.hostName, b.DestinationPath) return nil } @@ -145,7 +157,7 @@ func (b *sshStorage) Copy(file string) error { func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) { candidates, err := b.sftpClient.ReadDir(b.DestinationPath) if err != nil { - return nil, b.Log(storage.ERROR, b.Name(), "Prune: Error reading directory from SSH storage! %w", err) + return nil, fmt.Errorf("(*sshStorage).Prune: Error reading directory from SSH storage! 
%w", err) } var matches []string @@ -163,14 +175,16 @@ func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.P Pruned: uint(len(matches)), } - b.DoPrune(b.Name(), len(matches), len(candidates), "SSH backup(s)", func() error { + if err := b.DoPrune(b.Name(), len(matches), len(candidates), "SSH backup(s)", func() error { for _, match := range matches { if err := b.sftpClient.Remove(filepath.Join(b.DestinationPath, match)); err != nil { - return b.Log(storage.ERROR, b.Name(), "Prune: Error removing file from SSH storage! %w", err) + return fmt.Errorf("(*sshStorage).Prune: Error removing file from SSH storage! %w", err) } } return nil - }) + }); err != nil { + return stats, err + } return stats, nil } diff --git a/internal/storage/storage.go b/internal/storage/storage.go index d33d2f4..f0b4493 100644 --- a/internal/storage/storage.go +++ b/internal/storage/storage.go @@ -1,3 +1,6 @@ +// Copyright 2022 - Offen Authors +// SPDX-License-Identifier: MPL-2.0 + package storage import ( @@ -18,15 +21,15 @@ type StorageBackend struct { Log Log } -type LogType string +type LogLevel int const ( - INFO LogType = "INFO" - WARNING LogType = "WARNING" - ERROR LogType = "ERROR" + LogLevelInfo LogLevel = iota + LogLevelWarning + LogLevelError ) -type Log func(logType LogType, context string, msg string, params ...interface{}) error +type Log func(logType LogLevel, context string, msg string, params ...interface{}) // PruneStats is a wrapper struct for returning stats after pruning type PruneStats struct { @@ -41,7 +44,7 @@ func (b *StorageBackend) DoPrune(context string, lenMatches, lenCandidates int, if err := doRemoveFiles(); err != nil { return err } - b.Log(INFO, context, + b.Log(LogLevelInfo, context, "Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.", lenMatches, lenCandidates, @@ -49,10 +52,10 @@ func (b *StorageBackend) DoPrune(context string, lenMatches, lenCandidates int, b.RetentionDays, ) } else if lenMatches != 0 && lenMatches == lenCandidates { - b.Log(WARNING, context, "The current configuration would delete all %d existing %s.", lenMatches, description) - b.Log(WARNING, context, "Refusing to do so, please check your configuration.") + b.Log(LogLevelWarning, context, "The current configuration would delete all %d existing %s.", lenMatches, description) + b.Log(LogLevelWarning, context, "Refusing to do so, please check your configuration.") } else { - b.Log(INFO, context, "None of %d existing %s were pruned.", lenCandidates, description) + b.Log(LogLevelInfo, context, "None of %d existing %s were pruned.", lenCandidates, description) } return nil } diff --git a/internal/storage/webdav/webdav.go b/internal/storage/webdav/webdav.go index e2d7fd7..72b06d1 100644 --- a/internal/storage/webdav/webdav.go +++ b/internal/storage/webdav/webdav.go @@ -1,7 +1,11 @@ +// Copyright 2022 - Offen Authors +// SPDX-License-Identifier: MPL-2.0 + package webdav import ( "errors" + "fmt" "io/fs" "net/http" "os" @@ -20,28 +24,36 @@ type webDavStorage struct { url string } +// Config allows to configure a WebDAV storage backend. +type Config struct { + URL string + RemotePath string + Username string + Password string + URLInsecure bool +} + // NewStorageBackend creates and initializes a new WebDav storage backend. 
-func NewStorageBackend(url string, remotePath string, username string, password string, urlInsecure bool,
-	logFunc storage.Log) (storage.Backend, error) {
+func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
 
-	if username == "" || password == "" {
-		return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided")
+	if opts.Username == "" || opts.Password == "" {
+		return nil, errors.New("NewStorageBackend: WEBDAV_URL is defined, but no credentials were provided")
 	} else {
-		webdavClient := gowebdav.NewClient(url, username, password)
+		webdavClient := gowebdav.NewClient(opts.URL, opts.Username, opts.Password)
 
-		if urlInsecure {
+		if opts.URLInsecure {
 			defaultTransport, ok := http.DefaultTransport.(*http.Transport)
 			if !ok {
-				return nil, errors.New("newScript: unexpected error when asserting type for http.DefaultTransport")
+				return nil, errors.New("NewStorageBackend: unexpected error when asserting type for http.DefaultTransport")
 			}
 			webdavTransport := defaultTransport.Clone()
-			webdavTransport.TLSClientConfig.InsecureSkipVerify = urlInsecure
+			webdavTransport.TLSClientConfig.InsecureSkipVerify = opts.URLInsecure
 			webdavClient.SetTransport(webdavTransport)
 		}
 
 		return &webDavStorage{
 			StorageBackend: &storage.StorageBackend{
-				DestinationPath: remotePath,
+				DestinationPath: opts.RemotePath,
 				Log:             logFunc,
 			},
 			client: webdavClient,
@@ -51,7 +63,7 @@ func NewStorageBackend(url string, remotePath string, username string, password 
 
 // Name returns the name of the storage backend
 func (b *webDavStorage) Name() string {
-	return "WebDav"
+	return "WebDAV"
 }
 
 // Copy copies the given file to the WebDav storage backend.
@@ -59,15 +71,15 @@ func (b *webDavStorage) Copy(file string) error {
 	bytes, err := os.ReadFile(file)
 	_, name := path.Split(file)
 	if err != nil {
-		return b.Log(storage.ERROR, b.Name(), "Copy: Error reading the file to be uploaded! %w", err)
+		return fmt.Errorf("(*webDavStorage).Copy: Error reading the file to be uploaded! %w", err)
 	}
 	if err := b.client.MkdirAll(b.DestinationPath, 0644); err != nil {
-		return b.Log(storage.ERROR, b.Name(), "Copy: Error creating directory '%s' on WebDAV server! %w", b.DestinationPath, err)
+		return fmt.Errorf("(*webDavStorage).Copy: Error creating directory '%s' on WebDAV server! %w", b.DestinationPath, err)
 	}
 	if err := b.client.Write(filepath.Join(b.DestinationPath, name), bytes, 0644); err != nil {
-		return b.Log(storage.ERROR, b.Name(), "Copy: Error uploading the file to WebDAV server! %w", err)
+		return fmt.Errorf("(*webDavStorage).Copy: Error uploading the file to WebDAV server! %w", err)
 	}
-	b.Log(storage.INFO, b.Name(), "Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", file, b.url, b.DestinationPath)
+	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", file, b.url, b.DestinationPath)
 
 	return nil
 }
@@ -76,7 +88,7 @@ func (b *webDavStorage) Copy(file string) error {
 func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
 	candidates, err := b.client.ReadDir(b.DestinationPath)
 	if err != nil {
-		return nil, b.Log(storage.ERROR, b.Name(), "Prune: Error looking up candidates from remote storage! %w", err)
+		return nil, fmt.Errorf("(*webDavStorage).Prune: Error looking up candidates from remote storage! %w", err)
%w", err) } var matches []fs.FileInfo var lenCandidates int @@ -95,14 +107,16 @@ func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storag Pruned: uint(len(matches)), } - b.DoPrune(b.Name(), len(matches), lenCandidates, "WebDAV backup(s)", func() error { + if err := b.DoPrune(b.Name(), len(matches), lenCandidates, "WebDAV backup(s)", func() error { for _, match := range matches { if err := b.client.Remove(filepath.Join(b.DestinationPath, match.Name())); err != nil { - return b.Log(storage.ERROR, b.Name(), "Prune: Error removing file from WebDAV storage! %w", err) + return fmt.Errorf("(*webDavStorage).Prune: Error removing file from WebDAV storage! %w", err) } } return nil - }) + }); err != nil { + return stats, err + } return stats, nil } diff --git a/internal/utilities/util.go b/internal/utilities/util.go index 5d55a2d..17debbe 100644 --- a/internal/utilities/util.go +++ b/internal/utilities/util.go @@ -4,38 +4,11 @@ package utilities import ( - "bytes" "errors" - "fmt" - "io" - "os" "strings" ) -var Noop = func() error { return nil } - -// copy creates a copy of the file located at `dst` at `src`. -func CopyFile(src, dst string) error { - in, err := os.Open(src) - if err != nil { - return err - } - defer in.Close() - - out, err := os.Create(dst) - if err != nil { - return err - } - - _, err = io.Copy(out, in) - if err != nil { - out.Close() - return err - } - return out.Close() -} - -// join takes a list of errors and joins them into a single error +// Join takes a list of errors and joins them into a single error func Join(errs ...error) error { if len(errs) == 1 { return errs[0] @@ -49,42 +22,3 @@ func Join(errs ...error) error { } return errors.New("[" + strings.Join(msgs, ", ") + "]") } - -// remove removes the given file or directory from disk. -func Remove(location string) error { - fi, err := os.Lstat(location) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return fmt.Errorf("remove: error checking for existence of `%s`: %w", location, err) - } - if fi.IsDir() { - err = os.RemoveAll(location) - } else { - err = os.Remove(location) - } - if err != nil { - return fmt.Errorf("remove: error removing `%s`: %w", location, err) - } - return nil -} - -// buffer takes an io.Writer and returns a wrapped version of the -// writer that writes to both the original target as well as the returned buffer -func Buffer(w io.Writer) (io.Writer, *bytes.Buffer) { - buffering := &bufferingWriter{buf: bytes.Buffer{}, writer: w} - return buffering, &buffering.buf -} - -type bufferingWriter struct { - buf bytes.Buffer - writer io.Writer -} - -func (b *bufferingWriter) Write(p []byte) (n int, err error) { - if n, err := b.buf.Write(p); err != nil { - return n, fmt.Errorf("bufferingWriter: error writing to buffer: %w", err) - } - return b.writer.Write(p) -}