From 52c22a1891cb99f8f0f04baef2cdadb27429e2f6 Mon Sep 17 00:00:00 2001
From: Frederik Ring
Date: Fri, 16 Feb 2024 15:35:42 +0100
Subject: [PATCH] Auto prepend caller when wrapping errors

---
 cmd/backup/archive.go               | 42 +++++++++---------
 cmd/backup/command.go               | 21 ++++-----
 cmd/backup/config.go                | 22 +++++-----
 cmd/backup/config_provider.go       | 19 ++++----
 cmd/backup/copy_archive.go          |  6 +--
 cmd/backup/create_archive.go        | 13 +++---
 cmd/backup/encrypt_archive.go       | 11 ++---
 cmd/backup/exec.go                  | 29 ++++++------
 cmd/backup/hooks.go                 |  5 ++-
 cmd/backup/lock.go                  |  6 +--
 cmd/backup/notifications.go         |  9 ++--
 cmd/backup/prune_backups.go         |  3 +-
 cmd/backup/run_script.go            | 38 ++++++++++------
 cmd/backup/script.go                | 29 ++++++------
 cmd/backup/stop_restart.go          | 68 +++++++++++++++++------------
 cmd/backup/util.go                  |  7 +--
 internal/errwrap/wrap.go            | 43 ++++++++++++++++++
 internal/storage/azure/azure.go     | 19 ++++----
 internal/storage/dropbox/dropbox.go | 24 +++++-----
 internal/storage/local/local.go     | 37 ++++++++++------
 internal/storage/s3/s3.go           | 32 ++++++++------
 internal/storage/ssh/ssh.go         | 30 ++++++-------
 internal/storage/storage.go         |  6 +--
 internal/storage/webdav/webdav.go   | 16 +++----
 24 files changed, 313 insertions(+), 222 deletions(-)
 create mode 100644 internal/errwrap/wrap.go

diff --git a/cmd/backup/archive.go b/cmd/backup/archive.go
index 32297e4..f0895ca 100644
--- a/cmd/backup/archive.go
+++ b/cmd/backup/archive.go
@@ -16,23 +16,24 @@ import (
 	"runtime"
 	"strings"
 
-	"github.com/klauspost/pgzip"
-	"github.com/klauspost/compress/zstd"
+	"github.com/klauspost/compress/zstd"
+	"github.com/klauspost/pgzip"
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 )
 
 func createArchive(files []string, inputFilePath, outputFilePath string, compression string, compressionConcurrency int) error {
 	inputFilePath = stripTrailingSlashes(inputFilePath)
 	inputFilePath, outputFilePath, err := makeAbsolute(inputFilePath, outputFilePath)
 	if err != nil {
-		return fmt.Errorf("createArchive: error transposing given file paths: %w", err)
+		return errwrap.Wrap(err, "error transposing given file paths")
 	}
 
 	if err := os.MkdirAll(filepath.Dir(outputFilePath), 0755); err != nil {
-		return fmt.Errorf("createArchive: error creating output file path: %w", err)
+		return errwrap.Wrap(err, "error creating output file path")
 	}
 
 	if err := compress(files, outputFilePath, filepath.Dir(inputFilePath), compression, compressionConcurrency); err != nil {
-		return fmt.Errorf("createArchive: error creating archive: %w", err)
+		return errwrap.Wrap(err, "error creating archive")
 	}
 
 	return nil
@@ -58,35 +58,35 @@ func makeAbsolute(inputFilePath, outputFilePath string) (string, string, error)
 func compress(paths []string, outFilePath, subPath string, algo string, concurrency int) error {
 	file, err := os.Create(outFilePath)
 	if err != nil {
-		return fmt.Errorf("compress: error creating out file: %w", err)
+		return errwrap.Wrap(err, "error creating out file")
 	}
 
 	prefix := path.Dir(outFilePath)
 	compressWriter, err := getCompressionWriter(file, algo, concurrency)
 	if err != nil {
-		return fmt.Errorf("compress: error getting compression writer: %w", err)
+		return errwrap.Wrap(err, "error getting compression writer")
 	}
 	tarWriter := tar.NewWriter(compressWriter)
 
 	for _, p := range paths {
 		if err := writeTarball(p, tarWriter, prefix); err != nil {
-			return fmt.Errorf("compress: error writing %s to archive: %w", p, err)
+			return errwrap.Wrap(err, fmt.Sprintf("error writing %s to archive", p))
 		}
 	}
 
 	err = tarWriter.Close()
 	if err != nil {
-		return fmt.Errorf("compress: error closing tar writer: %w", err)
+		return errwrap.Wrap(err, "error closing tar writer")
 	}
 
 	err = compressWriter.Close()
 	if err != nil {
-		return fmt.Errorf("compress: error closing compression writer: %w", err)
+		return errwrap.Wrap(err, "error closing compression writer")
 	}
 
 	err = file.Close()
 	if err != nil {
-		return fmt.Errorf("compress: error closing file: %w", err)
+		return errwrap.Wrap(err, "error closing file")
 	}
 
 	return nil
@@ -97,7 +97,7 @@ func getCompressionWriter(file *os.File, algo string, concurrency int) (io.Write
 	case "gz":
 		w, err := pgzip.NewWriterLevel(file, 5)
 		if err != nil {
-			return nil, fmt.Errorf("getCompressionWriter: gzip error: %w", err)
+			return nil, errwrap.Wrap(err, "gzip error")
 		}
 
 		if concurrency == 0 {
@@ -105,25 +105,25 @@ func getCompressionWriter(file *os.File, algo string, concurrency int) (io.Write
 		}
 
 		if err := w.SetConcurrency(1<<20, concurrency); err != nil {
-			return nil, fmt.Errorf("getCompressionWriter: error setting concurrency: %w", err)
+			return nil, errwrap.Wrap(err, "error setting concurrency")
 		}
 		return w, nil
 	case "zst":
 		compressWriter, err := zstd.NewWriter(file)
 		if err != nil {
-			return nil, fmt.Errorf("getCompressionWriter: zstd error: %w", err)
+			return nil, errwrap.Wrap(err, "zstd error")
 		}
 		return compressWriter, nil
 	default:
-		return nil, fmt.Errorf("getCompressionWriter: unsupported compression algorithm: %s", algo)
+		return nil, errwrap.Wrap(nil, fmt.Sprintf("unsupported compression algorithm: %s", algo))
 	}
 }
 
 func writeTarball(path string, tarWriter *tar.Writer, prefix string) error {
 	fileInfo, err := os.Lstat(path)
 	if err != nil {
-		return fmt.Errorf("writeTarball: error getting file infor for %s: %w", path, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error getting file info for %s", path))
 	}
 
 	if fileInfo.Mode()&os.ModeSocket == os.ModeSocket {
@@ -134,19 +134,19 @@ func writeTarball(path string, tarWriter *tar.Writer, prefix string) error {
 	if fileInfo.Mode()&os.ModeSymlink == os.ModeSymlink {
 		var err error
 		if link, err = os.Readlink(path); err != nil {
-			return fmt.Errorf("writeTarball: error resolving symlink %s: %w", path, err)
+			return errwrap.Wrap(err, fmt.Sprintf("error resolving symlink %s", path))
 		}
 	}
 
 	header, err := tar.FileInfoHeader(fileInfo, link)
 	if err != nil {
-		return fmt.Errorf("writeTarball: error getting file info header: %w", err)
+		return errwrap.Wrap(err, "error getting file info header")
 	}
 	header.Name = strings.TrimPrefix(path, prefix)
 
 	err = tarWriter.WriteHeader(header)
 	if err != nil {
-		return fmt.Errorf("writeTarball: error writing file info header: %w", err)
+		return errwrap.Wrap(err, "error writing file info header")
 	}
 
 	if !fileInfo.Mode().IsRegular() {
@@ -155,13 +155,13 @@ func writeTarball(path string, tarWriter *tar.Writer, prefix string) error {
 
 	file, err := os.Open(path)
 	if err != nil {
-		return fmt.Errorf("writeTarball: error opening %s: %w", path, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error opening %s", path))
 	}
 	defer file.Close()
 
 	_, err = io.Copy(tarWriter, file)
 	if err != nil {
-		return fmt.Errorf("writeTarball: error copying %s to tar writer: %w", path, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error copying %s to tar writer", path))
 	}
 
 	return nil
diff --git a/cmd/backup/command.go b/cmd/backup/command.go
index cdaac0f..8d96e83 100644
--- a/cmd/backup/command.go
+++ b/cmd/backup/command.go
@@ -10,6 +10,7 @@ import (
 	"os/signal"
 	"syscall"
 
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 	"github.com/robfig/cron/v3"
 )
 
@@ -31,12 +32,12 @@ func newCommand() *command {
 func (c *command) runAsCommand() error {
 	configurations, err := sourceConfiguration(configStrategyEnv)
 	if err != nil {
-		return fmt.Errorf("runAsCommand: error loading env vars: %w", err)
+		return errwrap.Wrap(err, "error loading env vars")
 	}
 
 	for _, config := range configurations {
 		if err := runScript(config); err != nil {
-			return fmt.Errorf("runAsCommand: error running script: %w", err)
+			return errwrap.Wrap(err, "error running script")
 		}
 	}
 
@@ -59,12 +60,12 @@ func (c *command) runInForeground(opts foregroundOpts) error {
 	)
 
 	if err := c.schedule(configStrategyConfd); err != nil {
-		return fmt.Errorf("runInForeground: error scheduling: %w", err)
+		return errwrap.Wrap(err, "error scheduling")
 	}
 
 	if opts.profileCronExpression != "" {
 		if _, err := c.cr.AddFunc(opts.profileCronExpression, c.profile); err != nil {
-			return fmt.Errorf("runInForeground: error adding profiling job: %w", err)
+			return errwrap.Wrap(err, "error adding profiling job")
 		}
 	}
 
@@ -81,7 +82,7 @@ func (c *command) runInForeground(opts foregroundOpts) error {
 			return nil
 		case <-c.reload:
 			if err := c.schedule(configStrategyConfd); err != nil {
-				return fmt.Errorf("runInForeground: error reloading configuration: %w", err)
+				return errwrap.Wrap(err, "error reloading configuration")
 			}
 		}
 	}
@@ -96,7 +97,7 @@ func (c *command) schedule(strategy configStrategy) error {
 
 	configurations, err := sourceConfiguration(strategy)
 	if err != nil {
-		return fmt.Errorf("schedule: error sourcing configuration: %w", err)
+		return errwrap.Wrap(err, "error sourcing configuration")
 	}
 
 	for _, cfg := range configurations {
@@ -114,7 +115,7 @@ func (c *command) schedule(strategy configStrategy) error {
 					fmt.Sprintf(
 						"Unexpected error running schedule %s: %v",
 						config.BackupCronExpression,
-						err,
+						errwrap.Unwrap(err),
 					),
 					"error",
 					err,
@@ -123,7 +124,7 @@
 		})
 
 		if err != nil {
-			return fmt.Errorf("addJob: error adding schedule %s: %w", config.BackupCronExpression, err)
+			return errwrap.Wrap(err, fmt.Sprintf("error adding schedule %s", config.BackupCronExpression))
 		}
 		c.logger.Info(fmt.Sprintf("Successfully scheduled backup %s with expression %s", config.source, config.BackupCronExpression))
 		if ok := checkCronSchedule(config.BackupCronExpression); !ok {
@@ -132,7 +133,7 @@
 		)
 
 		if err != nil {
-			return fmt.Errorf("schedule: error scheduling: %w", err)
+			return errwrap.Wrap(err, "error scheduling")
 		}
 		c.schedules = append(c.schedules, id)
 	}
@@ -146,7 +147,7 @@
 func (c *command) must(err error) {
 	if err != nil {
 		c.logger.Error(
-			fmt.Sprintf("Fatal error running command: %v", err),
+			fmt.Sprintf("Fatal error running command: %v", errwrap.Unwrap(err)),
 			"error",
 			err,
 		)
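Note on the logging hunks above: the human-readable message now carries only the innermost cause via errwrap.Unwrap(err), while the full caller-prefixed chain still travels in the structured "error" attribute. A minimal sketch of that pattern using the standard log/slog package (the import path assumes code living inside this module; the message text is illustrative, not taken from this patch):

    package main

    import (
    	"fmt"
    	"log/slog"
    	"os"

    	"github.com/offen/docker-volume-backup/internal/errwrap"
    )

    func main() {
    	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
    	err := errwrap.Wrap(fmt.Errorf("connection refused"), "error running schedule")
    	// The message surfaces only the root cause; the "error" attribute
    	// retains the full caller-prefixed chain for debugging.
    	logger.Error(
    		fmt.Sprintf("Unexpected error running schedule: %v", errwrap.Unwrap(err)),
    		"error", err,
    	)
    }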
fmt.Errorf("config: error parsing certificate: %w", err) + return errwrap.Wrap(err, "error parsing certificate") } *c = CertDecoder{Cert: cert} return nil @@ -131,7 +133,7 @@ func (r *RegexpDecoder) Decode(v string) error { } re, err := regexp.Compile(v) if err != nil { - return fmt.Errorf("config: error compiling given regexp `%s`: %w", v, err) + return errwrap.Wrap(err, fmt.Sprintf("error compiling given regexp `%s`", v)) } *r = RegexpDecoder{Re: re} return nil @@ -143,10 +145,10 @@ type NaturalNumber int func (n *NaturalNumber) Decode(v string) error { asInt, err := strconv.Atoi(v) if err != nil { - return fmt.Errorf("config: error converting %s to int", v) + return errwrap.Wrap(nil, fmt.Sprintf("error converting %s to int", v)) } if asInt <= 0 { - return fmt.Errorf("config: expected a natural number, got %d", asInt) + return errwrap.Wrap(nil, fmt.Sprintf("expected a natural number, got %d", asInt)) } *n = NaturalNumber(asInt) return nil @@ -162,10 +164,10 @@ type WholeNumber int func (n *WholeNumber) Decode(v string) error { asInt, err := strconv.Atoi(v) if err != nil { - return fmt.Errorf("config: error converting %s to int", v) + return errwrap.Wrap(nil, fmt.Sprintf("error converting %s to int", v)) } if asInt < 0 { - return fmt.Errorf("config: expected a whole, positive number, including zero. Got %d", asInt) + return errwrap.Wrap(nil, fmt.Sprintf("expected a whole, positive number, including zero. Got %d", asInt)) } *n = WholeNumber(asInt) return nil @@ -191,12 +193,12 @@ func (c *Config) applyEnv() (func() error, error) { for _, lookup := range lookups { if !lookup.ok { if err := os.Unsetenv(lookup.key); err != nil { - return fmt.Errorf("(*Config).applyEnv: error unsetting env var %s: %w", lookup.key, err) + return errwrap.Wrap(err, fmt.Sprintf("error unsetting env var %s", lookup.key)) } continue } if err := os.Setenv(lookup.key, lookup.value); err != nil { - return fmt.Errorf("(*Config).applyEnv: error setting back env var %s: %w", lookup.key, err) + return errwrap.Wrap(err, fmt.Sprintf("error setting back env var %s", lookup.key)) } } return nil @@ -206,7 +208,7 @@ func (c *Config) applyEnv() (func() error, error) { current, ok := os.LookupEnv(key) lookups = append(lookups, envVarLookup{ok: ok, key: key, value: current}) if err := os.Setenv(key, value); err != nil { - return unset, fmt.Errorf("(*Config).applyEnv: error setting env var: %w", err) + return unset, errwrap.Wrap(err, "error setting env var") } } return unset, nil diff --git a/cmd/backup/config_provider.go b/cmd/backup/config_provider.go index b0a2755..4225d70 100644 --- a/cmd/backup/config_provider.go +++ b/cmd/backup/config_provider.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022 - Offen Authors +// Copyright 2024 - Offen Authors // SPDX-License-Identifier: MPL-2.0 package main @@ -9,6 +9,7 @@ import ( "path/filepath" "github.com/joho/godotenv" + "github.com/offen/docker-volume-backup/internal/errwrap" "github.com/offen/envconfig" ) @@ -33,11 +34,11 @@ func sourceConfiguration(strategy configStrategy) ([]*Config, error) { if os.IsNotExist(err) { return sourceConfiguration(configStrategyEnv) } - return nil, fmt.Errorf("sourceConfiguration: error loading config files: %w", err) + return nil, errwrap.Wrap(err, "error loading config files") } return cs, nil default: - return nil, fmt.Errorf("sourceConfiguration: received unknown config strategy: %v", strategy) + return nil, errwrap.Wrap(nil, fmt.Sprintf("received unknown config strategy: %v", strategy)) } } @@ -68,7 +69,7 @@ func loadConfig(lookup envProxy) (*Config, error) 
{ var c = &Config{} if err := envconfig.Process("", c); err != nil { - return nil, fmt.Errorf("loadConfig: failed to process configuration values: %w", err) + return nil, errwrap.Wrap(err, "failed to process configuration values") } return c, nil @@ -77,7 +78,7 @@ func loadConfig(lookup envProxy) (*Config, error) { func loadConfigFromEnvVars() (*Config, error) { c, err := loadConfig(os.LookupEnv) if err != nil { - return nil, fmt.Errorf("loadEnvVars: error loading config from environment: %w", err) + return nil, errwrap.Wrap(err, "error loading config from environment") } c.source = "from environment" return c, nil @@ -89,7 +90,7 @@ func loadConfigsFromEnvFiles(directory string) ([]*Config, error) { if os.IsNotExist(err) { return nil, err } - return nil, fmt.Errorf("loadEnvFiles: failed to read files from env directory: %w", err) + return nil, errwrap.Wrap(err, "failed to read files from env directory") } configs := []*Config{} @@ -100,11 +101,11 @@ func loadConfigsFromEnvFiles(directory string) ([]*Config, error) { p := filepath.Join(directory, item.Name()) f, err := os.ReadFile(p) if err != nil { - return nil, fmt.Errorf("loadEnvFiles: error reading %s: %w", item.Name(), err) + return nil, errwrap.Wrap(err, fmt.Sprintf("error reading %s", item.Name())) } envFile, err := godotenv.Unmarshal(os.ExpandEnv(string(f))) if err != nil { - return nil, fmt.Errorf("loadEnvFiles: error reading config file %s: %w", p, err) + return nil, errwrap.Wrap(err, fmt.Sprintf("error reading config file %s", p)) } lookup := func(key string) (string, bool) { val, ok := envFile[key] @@ -115,7 +116,7 @@ func loadConfigsFromEnvFiles(directory string) ([]*Config, error) { } c, err := loadConfig(lookup) if err != nil { - return nil, fmt.Errorf("loadEnvFiles: error loading config from file %s: %w", p, err) + return nil, errwrap.Wrap(err, fmt.Sprintf("error loading config from file %s", p)) } c.source = item.Name() c.additionalEnvVars = envFile diff --git a/cmd/backup/copy_archive.go b/cmd/backup/copy_archive.go index d111099..43184ce 100644 --- a/cmd/backup/copy_archive.go +++ b/cmd/backup/copy_archive.go @@ -4,10 +4,10 @@ package main import ( - "fmt" "os" "path" + "github.com/offen/docker-volume-backup/internal/errwrap" "golang.org/x/sync/errgroup" ) @@ -16,7 +16,7 @@ import ( func (s *script) copyArchive() error { _, name := path.Split(s.file) if stat, err := os.Stat(s.file); err != nil { - return fmt.Errorf("copyArchive: unable to stat backup file: %w", err) + return errwrap.Wrap(err, "unable to stat backup file") } else { size := stat.Size() s.stats.BackupFile = BackupFileStats{ @@ -34,7 +34,7 @@ func (s *script) copyArchive() error { }) } if err := eg.Wait(); err != nil { - return fmt.Errorf("copyArchive: error copying archive: %w", err) + return errwrap.Wrap(err, "error copying archive") } return nil diff --git a/cmd/backup/create_archive.go b/cmd/backup/create_archive.go index de6d3f2..3fb45c7 100644 --- a/cmd/backup/create_archive.go +++ b/cmd/backup/create_archive.go @@ -8,6 +8,7 @@ import ( "io/fs" "path/filepath" + "github.com/offen/docker-volume-backup/internal/errwrap" "github.com/otiai10/copy" ) @@ -27,7 +28,7 @@ func (s *script) createArchive() error { // copy before compressing guard against a situation where backup folder's content are still growing. 
s.registerHook(hookLevelPlumbing, func(error) error { if err := remove(backupSources); err != nil { - return fmt.Errorf("createArchive: error removing snapshot: %w", err) + return errwrap.Wrap(err, "error removing snapshot") } s.logger.Info( fmt.Sprintf("Removed snapshot `%s`.", backupSources), @@ -38,7 +39,7 @@ func (s *script) createArchive() error { PreserveTimes: true, PreserveOwner: true, }); err != nil { - return fmt.Errorf("createArchive: error creating snapshot: %w", err) + return errwrap.Wrap(err, "error creating snapshot") } s.logger.Info( fmt.Sprintf("Created snapshot of `%s` at `%s`.", s.c.BackupSources, backupSources), @@ -48,7 +49,7 @@ func (s *script) createArchive() error { tarFile := s.file s.registerHook(hookLevelPlumbing, func(error) error { if err := remove(tarFile); err != nil { - return fmt.Errorf("createArchive: error removing tar file: %w", err) + return errwrap.Wrap(err, "error removing tar file") } s.logger.Info( fmt.Sprintf("Removed tar file `%s`.", tarFile), @@ -58,7 +59,7 @@ func (s *script) createArchive() error { backupPath, err := filepath.Abs(stripTrailingSlashes(backupSources)) if err != nil { - return fmt.Errorf("createArchive: error getting absolute path: %w", err) + return errwrap.Wrap(err, "error getting absolute path") } var filesEligibleForBackup []string @@ -73,11 +74,11 @@ func (s *script) createArchive() error { filesEligibleForBackup = append(filesEligibleForBackup, path) return nil }); err != nil { - return fmt.Errorf("createArchive: error walking filesystem tree: %w", err) + return errwrap.Wrap(err, "error walking filesystem tree") } if err := createArchive(filesEligibleForBackup, backupSources, tarFile, s.c.BackupCompression.String(), s.c.GzipParallelism.Int()); err != nil { - return fmt.Errorf("createArchive: error compressing backup folder: %w", err) + return errwrap.Wrap(err, "error compressing backup folder") } s.logger.Info( diff --git a/cmd/backup/encrypt_archive.go b/cmd/backup/encrypt_archive.go index 4bc38dd..9785f04 100644 --- a/cmd/backup/encrypt_archive.go +++ b/cmd/backup/encrypt_archive.go @@ -10,6 +10,7 @@ import ( "path" openpgp "github.com/ProtonMail/go-crypto/openpgp/v2" + "github.com/offen/docker-volume-backup/internal/errwrap" ) // encryptArchive encrypts the backup file using PGP and the configured passphrase. 
@@ -23,7 +24,7 @@ func (s *script) encryptArchive() error { gpgFile := fmt.Sprintf("%s.gpg", s.file) s.registerHook(hookLevelPlumbing, func(error) error { if err := remove(gpgFile); err != nil { - return fmt.Errorf("encryptArchive: error removing gpg file: %w", err) + return errwrap.Wrap(err, "error removing gpg file") } s.logger.Info( fmt.Sprintf("Removed GPG file `%s`.", gpgFile), @@ -33,7 +34,7 @@ func (s *script) encryptArchive() error { outFile, err := os.Create(gpgFile) if err != nil { - return fmt.Errorf("encryptArchive: error opening out file: %w", err) + return errwrap.Wrap(err, "error opening out file") } defer outFile.Close() @@ -42,17 +43,17 @@ func (s *script) encryptArchive() error { FileName: name, }, nil) if err != nil { - return fmt.Errorf("encryptArchive: error encrypting backup file: %w", err) + return errwrap.Wrap(err, "error encrypting backup file") } defer dst.Close() src, err := os.Open(s.file) if err != nil { - return fmt.Errorf("encryptArchive: error opening backup file `%s`: %w", s.file, err) + return errwrap.Wrap(err, fmt.Sprintf("error opening backup file `%s`", s.file)) } if _, err := io.Copy(dst, src); err != nil { - return fmt.Errorf("encryptArchive: error writing ciphertext to file: %w", err) + return errwrap.Wrap(err, "error writing ciphertext to file") } s.file = gpgFile diff --git a/cmd/backup/exec.go b/cmd/backup/exec.go index d001469..1d762f6 100644 --- a/cmd/backup/exec.go +++ b/cmd/backup/exec.go @@ -19,6 +19,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/pkg/stdcopy" + "github.com/offen/docker-volume-backup/internal/errwrap" "golang.org/x/sync/errgroup" ) @@ -35,12 +36,12 @@ func (s *script) exec(containerRef string, command string, user string) ([]byte, User: user, }) if err != nil { - return nil, nil, fmt.Errorf("exec: error creating container exec: %w", err) + return nil, nil, errwrap.Wrap(err, "error creating container exec") } resp, err := s.cli.ContainerExecAttach(context.Background(), execID.ID, types.ExecStartCheck{}) if err != nil { - return nil, nil, fmt.Errorf("exec: error attaching container exec: %w", err) + return nil, nil, errwrap.Wrap(err, "error attaching container exec") } defer resp.Close() @@ -53,25 +54,25 @@ func (s *script) exec(containerRef string, command string, user string) ([]byte, }() if err := <-outputDone; err != nil { - return nil, nil, fmt.Errorf("exec: error demultiplexing output: %w", err) + return nil, nil, errwrap.Wrap(err, "error demultiplexing output") } stdout, err := io.ReadAll(&outBuf) if err != nil { - return nil, nil, fmt.Errorf("exec: error reading stdout: %w", err) + return nil, nil, errwrap.Wrap(err, "error reading stdout") } stderr, err := io.ReadAll(&errBuf) if err != nil { - return nil, nil, fmt.Errorf("exec: error reading stderr: %w", err) + return nil, nil, errwrap.Wrap(err, "error reading stderr") } res, err := s.cli.ContainerExecInspect(context.Background(), execID.ID) if err != nil { - return nil, nil, fmt.Errorf("exec: error inspecting container exec: %w", err) + return nil, nil, errwrap.Wrap(err, "error inspecting container exec") } if res.ExitCode > 0 { - return stdout, stderr, fmt.Errorf("exec: running command exited %d", res.ExitCode) + return stdout, stderr, errwrap.Wrap(nil, fmt.Sprintf("running command exited %d", res.ExitCode)) } return stdout, stderr, nil @@ -91,7 +92,7 @@ func (s *script) runLabeledCommands(label string) error { Filters: filters.NewArgs(f...), }) if err != nil { - return 
fmt.Errorf("runLabeledCommands: error querying for containers: %w", err) + return errwrap.Wrap(err, "error querying for containers") } var hasDeprecatedContainers bool @@ -104,7 +105,7 @@ func (s *script) runLabeledCommands(label string) error { Filters: filters.NewArgs(f...), }) if err != nil { - return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err) + return errwrap.Wrap(err, "error querying for containers") } if len(deprecatedContainers) != 0 { hasDeprecatedContainers = true @@ -121,7 +122,7 @@ func (s *script) runLabeledCommands(label string) error { Filters: filters.NewArgs(f...), }) if err != nil { - return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err) + return errwrap.Wrap(err, "error querying for containers") } if len(deprecatedContainers) != 0 { hasDeprecatedContainers = true @@ -164,14 +165,14 @@ func (s *script) runLabeledCommands(label string) error { os.Stdout.Write(stdout) } if err != nil { - return fmt.Errorf("runLabeledCommands: error executing command: %w", err) + return errwrap.Wrap(err, "error executing command") } return nil }) } if err := g.Wait(); err != nil { - return fmt.Errorf("runLabeledCommands: error from errgroup: %w", err) + return errwrap.Wrap(err, "error from errgroup") } return nil } @@ -191,12 +192,12 @@ func (s *script) withLabeledCommands(step lifecyclePhase, cb func() error) func( } return func() (err error) { if err = s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-pre", step)); err != nil { - err = fmt.Errorf("(*script).withLabeledCommands: %s: error running pre commands: %w", step, err) + err = errwrap.Wrap(err, fmt.Sprintf("error running %s-pre commands", step)) return } defer func() { if derr := s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-post", step)); derr != nil { - err = errors.Join(err, fmt.Errorf("(*script).withLabeledCommands: error running %s-post commands: %w", step, derr)) + err = errors.Join(err, errwrap.Wrap(derr, fmt.Sprintf("error running %s-post commands", step))) } }() err = cb() diff --git a/cmd/backup/hooks.go b/cmd/backup/hooks.go index 24f8042..86a3d20 100644 --- a/cmd/backup/hooks.go +++ b/cmd/backup/hooks.go @@ -5,8 +5,9 @@ package main import ( "errors" - "fmt" "sort" + + "github.com/offen/docker-volume-backup/internal/errwrap" ) // hook contains a queued action that can be trigger them when the script @@ -47,7 +48,7 @@ func (s *script) runHooks(err error) error { continue } if actionErr := hook.action(err); actionErr != nil { - actionErrors = append(actionErrors, fmt.Errorf("runHooks: error running hook: %w", actionErr)) + actionErrors = append(actionErrors, errwrap.Wrap(actionErr, "error running hook")) } } if len(actionErrors) != 0 { diff --git a/cmd/backup/lock.go b/cmd/backup/lock.go index 54d48dd..085e120 100644 --- a/cmd/backup/lock.go +++ b/cmd/backup/lock.go @@ -4,11 +4,11 @@ package main import ( - "errors" "fmt" "time" "github.com/gofrs/flock" + "github.com/offen/docker-volume-backup/internal/errwrap" ) // lock opens a lockfile at the given location, keeping it locked until the @@ -31,7 +31,7 @@ func (s *script) lock(lockfile string) (func() error, error) { for { acquired, err := fileLock.TryLock() if err != nil { - return noop, fmt.Errorf("lock: error trying to lock: %w", err) + return noop, errwrap.Wrap(err, "error trying to lock") } if acquired { if s.encounteredLock { @@ -54,7 +54,7 @@ func (s *script) lock(lockfile string) (func() error, error) { case <-retry.C: continue case <-deadline.C: - return noop, errors.New("lock: timed out 
waiting for lockfile to become available") + return noop, errwrap.Wrap(nil, "timed out waiting for lockfile to become available") } } } diff --git a/cmd/backup/notifications.go b/cmd/backup/notifications.go index 7ec60f7..7b02a33 100644 --- a/cmd/backup/notifications.go +++ b/cmd/backup/notifications.go @@ -14,6 +14,7 @@ import ( "time" sTypes "github.com/containrrr/shoutrrr/pkg/types" + "github.com/offen/docker-volume-backup/internal/errwrap" ) //go:embed notifications.tmpl @@ -37,16 +38,16 @@ func (s *script) notify(titleTemplate string, bodyTemplate string, err error) er titleBuf := &bytes.Buffer{} if err := s.template.ExecuteTemplate(titleBuf, titleTemplate, params); err != nil { - return fmt.Errorf("notify: error executing %s template: %w", titleTemplate, err) + return errwrap.Wrap(err, fmt.Sprintf("error executing %s template", titleTemplate)) } bodyBuf := &bytes.Buffer{} if err := s.template.ExecuteTemplate(bodyBuf, bodyTemplate, params); err != nil { - return fmt.Errorf("notify: error executing %s template: %w", bodyTemplate, err) + return errwrap.Wrap(err, fmt.Sprintf("error executing %s template", bodyTemplate)) } if err := s.sendNotification(titleBuf.String(), bodyBuf.String()); err != nil { - return fmt.Errorf("notify: error notifying: %w", err) + return errwrap.Wrap(err, "error sending notification") } return nil } @@ -70,7 +71,7 @@ func (s *script) sendNotification(title, body string) error { } } if len(errs) != 0 { - return fmt.Errorf("sendNotification: error sending message: %w", errors.Join(errs...)) + return errwrap.Wrap(errors.Join(errs...), "error sending message") } return nil } diff --git a/cmd/backup/prune_backups.go b/cmd/backup/prune_backups.go index cde0673..c98ebcf 100644 --- a/cmd/backup/prune_backups.go +++ b/cmd/backup/prune_backups.go @@ -9,6 +9,7 @@ import ( "strings" "time" + "github.com/offen/docker-volume-backup/internal/errwrap" "golang.org/x/sync/errgroup" ) @@ -47,7 +48,7 @@ func (s *script) pruneBackups() error { } if err := eg.Wait(); err != nil { - return fmt.Errorf("pruneBackups: error pruning backups: %w", err) + return errwrap.Wrap(err, "error pruning backups") } return nil diff --git a/cmd/backup/run_script.go b/cmd/backup/run_script.go index e734f10..aee9d2c 100644 --- a/cmd/backup/run_script.go +++ b/cmd/backup/run_script.go @@ -6,6 +6,8 @@ package main import ( "errors" "fmt" + + "github.com/offen/docker-volume-backup/internal/errwrap" ) // runScript instantiates a new script object and orchestrates a backup run. 
@@ -15,7 +17,12 @@ import ( func runScript(c *Config) (err error) { defer func() { if derr := recover(); derr != nil { - err = fmt.Errorf("runScript: unexpected panic running script: %v", derr) + asErr, ok := derr.(error) + if ok { + err = errwrap.Wrap(asErr, "unexpected panic running script") + } else { + err = errwrap.Wrap(nil, fmt.Sprintf("%v", derr)) + } } }() @@ -23,27 +30,27 @@ func runScript(c *Config) (err error) { unlock, lockErr := s.lock("/var/lock/dockervolumebackup.lock") if lockErr != nil { - err = fmt.Errorf("runScript: error acquiring file lock: %w", lockErr) + err = errwrap.Wrap(lockErr, "error acquiring file lock") return } defer func() { if derr := unlock(); derr != nil { - err = errors.Join(err, fmt.Errorf("runScript: error releasing file lock: %w", derr)) + err = errors.Join(err, errwrap.Wrap(derr, "error releasing file lock")) } }() unset, err := s.c.applyEnv() if err != nil { - return fmt.Errorf("runScript: error applying env: %w", err) + return errwrap.Wrap(err, "error applying env") } defer func() { if derr := unset(); derr != nil { - err = errors.Join(err, fmt.Errorf("runScript: error unsetting environment variables: %w", derr)) + err = errors.Join(err, errwrap.Wrap(derr, "error unsetting environment variables")) } }() if initErr := s.init(); initErr != nil { - err = fmt.Errorf("runScript: error instantiating script: %w", initErr) + err = errwrap.Wrap(initErr, "error instantiating script") return } @@ -56,7 +63,7 @@ func runScript(c *Config) (err error) { // similar). defer func() { if derr := restartContainersAndServices(); derr != nil { - err = errors.Join(err, fmt.Errorf("runScript: error restarting containers and services: %w", derr)) + err = errors.Join(err, errwrap.Wrap(derr, "error restarting containers and services")) } }() if err != nil { @@ -82,19 +89,22 @@ func runScript(c *Config) (err error) { if hookErr := s.runHooks(scriptErr); hookErr != nil { if scriptErr != nil { - return fmt.Errorf( - "runScript: error %w executing the script followed by %w calling the registered hooks", - scriptErr, - hookErr, + return errwrap.Wrap( + nil, + fmt.Sprintf( + "error %v executing the script followed by %v calling the registered hooks", + scriptErr, + hookErr, + ), ) } - return fmt.Errorf( - "runScript: the script ran successfully, but an error occurred calling the registered hooks: %w", + return errwrap.Wrap( hookErr, + "the script ran successfully, but an error occurred calling the registered hooks", ) } if scriptErr != nil { - return fmt.Errorf("runScript: error running script: %w", scriptErr) + return errwrap.Wrap(scriptErr, "error running script") } return nil }() diff --git a/cmd/backup/script.go b/cmd/backup/script.go index 492dcb3..862d478 100644 --- a/cmd/backup/script.go +++ b/cmd/backup/script.go @@ -12,6 +12,7 @@ import ( "text/template" "time" + "github.com/offen/docker-volume-backup/internal/errwrap" "github.com/offen/docker-volume-backup/internal/storage" "github.com/offen/docker-volume-backup/internal/storage/azure" "github.com/offen/docker-volume-backup/internal/storage/dropbox" @@ -80,14 +81,14 @@ func (s *script) init() error { tmplFileName, tErr := template.New("extension").Parse(s.file) if tErr != nil { - return fmt.Errorf("newScript: unable to parse backup file extension template: %w", tErr) + return errwrap.Wrap(tErr, "unable to parse backup file extension template") } var bf bytes.Buffer if tErr := tmplFileName.Execute(&bf, map[string]string{ "Extension": fmt.Sprintf("tar.%s", s.c.BackupCompression), }); tErr != nil { - return 
fmt.Errorf("newScript: error executing backup file extension template: %w", tErr) + return errwrap.Wrap(tErr, "error executing backup file extension template") } s.file = bf.String() @@ -103,12 +104,12 @@ func (s *script) init() error { if !os.IsNotExist(err) || dockerHostSet { cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) if err != nil { - return fmt.Errorf("newScript: failed to create docker client") + return errwrap.Wrap(err, "failed to create docker client") } s.cli = cli s.registerHook(hookLevelPlumbing, func(err error) error { if err := s.cli.Close(); err != nil { - return fmt.Errorf("newScript: failed to close docker client: %w", err) + return errwrap.Wrap(err, "failed to close docker client") } return nil }) @@ -118,8 +119,6 @@ func (s *script) init() error { switch logType { case storage.LogLevelWarning: s.logger.Warn(fmt.Sprintf(msg, params...), "storage", context) - case storage.LogLevelError: - s.logger.Error(fmt.Sprintf(msg, params...), "storage", context) default: s.logger.Info(fmt.Sprintf(msg, params...), "storage", context) } @@ -141,7 +140,7 @@ func (s *script) init() error { } s3Backend, err := s3.NewStorageBackend(s3Config, logFunc) if err != nil { - return fmt.Errorf("newScript: error creating s3 storage backend: %w", err) + return errwrap.Wrap(err, "error creating s3 storage backend") } s.storages = append(s.storages, s3Backend) } @@ -156,7 +155,7 @@ func (s *script) init() error { } webdavBackend, err := webdav.NewStorageBackend(webDavConfig, logFunc) if err != nil { - return fmt.Errorf("newScript: error creating webdav storage backend: %w", err) + return errwrap.Wrap(err, "error creating webdav storage backend") } s.storages = append(s.storages, webdavBackend) } @@ -173,7 +172,7 @@ func (s *script) init() error { } sshBackend, err := ssh.NewStorageBackend(sshConfig, logFunc) if err != nil { - return fmt.Errorf("newScript: error creating ssh storage backend: %w", err) + return errwrap.Wrap(err, "error creating ssh storage backend") } s.storages = append(s.storages, sshBackend) } @@ -197,7 +196,7 @@ func (s *script) init() error { } azureBackend, err := azure.NewStorageBackend(azureConfig, logFunc) if err != nil { - return fmt.Errorf("newScript: error creating azure storage backend: %w", err) + return errwrap.Wrap(err, "error creating azure storage backend") } s.storages = append(s.storages, azureBackend) } @@ -214,7 +213,7 @@ func (s *script) init() error { } dropboxBackend, err := dropbox.NewStorageBackend(dropboxConfig, logFunc) if err != nil { - return fmt.Errorf("newScript: error creating dropbox storage backend: %w", err) + return errwrap.Wrap(err, "error creating dropbox storage backend") } s.storages = append(s.storages, dropboxBackend) } @@ -240,14 +239,14 @@ func (s *script) init() error { hookLevel, ok := hookLevels[s.c.NotificationLevel] if !ok { - return fmt.Errorf("newScript: unknown NOTIFICATION_LEVEL %s", s.c.NotificationLevel) + return errwrap.Wrap(nil, fmt.Sprintf("unknown NOTIFICATION_LEVEL %s", s.c.NotificationLevel)) } s.hookLevel = hookLevel if len(s.c.NotificationURLs) > 0 { sender, senderErr := shoutrrr.CreateSender(s.c.NotificationURLs...) 
if senderErr != nil { - return fmt.Errorf("newScript: error creating sender: %w", senderErr) + return errwrap.Wrap(senderErr, "error creating sender") } s.sender = sender @@ -255,13 +254,13 @@ func (s *script) init() error { tmpl.Funcs(templateHelpers) tmpl, err = tmpl.Parse(defaultNotifications) if err != nil { - return fmt.Errorf("newScript: unable to parse default notifications templates: %w", err) + return errwrap.Wrap(err, "unable to parse default notifications templates") } if fi, err := os.Stat("/etc/dockervolumebackup/notifications.d"); err == nil && fi.IsDir() { tmpl, err = tmpl.ParseGlob("/etc/dockervolumebackup/notifications.d/*.*") if err != nil { - return fmt.Errorf("newScript: unable to parse user defined notifications templates: %w", err) + return errwrap.Wrap(err, "unable to parse user defined notifications templates") } } s.template = tmpl diff --git a/cmd/backup/stop_restart.go b/cmd/backup/stop_restart.go index c538755..73af5d2 100644 --- a/cmd/backup/stop_restart.go +++ b/cmd/backup/stop_restart.go @@ -1,3 +1,6 @@ +// Copyright 2024 - Offen Authors +// SPDX-License-Identifier: MPL-2.0 + package main import ( @@ -15,24 +18,25 @@ import ( "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/client" + "github.com/offen/docker-volume-backup/internal/errwrap" ) func scaleService(cli *client.Client, serviceID string, replicas uint64) ([]string, error) { service, _, err := cli.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) if err != nil { - return nil, fmt.Errorf("scaleService: error inspecting service %s: %w", serviceID, err) + return nil, errwrap.Wrap(err, fmt.Sprintf("error inspecting service %s", serviceID)) } serviceMode := &service.Spec.Mode switch { case serviceMode.Replicated != nil: serviceMode.Replicated.Replicas = &replicas default: - return nil, fmt.Errorf("scaleService: service to be scaled %s has to be in replicated mode", service.Spec.Name) + return nil, errwrap.Wrap(nil, fmt.Sprintf("service to be scaled %s has to be in replicated mode", service.Spec.Name)) } response, err := cli.ServiceUpdate(context.Background(), service.ID, service.Version, service.Spec, types.ServiceUpdateOptions{}) if err != nil { - return nil, fmt.Errorf("scaleService: error updating service: %w", err) + return nil, errwrap.Wrap(err, "error updating service") } discardWriter := &noopWriteCloser{io.Discard} @@ -51,11 +55,14 @@ func awaitContainerCountForService(cli *client.Client, serviceID string, count i for { select { case <-timeout.C: - return fmt.Errorf( - "awaitContainerCount: timed out after waiting %s for service %s to reach desired container count of %d", - timeoutAfter, - serviceID, - count, + return errwrap.Wrap( + nil, + fmt.Sprintf( + "timed out after waiting %s for service %s to reach desired container count of %d", + timeoutAfter, + serviceID, + count, + ), ) case <-poll.C: containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{ @@ -65,7 +72,7 @@ func awaitContainerCountForService(cli *client.Client, serviceID string, count i }), }) if err != nil { - return fmt.Errorf("awaitContainerCount: error listing containers: %w", err) + return errwrap.Wrap(err, "error listing containers") } if len(containers) == count { return nil @@ -84,7 +91,7 @@ func (s *script) stopContainersAndServices() (func() error, error) { dockerInfo, err := s.cli.Info(context.Background()) if err != nil { - return noop, fmt.Errorf("(*script).stopContainersAndServices: error 
getting docker info: %w", err) + return noop, errwrap.Wrap(err, "error getting docker info") } isDockerSwarm := dockerInfo.Swarm.LocalNodeState != "inactive" @@ -97,7 +104,7 @@ func (s *script) stopContainersAndServices() (func() error, error) { "Please use BACKUP_STOP_DURING_BACKUP_LABEL instead. Refer to the docs for an upgrade guide.", ) if _, ok := os.LookupEnv("BACKUP_STOP_DURING_BACKUP_LABEL"); ok { - return noop, errors.New("(*script).stopContainersAndServices: both BACKUP_STOP_DURING_BACKUP_LABEL and BACKUP_STOP_CONTAINER_LABEL have been set, cannot continue") + return noop, errwrap.Wrap(nil, "both BACKUP_STOP_DURING_BACKUP_LABEL and BACKUP_STOP_CONTAINER_LABEL have been set, cannot continue") } labelValue = s.c.BackupStopContainerLabel } @@ -109,7 +116,7 @@ func (s *script) stopContainersAndServices() (func() error, error) { allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{}) if err != nil { - return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for containers: %w", err) + return noop, errwrap.Wrap(err, "error querying for containers") } containersToStop, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{ Filters: filters.NewArgs(filters.KeyValuePair{ @@ -118,7 +125,7 @@ func (s *script) stopContainersAndServices() (func() error, error) { }), }) if err != nil { - return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for containers to stop: %w", err) + return noop, errwrap.Wrap(err, "error querying for containers to stop") } var allServices []swarm.Service @@ -126,7 +133,7 @@ func (s *script) stopContainersAndServices() (func() error, error) { if isDockerSwarm { allServices, err = s.cli.ServiceList(context.Background(), types.ServiceListOptions{}) if err != nil { - return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for services: %w", err) + return noop, errwrap.Wrap(err, "error querying for services") } matchingServices, err := s.cli.ServiceList(context.Background(), types.ServiceListOptions{ Filters: filters.NewArgs(filters.KeyValuePair{ @@ -142,7 +149,7 @@ func (s *script) stopContainersAndServices() (func() error, error) { }) } if err != nil { - return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for services to scale down: %w", err) + return noop, errwrap.Wrap(err, "error querying for services to scale down") } } @@ -155,14 +162,17 @@ func (s *script) stopContainersAndServices() (func() error, error) { if swarmServiceID, ok := container.Labels["com.docker.swarm.service.id"]; ok { parentService, _, err := s.cli.ServiceInspectWithRaw(context.Background(), swarmServiceID, types.ServiceInspectOptions{}) if err != nil { - return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for parent service with ID %s: %w", swarmServiceID, err) + return noop, errwrap.Wrap(err, fmt.Sprintf("error querying for parent service with ID %s", swarmServiceID)) } for label := range parentService.Spec.Labels { if label == "docker-volume-backup.stop-during-backup" { - return noop, fmt.Errorf( - "(*script).stopContainersAndServices: container %s is labeled to stop but has parent service %s which is also labeled, cannot continue", - container.Names[0], - parentService.Spec.Name, + return noop, errwrap.Wrap( + nil, + fmt.Sprintf( + "container %s is labeled to stop but has parent service %s which is also labeled, cannot continue", + container.Names[0], + parentService.Spec.Name, + ), ) } } @@ -245,10 +255,12 @@ func (s 
@@ -245,10 +255,12 @@ func (s *script) stopContainersAndServices() (func() error, error) {
 	var initialErr error
 	allErrors := append(stopErrors, scaleDownErrors.value()...)
 	if len(allErrors) != 0 {
-		initialErr = fmt.Errorf(
-			"(*script).stopContainersAndServices: %d error(s) stopping containers: %w",
-			len(allErrors),
+		initialErr = errwrap.Wrap(
 			errors.Join(allErrors...),
+			fmt.Sprintf(
+				"%d error(s) stopping containers",
+				len(allErrors),
+			),
 		)
 	}
 
@@ -268,7 +280,7 @@ func (s *script) stopContainersAndServices() (func() error, error) {
 			if err != nil {
 				restartErrors = append(
 					restartErrors,
-					fmt.Errorf("(*script).stopContainersAndServices: error looking up parent service: %w", err),
+					errwrap.Wrap(err, "error looking up parent service"),
 				)
 				continue
 			}
@@ -311,10 +323,12 @@ func (s *script) stopContainersAndServices() (func() error, error) {
 
 		allErrors := append(restartErrors, scaleUpErrors.value()...)
 		if len(allErrors) != 0 {
-			return fmt.Errorf(
-				"(*script).stopContainersAndServices: %d error(s) restarting containers and services: %w",
-				len(allErrors),
+			return errwrap.Wrap(
 				errors.Join(allErrors...),
+				fmt.Sprintf(
+					"%d error(s) restarting containers and services",
+					len(allErrors),
+				),
 			)
 		}
 
diff --git a/cmd/backup/util.go b/cmd/backup/util.go
index 814b961..a564f0f 100644
--- a/cmd/backup/util.go
+++ b/cmd/backup/util.go
@@ -11,6 +11,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 	"github.com/robfig/cron/v3"
 )
 
@@ -23,7 +24,7 @@ func remove(location string) error {
 		if os.IsNotExist(err) {
 			return nil
 		}
-		return fmt.Errorf("remove: error checking for existence of `%s`: %w", location, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error checking for existence of `%s`", location))
 	}
 	if fi.IsDir() {
 		err = os.RemoveAll(location)
@@ -31,7 +32,7 @@ func remove(location string) error {
 		err = os.Remove(location)
 	}
 	if err != nil {
-		return fmt.Errorf("remove: error removing `%s`: %w", location, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error removing `%s`", location))
 	}
 	return nil
 }
@@ -50,7 +51,7 @@ type bufferingWriter struct {
 
 func (b *bufferingWriter) Write(p []byte) (n int, err error) {
 	if n, err := b.buf.Write(p); err != nil {
-		return n, fmt.Errorf("(*bufferingWriter).Write: error writing to buffer: %w", err)
+		return n, errwrap.Wrap(err, "error writing to buffer")
 	}
 	return b.writer.Write(p)
 }
diff --git a/internal/errwrap/wrap.go b/internal/errwrap/wrap.go
new file mode 100644
index 0000000..8f39fa4
--- /dev/null
+++ b/internal/errwrap/wrap.go
@@ -0,0 +1,43 @@
+// Copyright 2024 - Offen Authors
+// SPDX-License-Identifier: MPL-2.0
+
+package errwrap
+
+import (
+	"errors"
+	"fmt"
+	"runtime"
+	"strings"
+)
+
+// Wrap wraps the given error using the given message while prepending
+// the name of the calling function, creating a poor man's stack trace
+func Wrap(err error, msg string) error {
+	pc := make([]uintptr, 15)
+	n := runtime.Callers(2, pc)
+	frames := runtime.CallersFrames(pc[:n])
+	frame, _ := frames.Next()
+	// strip full import paths and just use the package name
+	chunks := strings.Split(frame.Function, "/")
+	withCaller := fmt.Sprintf("%s: %s", chunks[len(chunks)-1], msg)
+	if err == nil {
+		return errors.New(withCaller)
+	}
+	return fmt.Errorf("%s: %w", withCaller, err)
+}
+
+// Unwrap receives an error and returns the last error in the chain of
+// wrapped errors
+func Unwrap(err error) error {
+	if err == nil {
+		return nil
+	}
+	for {
+		u := errors.Unwrap(err)
+		if u == nil {
+			break
+		}
+		err = u
+	}
+	return err
+}
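The new errwrap package above is the crux of this patch: call sites drop their hand-maintained "funcName:" prefixes and let Wrap derive the caller from the stack. A self-contained sketch of the resulting behavior (doBackup and the messages are illustrative; the import path assumes code living inside this module):

    package main

    import (
    	"fmt"

    	"github.com/offen/docker-volume-backup/internal/errwrap"
    )

    func doBackup() error {
    	// Wrap resolves the calling frame, so this comes out as
    	// "main.doBackup: error creating archive: disk full".
    	return errwrap.Wrap(fmt.Errorf("disk full"), "error creating archive")
    }

    func main() {
    	err := doBackup()
    	fmt.Println(err)
    	fmt.Println(errwrap.Unwrap(err)) // disk full
    }

Passing a nil error, as many hunks in this patch do, yields just the caller-prefixed message, which is why errwrap.Wrap(nil, msg) can replace both errors.New and non-wrapping fmt.Errorf call sites.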
diff --git a/internal/storage/azure/azure.go b/internal/storage/azure/azure.go
index 00fe3ac..8bc11b0 100644
--- a/internal/storage/azure/azure.go
+++ b/internal/storage/azure/azure.go
@@ -18,6 +18,7 @@ import (
 	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 	"github.com/offen/docker-volume-backup/internal/storage"
 )
 
@@ -40,11 +41,11 @@ type Config struct {
 func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
 	endpointTemplate, err := template.New("endpoint").Parse(opts.Endpoint)
 	if err != nil {
-		return nil, fmt.Errorf("NewStorageBackend: error parsing endpoint template: %w", err)
+		return nil, errwrap.Wrap(err, "error parsing endpoint template")
 	}
 	var ep bytes.Buffer
 	if err := endpointTemplate.Execute(&ep, opts); err != nil {
-		return nil, fmt.Errorf("NewStorageBackend: error executing endpoint template: %w", err)
+		return nil, errwrap.Wrap(err, "error executing endpoint template")
 	}
 	normalizedEndpoint := fmt.Sprintf("%s/", strings.TrimSuffix(ep.String(), "/"))
 
@@ -52,21 +53,21 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
 	if opts.PrimaryAccountKey != "" {
 		cred, err := azblob.NewSharedKeyCredential(opts.AccountName, opts.PrimaryAccountKey)
 		if err != nil {
-			return nil, fmt.Errorf("NewStorageBackend: error creating shared key Azure credential: %w", err)
+			return nil, errwrap.Wrap(err, "error creating shared key Azure credential")
 		}
 
 		client, err = azblob.NewClientWithSharedKeyCredential(normalizedEndpoint, cred, nil)
 		if err != nil {
-			return nil, fmt.Errorf("NewStorageBackend: error creating Azure client: %w", err)
+			return nil, errwrap.Wrap(err, "error creating Azure client")
 		}
 	} else {
 		cred, err := azidentity.NewManagedIdentityCredential(nil)
 		if err != nil {
-			return nil, fmt.Errorf("NewStorageBackend: error creating managed identity credential: %w", err)
+			return nil, errwrap.Wrap(err, "error creating managed identity credential")
 		}
 		client, err = azblob.NewClient(normalizedEndpoint, cred, nil)
 		if err != nil {
-			return nil, fmt.Errorf("NewStorageBackend: error creating Azure client: %w", err)
+			return nil, errwrap.Wrap(err, "error creating Azure client")
 		}
 	}
 
@@ -90,7 +91,7 @@ func (b *azureBlobStorage) Name() string {
 func (b *azureBlobStorage) Copy(file string) error {
 	fileReader, err := os.Open(file)
 	if err != nil {
-		return fmt.Errorf("(*azureBlobStorage).Copy: error opening file %s: %w", file, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error opening file %s", file))
 	}
 	_, err = b.client.UploadStream(
 		context.Background(),
@@ -100,7 +101,7 @@ func (b *azureBlobStorage) Copy(file string) error {
 		nil,
 	)
 	if err != nil {
-		return fmt.Errorf("(*azureBlobStorage).Copy: error uploading file %s: %w", file, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error uploading file %s", file))
 	}
 	return nil
 }
@@ -117,7 +118,7 @@ func (b *azureBlobStorage) Prune(deadline time.Time, pruningPrefix string) (*sto
 	for pager.More() {
 		resp, err := pager.NextPage(context.Background())
 		if err != nil {
-			return nil, fmt.Errorf("(*azureBlobStorage).Prune: error paging over blobs: %w", err)
+			return nil, errwrap.Wrap(err, "error paging over blobs")
 		}
 		for _, v := range resp.Segment.BlobItems {
 			totalCount++
diff --git a/internal/storage/dropbox/dropbox.go b/internal/storage/dropbox/dropbox.go
index c6b5b17..1a58a59 100644
--- a/internal/storage/dropbox/dropbox.go
+++ b/internal/storage/dropbox/dropbox.go
@@ -14,6 +14,7 @@ import (
 
 	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
 	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 	"github.com/offen/docker-volume-backup/internal/storage"
 	"golang.org/x/oauth2"
 )
@@ -51,7 +52,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
 	tkSource := conf.TokenSource(context.Background(), &oauth2.Token{RefreshToken: opts.RefreshToken})
 	token, err := tkSource.Token()
 	if err != nil {
-		return nil, fmt.Errorf("(*dropboxStorage).NewStorageBackend: Error refreshing token: %w", err)
+		return nil, errwrap.Wrap(err, "error refreshing token")
 	}
 
 	dbxConfig := dropbox.Config{
@@ -95,29 +96,28 @@ func (b *dropboxStorage) Copy(file string) error {
 		switch err := err.(type) {
 		case files.CreateFolderV2APIError:
 			if err.EndpointError.Path.Tag != files.WriteErrorConflict {
-				return fmt.Errorf("(*dropboxStorage).Copy: Error creating directory '%s': %w", b.DestinationPath, err)
+				return errwrap.Wrap(err, fmt.Sprintf("error creating directory '%s'", b.DestinationPath))
 			}
 			b.Log(storage.LogLevelInfo, b.Name(), "Destination path '%s' already exists, no new directory required.", b.DestinationPath)
 		default:
-			return fmt.Errorf("(*dropboxStorage).Copy: Error creating directory '%s': %w", b.DestinationPath, err)
+			return errwrap.Wrap(err, fmt.Sprintf("error creating directory '%s'", b.DestinationPath))
 		}
 	}
 
 	r, err := os.Open(file)
 	if err != nil {
-		return fmt.Errorf("(*dropboxStorage).Copy: Error opening the file to be uploaded: %w", err)
+		return errwrap.Wrap(err, "error opening the file to be uploaded")
 	}
 	defer r.Close()
 
 	// Start new upload session and get session id
-
 	b.Log(storage.LogLevelInfo, b.Name(), "Starting upload session for backup '%s' at path '%s'.", file, b.DestinationPath)
 	var sessionId string
 	uploadSessionStartArg := files.NewUploadSessionStartArg()
 	uploadSessionStartArg.SessionType = &files.UploadSessionType{Tagged: dropbox.Tagged{Tag: files.UploadSessionTypeConcurrent}}
 	if res, err := b.client.UploadSessionStart(uploadSessionStartArg, nil); err != nil {
-		return fmt.Errorf("(*dropboxStorage).Copy: Error starting the upload session: %w", err)
+		return errwrap.Wrap(err, "error starting the upload session")
 	} else {
 		sessionId = res.SessionId
 	}
@@ -165,7 +165,7 @@ loop:
 
 			bytesRead, err := r.Read(chunk)
 			if err != nil {
-				errorChn <- fmt.Errorf("(*dropboxStorage).Copy: Error reading the file to be uploaded: %w", err)
+				errorChn <- errwrap.Wrap(err, "error reading the file to be uploaded")
 				mu.Unlock()
 				return
 			}
@@ -184,7 +184,7 @@ loop:
 			mu.Unlock()
 
 			if err := b.client.UploadSessionAppendV2(uploadSessionAppendArg, bytes.NewReader(chunk)); err != nil {
-				errorChn <- fmt.Errorf("(*dropboxStorage).Copy: Error appending the file to the upload session: %w", err)
+				errorChn <- errwrap.Wrap(err, "error appending the file to the upload session")
 				return
 			}
 		}()
@@ -198,7 +198,7 @@ loop:
 		files.NewCommitInfo(filepath.Join(b.DestinationPath, name)),
 	), nil)
 	if err != nil {
-		return fmt.Errorf("(*dropboxStorage).Copy: Error finishing the upload session: %w", err)
+		return errwrap.Wrap(err, "error finishing the upload session")
 	}
 
 	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' at path '%s'.", file, b.DestinationPath)
@@ -211,14 +211,14 @@ func (b *dropboxStorage) Prune(deadline time.Time, pruningPrefix string) (*stora
 	var entries []files.IsMetadata
 	res, err := b.client.ListFolder(files.NewListFolderArg(b.DestinationPath))
 	if err != nil {
-		return nil, fmt.Errorf("(*webDavStorage).Prune: Error looking up candidates from remote storage: %w", err)
+		return nil, errwrap.Wrap(err, "error looking up candidates from remote storage")
 	}
 	entries = append(entries, res.Entries...)
 
 	for res.HasMore {
 		res, err = b.client.ListFolderContinue(files.NewListFolderContinueArg(res.Cursor))
 		if err != nil {
-			return nil, fmt.Errorf("(*webDavStorage).Prune: Error looking up candidates from remote storage: %w", err)
+			return nil, errwrap.Wrap(err, "error looking up candidates from remote storage")
 		}
 		entries = append(entries, res.Entries...)
 	}
@@ -248,7 +248,7 @@ func (b *dropboxStorage) Prune(deadline time.Time, pruningPrefix string) (*stora
 	pruneErr := b.DoPrune(b.Name(), len(matches), lenCandidates, deadline, func() error {
 		for _, match := range matches {
 			if _, err := b.client.DeleteV2(files.NewDeleteArg(filepath.Join(b.DestinationPath, match.Name))); err != nil {
-				return fmt.Errorf("(*dropboxStorage).Prune: Error removing file from Dropbox storage: %w", err)
+				return errwrap.Wrap(err, "error removing file from Dropbox storage")
 			}
 		}
 		return nil
diff --git a/internal/storage/local/local.go b/internal/storage/local/local.go
index d85f4a6..2232a9f 100644
--- a/internal/storage/local/local.go
+++ b/internal/storage/local/local.go
@@ -12,6 +12,7 @@ import (
 	"path/filepath"
 	"time"
 
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 	"github.com/offen/docker-volume-backup/internal/storage"
 )
 
@@ -47,7 +48,7 @@ func (b *localStorage) Copy(file string) error {
 	_, name := path.Split(file)
 
 	if err := copyFile(file, path.Join(b.DestinationPath, name)); err != nil {
-		return fmt.Errorf("(*localStorage).Copy: Error copying file to archive: %w", err)
+		return errwrap.Wrap(err, "error copying file to archive")
 	}
 	b.Log(storage.LogLevelInfo, b.Name(), "Stored copy of backup `%s` in `%s`.", file, b.DestinationPath)
 
@@ -57,7 +58,7 @@ func (b *localStorage) Copy(file string) error {
 			os.Remove(symlink)
 		}
 		if err := os.Symlink(name, symlink); err != nil {
-			return fmt.Errorf("(*localStorage).Copy: error creating latest symlink: %w", err)
+			return errwrap.Wrap(err, "error creating latest symlink")
 		}
 		b.Log(storage.LogLevelInfo, b.Name(), "Created/Updated symlink `%s` for latest backup.", b.latestSymlink)
 	}
@@ -73,10 +74,12 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
 	)
 	globMatches, err := filepath.Glob(globPattern)
 	if err != nil {
-		return nil, fmt.Errorf(
-			"(*localStorage).Prune: Error looking up matching files using pattern %s: %w",
-			globPattern,
+		return nil, errwrap.Wrap(
 			err,
+			fmt.Sprintf(
+				"error looking up matching files using pattern %s",
+				globPattern,
+			),
 		)
 	}
 
@@ -84,10 +87,12 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
 	for _, candidate := range globMatches {
 		fi, err := os.Lstat(candidate)
 		if err != nil {
-			return nil, fmt.Errorf(
-				"(*localStorage).Prune: Error calling Lstat on file %s: %w",
-				candidate,
+			return nil, errwrap.Wrap(
 				err,
+				fmt.Sprintf(
+					"error calling Lstat on file %s",
+					candidate,
+				),
 			)
 		}
 
@@ -100,10 +105,12 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
 	for _, candidate := range candidates {
 		fi, err := os.Stat(candidate)
 		if err != nil {
-			return nil, fmt.Errorf(
-				"(*localStorage).Prune: Error calling stat on file %s: %w",
-				candidate,
+			return nil, errwrap.Wrap(
 				err,
+				fmt.Sprintf(
+					"error calling stat on file %s",
+					candidate,
+				),
 			)
 		}
 		if fi.ModTime().Before(deadline) {
@@ -124,10 +131,12 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
 			}
 		}
 		if len(removeErrors) != 0 {
-			return fmt.Errorf(
-				"(*localStorage).Prune: %d error(s) deleting files, starting with: %w",
-				len(removeErrors),
+			return errwrap.Wrap(
 				errors.Join(removeErrors...),
+				fmt.Sprintf(
+					"%d error(s) deleting files",
+					len(removeErrors),
+				),
 			)
 		}
 		return nil
diff --git a/internal/storage/s3/s3.go b/internal/storage/s3/s3.go
index 0578b8c..3e3102f 100644
--- a/internal/storage/s3/s3.go
+++ b/internal/storage/s3/s3.go
@@ -15,6 +15,7 @@ import (
 
 	"github.com/minio/minio-go/v7"
 	"github.com/minio/minio-go/v7/pkg/credentials"
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 	"github.com/offen/docker-volume-backup/internal/storage"
 )
 
@@ -53,7 +54,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
 	} else if opts.IamRoleEndpoint != "" {
 		creds = credentials.NewIAM(opts.IamRoleEndpoint)
 	} else {
-		return nil, errors.New("NewStorageBackend: AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
+		return nil, errwrap.Wrap(nil, "AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
 	}
 
 	options := minio.Options{
@@ -63,12 +64,12 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
 
 	transport, err := minio.DefaultTransport(true)
 	if err != nil {
-		return nil, fmt.Errorf("NewStorageBackend: failed to create default minio transport: %w", err)
+		return nil, errwrap.Wrap(err, "failed to create default minio transport")
 	}
 
 	if opts.EndpointInsecure {
 		if !options.Secure {
-			return nil, errors.New("NewStorageBackend: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
+			return nil, errwrap.Wrap(nil, "AWS_ENDPOINT_INSECURE = true is only meaningful for https")
 		}
 		transport.TLSClientConfig.InsecureSkipVerify = true
 	} else if opts.CACert != nil {
@@ -81,7 +82,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
 
 	mc, err := minio.New(opts.Endpoint, &options)
 	if err != nil {
-		return nil, fmt.Errorf("NewStorageBackend: error setting up minio client: %w", err)
+		return nil, errwrap.Wrap(err, "error setting up minio client")
 	}
 
 	return &s3Storage{
@@ -112,12 +113,12 @@ func (b *s3Storage) Copy(file string) error {
 	if b.partSize > 0 {
 		srcFileInfo, err := os.Stat(file)
 		if err != nil {
-			return fmt.Errorf("(*s3Storage).Copy: error reading the local file: %w", err)
+			return errwrap.Wrap(err, "error reading the local file")
 		}
 
 		_, partSize, _, err := minio.OptimalPartInfo(srcFileInfo.Size(), uint64(b.partSize*1024*1024))
 		if err != nil {
-			return fmt.Errorf("(*s3Storage).Copy: error computing the optimal s3 part size: %w", err)
+			return errwrap.Wrap(err, "error computing the optimal s3 part size")
 		}
 
 		putObjectOptions.PartSize = uint64(partSize)
@@ -125,14 +126,17 @@ func (b *s3Storage) Copy(file string) error {
 
 	if _, err := b.client.FPutObject(context.Background(), b.bucket, filepath.Join(b.DestinationPath, name), file, putObjectOptions); err != nil {
 		if errResp := minio.ToErrorResponse(err); errResp.Message != "" {
-			return fmt.Errorf(
-				"(*s3Storage).Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d",
-				errResp.Message,
-				errResp.Code,
-				errResp.StatusCode,
+			return errwrap.Wrap(
+				nil,
+				fmt.Sprintf(
+					"error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d",
+					errResp.Message,
+					errResp.Code,
+					errResp.StatusCode,
+				),
 			)
 		}
-		return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: %w", err)
uploading backup to remote storage") } b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to bucket `%s`.", file, b.bucket) @@ -152,9 +156,9 @@ func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.Pr for candidate := range candidates { lenCandidates++ if candidate.Err != nil { - return nil, fmt.Errorf( - "(*s3Storage).Prune: error looking up candidates from remote storage! %w", + return nil, errwrap.Wrap( candidate.Err, + "error looking up candidates from remote storage", ) } if candidate.LastModified.Before(deadline) { diff --git a/internal/storage/ssh/ssh.go b/internal/storage/ssh/ssh.go index 7a03875..0866e67 100644 --- a/internal/storage/ssh/ssh.go +++ b/internal/storage/ssh/ssh.go @@ -4,7 +4,6 @@ package ssh import ( - "errors" "fmt" "io" "os" @@ -13,6 +12,7 @@ import ( "strings" "time" + "github.com/offen/docker-volume-backup/internal/errwrap" "github.com/offen/docker-volume-backup/internal/storage" "github.com/pkg/sftp" "golang.org/x/crypto/ssh" @@ -47,20 +47,20 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error if _, err := os.Stat(opts.IdentityFile); err == nil { key, err := os.ReadFile(opts.IdentityFile) if err != nil { - return nil, errors.New("NewStorageBackend: error reading the private key") + return nil, errwrap.Wrap(nil, "error reading the private key") } var signer ssh.Signer if opts.IdentityPassphrase != "" { signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(opts.IdentityPassphrase)) if err != nil { - return nil, errors.New("NewStorageBackend: error parsing the encrypted private key") + return nil, errwrap.Wrap(nil, "error parsing the encrypted private key") } authMethods = append(authMethods, ssh.PublicKeys(signer)) } else { signer, err = ssh.ParsePrivateKey(key) if err != nil { - return nil, errors.New("NewStorageBackend: error parsing the private key") + return nil, errwrap.Wrap(nil, "error parsing the private key") } authMethods = append(authMethods, ssh.PublicKeys(signer)) } @@ -74,7 +74,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", opts.HostName, opts.Port), sshClientConfig) if err != nil { - return nil, fmt.Errorf("NewStorageBackend: error creating ssh client: %w", err) + return nil, errwrap.Wrap(err, "error creating ssh client") } _, _, err = sshClient.SendRequest("keepalive", false, nil) if err != nil { @@ -87,7 +87,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error sftp.MaxConcurrentRequestsPerFile(64), ) if err != nil { - return nil, fmt.Errorf("NewStorageBackend: error creating sftp client: %w", err) + return nil, errwrap.Wrap(err, "error creating sftp client") } return &sshStorage{ @@ -111,13 +111,13 @@ func (b *sshStorage) Copy(file string) error { source, err := os.Open(file) _, name := path.Split(file) if err != nil { - return fmt.Errorf("(*sshStorage).Copy: error reading the file to be uploaded: %w", err) + return errwrap.Wrap(err, " error reading the file to be uploaded") } defer source.Close() destination, err := b.sftpClient.Create(filepath.Join(b.DestinationPath, name)) if err != nil { - return fmt.Errorf("(*sshStorage).Copy: error creating file: %w", err) + return errwrap.Wrap(err, "error creating file") } defer destination.Close() @@ -127,27 +127,27 @@ func (b *sshStorage) Copy(file string) error { if err == io.EOF { tot, err := destination.Write(chunk[:num]) if err != nil { - return fmt.Errorf("(*sshStorage).Copy: error uploading the 
file: %w", err) + return errwrap.Wrap(err, "error uploading the file") } if tot != len(chunk[:num]) { - return errors.New("(*sshStorage).Copy: failed to write stream") + return errwrap.Wrap(nil, "failed to write stream") } break } if err != nil { - return fmt.Errorf("(*sshStorage).Copy: error uploading the file: %w", err) + return errwrap.Wrap(err, "error uploading the file") } tot, err := destination.Write(chunk[:num]) if err != nil { - return fmt.Errorf("(*sshStorage).Copy: error uploading the file: %w", err) + return errwrap.Wrap(err, "error uploading the file") } if tot != len(chunk[:num]) { - return fmt.Errorf("(*sshStorage).Copy: failed to write stream") + return errwrap.Wrap(nil, "failed to write stream") } } @@ -160,7 +160,7 @@ func (b *sshStorage) Copy(file string) error { func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) { candidates, err := b.sftpClient.ReadDir(b.DestinationPath) if err != nil { - return nil, fmt.Errorf("(*sshStorage).Prune: error reading directory: %w", err) + return nil, errwrap.Wrap(err, "error reading directory") } var matches []string @@ -181,7 +181,7 @@ func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.P pruneErr := b.DoPrune(b.Name(), len(matches), len(candidates), deadline, func() error { for _, match := range matches { if err := b.sftpClient.Remove(filepath.Join(b.DestinationPath, match)); err != nil { - return fmt.Errorf("(*sshStorage).Prune: error removing file: %w", err) + return errwrap.Wrap(err, "error removing file") } } return nil diff --git a/internal/storage/storage.go b/internal/storage/storage.go index 8da7f1f..e94b5dd 100644 --- a/internal/storage/storage.go +++ b/internal/storage/storage.go @@ -4,8 +4,9 @@ package storage import ( - "fmt" "time" + + "github.com/offen/docker-volume-backup/internal/errwrap" ) // Backend is an interface for defining functions which all storage providers support. @@ -26,7 +27,6 @@ type LogLevel int const ( LogLevelInfo LogLevel = iota LogLevelWarning - LogLevelError ) type Log func(logType LogLevel, context string, msg string, params ...any) @@ -47,7 +47,7 @@ func (b *StorageBackend) DoPrune(context string, lenMatches, lenCandidates int, formattedDeadline, err := deadline.Local().MarshalText() if err != nil { - return fmt.Errorf("(*StorageBackend).DoPrune: error marshaling deadline: %w", err) + return errwrap.Wrap(err, "error marshaling deadline") } b.Log(LogLevelInfo, context, "Pruned %d out of %d backups as they were older than the given deadline of %s.", diff --git a/internal/storage/webdav/webdav.go b/internal/storage/webdav/webdav.go index c1484ac..a5224c9 100644 --- a/internal/storage/webdav/webdav.go +++ b/internal/storage/webdav/webdav.go @@ -4,7 +4,6 @@ package webdav import ( - "errors" "fmt" "io/fs" "net/http" @@ -14,6 +13,7 @@ import ( "strings" "time" + "github.com/offen/docker-volume-backup/internal/errwrap" "github.com/offen/docker-volume-backup/internal/storage" "github.com/studio-b12/gowebdav" ) @@ -36,14 +36,14 @@ type Config struct { // NewStorageBackend creates and initializes a new WebDav storage backend. 
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) { if opts.Username == "" || opts.Password == "" { - return nil, errors.New("NewStorageBackend: WEBDAV_URL is defined, but no credentials were provided") + return nil, errwrap.Wrap(nil, "WEBDAV_URL is defined, but no credentials were provided") } else { webdavClient := gowebdav.NewClient(opts.URL, opts.Username, opts.Password) if opts.URLInsecure { defaultTransport, ok := http.DefaultTransport.(*http.Transport) if !ok { - return nil, errors.New("NewStorageBackend: unexpected error when asserting type for http.DefaultTransport") + return nil, errwrap.Wrap(nil, "unexpected error when asserting type for http.DefaultTransport") } webdavTransport := defaultTransport.Clone() webdavTransport.TLSClientConfig.InsecureSkipVerify = opts.URLInsecure @@ -69,16 +69,16 @@ func (b *webDavStorage) Name() string { func (b *webDavStorage) Copy(file string) error { _, name := path.Split(file) if err := b.client.MkdirAll(b.DestinationPath, 0644); err != nil { - return fmt.Errorf("(*webDavStorage).Copy: error creating directory '%s' on server: %w", b.DestinationPath, err) + return errwrap.Wrap(err, fmt.Sprintf("error creating directory '%s' on server", b.DestinationPath)) } r, err := os.Open(file) if err != nil { - return fmt.Errorf("(*webDavStorage).Copy: error opening the file to be uploaded: %w", err) + return errwrap.Wrap(err, "error opening the file to be uploaded") } if err := b.client.WriteStream(filepath.Join(b.DestinationPath, name), r, 0644); err != nil { - return fmt.Errorf("(*webDavStorage).Copy: error uploading the file: %w", err) + return errwrap.Wrap(err, "error uploading the file") } b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' to '%s' at path '%s'.", file, b.url, b.DestinationPath) @@ -89,7 +89,7 @@ func (b *webDavStorage) Copy(file string) error { func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) { candidates, err := b.client.ReadDir(b.DestinationPath) if err != nil { - return nil, fmt.Errorf("(*webDavStorage).Prune: error looking up candidates from remote storage: %w", err) + return nil, errwrap.Wrap(err, "error looking up candidates from remote storage") } var matches []fs.FileInfo var lenCandidates int @@ -111,7 +111,7 @@ func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storag pruneErr := b.DoPrune(b.Name(), len(matches), lenCandidates, deadline, func() error { for _, match := range matches { if err := b.client.Remove(filepath.Join(b.DestinationPath, match.Name())); err != nil { - return fmt.Errorf("(*webDavStorage).Prune: error removing file: %w", err) + return errwrap.Wrap(err, "error removing file") } } return nil