Mirror of https://github.com/offen/docker-volume-backup.git (synced 2024-11-22 05:10:28 +01:00)

commit 52c22a1891: Auto prepend caller when wrapping errors
parent: 83fa0aae48
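Every change in this diff follows one idea: instead of each call site hard-coding its own function name into the error string (fmt.Errorf("createArchive: ...: %w", err)), wrapping moves to a new internal/errwrap package that prepends the caller automatically. The package itself is not part of this diff, so the following is only a minimal sketch, assuming runtime.Caller is used to resolve the calling function; the names and message layout are assumptions, not the actual implementation:

	// Sketch only: the real internal/errwrap implementation is not shown
	// in this commit. Wrap annotates err with message plus the caller's
	// function name; a nil err yields a fresh error carrying only the
	// annotation, which is how the diff replaces errors.New and bare
	// fmt.Errorf calls.
	package errwrap

	import (
		"errors"
		"fmt"
		"runtime"
		"strings"
	)

	func Wrap(err error, message string) error {
		caller := "unknown"
		if pc, _, _, ok := runtime.Caller(1); ok {
			if fn := runtime.FuncForPC(pc); fn != nil {
				name := fn.Name()
				// Trim the package path, keeping e.g. "main.createArchive".
				caller = name[strings.LastIndex(name, "/")+1:]
			}
		}
		if err == nil {
			return fmt.Errorf("%s: %s", caller, message)
		}
		return fmt.Errorf("%s: %s: %w", caller, message, err)
	}

	// Unwrap walks to the innermost error, which the diff uses to keep
	// human-readable log lines free of the accumulated caller prefixes.
	func Unwrap(err error) error {
		for {
			next := errors.Unwrap(err)
			if next == nil {
				return err
			}
			err = next
		}
	}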
@@ -16,23 +16,23 @@ import (
 	"runtime"
 	"strings"
 
-	"github.com/klauspost/pgzip"
-
 	"github.com/klauspost/compress/zstd"
+	"github.com/klauspost/pgzip"
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 )
 
 func createArchive(files []string, inputFilePath, outputFilePath string, compression string, compressionConcurrency int) error {
 	inputFilePath = stripTrailingSlashes(inputFilePath)
 	inputFilePath, outputFilePath, err := makeAbsolute(inputFilePath, outputFilePath)
 	if err != nil {
-		return fmt.Errorf("createArchive: error transposing given file paths: %w", err)
+		return errwrap.Wrap(err, "error transposing given file paths")
 	}
 	if err := os.MkdirAll(filepath.Dir(outputFilePath), 0755); err != nil {
-		return fmt.Errorf("createArchive: error creating output file path: %w", err)
+		return errwrap.Wrap(err, "error creating output file path")
 	}
 
 	if err := compress(files, outputFilePath, filepath.Dir(inputFilePath), compression, compressionConcurrency); err != nil {
-		return fmt.Errorf("createArchive: error creating archive: %w", err)
+		return errwrap.Wrap(err, "error creating archive")
 	}
 
 	return nil
@@ -58,35 +58,35 @@ func makeAbsolute(inputFilePath, outputFilePath string) (string, string, error)
 func compress(paths []string, outFilePath, subPath string, algo string, concurrency int) error {
 	file, err := os.Create(outFilePath)
 	if err != nil {
-		return fmt.Errorf("compress: error creating out file: %w", err)
+		return errwrap.Wrap(err, "error creating out file")
 	}
 
 	prefix := path.Dir(outFilePath)
 	compressWriter, err := getCompressionWriter(file, algo, concurrency)
 	if err != nil {
-		return fmt.Errorf("compress: error getting compression writer: %w", err)
+		return errwrap.Wrap(err, "error getting compression writer")
 	}
 	tarWriter := tar.NewWriter(compressWriter)
 
 	for _, p := range paths {
 		if err := writeTarball(p, tarWriter, prefix); err != nil {
-			return fmt.Errorf("compress: error writing %s to archive: %w", p, err)
+			return errwrap.Wrap(err, fmt.Sprintf("error writing %s to archive", p))
 		}
 	}
 
 	err = tarWriter.Close()
 	if err != nil {
-		return fmt.Errorf("compress: error closing tar writer: %w", err)
+		return errwrap.Wrap(err, "error closing tar writer")
 	}
 
 	err = compressWriter.Close()
 	if err != nil {
-		return fmt.Errorf("compress: error closing compression writer: %w", err)
+		return errwrap.Wrap(err, "error closing compression writer")
 	}
 
 	err = file.Close()
 	if err != nil {
-		return fmt.Errorf("compress: error closing file: %w", err)
+		return errwrap.Wrap(err, "error closing file")
 	}
 
 	return nil
@@ -97,7 +97,7 @@ func getCompressionWriter(file *os.File, algo string, concurrency int) (io.Write
 	case "gz":
 		w, err := pgzip.NewWriterLevel(file, 5)
 		if err != nil {
-			return nil, fmt.Errorf("getCompressionWriter: gzip error: %w", err)
+			return nil, errwrap.Wrap(err, "gzip error")
 		}
 
 		if concurrency == 0 {
@@ -105,25 +105,25 @@ func getCompressionWriter(file *os.File, algo string, concurrency int) (io.Write
 		}
 
 		if err := w.SetConcurrency(1<<20, concurrency); err != nil {
-			return nil, fmt.Errorf("getCompressionWriter: error setting concurrency: %w", err)
+			return nil, errwrap.Wrap(err, "error setting concurrency")
 		}
 
 		return w, nil
 	case "zst":
 		compressWriter, err := zstd.NewWriter(file)
 		if err != nil {
-			return nil, fmt.Errorf("getCompressionWriter: zstd error: %w", err)
+			return nil, errwrap.Wrap(err, "zstd error")
 		}
 		return compressWriter, nil
 	default:
-		return nil, fmt.Errorf("getCompressionWriter: unsupported compression algorithm: %s", algo)
+		return nil, errwrap.Wrap(nil, fmt.Sprintf("unsupported compression algorithm: %s", algo))
 	}
 }
 
 func writeTarball(path string, tarWriter *tar.Writer, prefix string) error {
 	fileInfo, err := os.Lstat(path)
 	if err != nil {
-		return fmt.Errorf("writeTarball: error getting file infor for %s: %w", path, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error getting file info for %s", path))
 	}
 
 	if fileInfo.Mode()&os.ModeSocket == os.ModeSocket {
@@ -134,19 +134,19 @@ func writeTarball(path string, tarWriter *tar.Writer, prefix string) error {
 	if fileInfo.Mode()&os.ModeSymlink == os.ModeSymlink {
 		var err error
 		if link, err = os.Readlink(path); err != nil {
-			return fmt.Errorf("writeTarball: error resolving symlink %s: %w", path, err)
+			return errwrap.Wrap(err, fmt.Sprintf("error resolving symlink %s", path))
 		}
 	}
 
 	header, err := tar.FileInfoHeader(fileInfo, link)
 	if err != nil {
-		return fmt.Errorf("writeTarball: error getting file info header: %w", err)
+		return errwrap.Wrap(err, "error getting file info header")
 	}
 	header.Name = strings.TrimPrefix(path, prefix)
 
 	err = tarWriter.WriteHeader(header)
 	if err != nil {
-		return fmt.Errorf("writeTarball: error writing file info header: %w", err)
+		return errwrap.Wrap(err, "error writing file info header")
 	}
 
 	if !fileInfo.Mode().IsRegular() {
@@ -155,13 +155,13 @@ func writeTarball(path string, tarWriter *tar.Writer, prefix string) error {
 
 	file, err := os.Open(path)
 	if err != nil {
-		return fmt.Errorf("writeTarball: error opening %s: %w", path, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error opening %s", path))
 	}
 	defer file.Close()
 
 	_, err = io.Copy(tarWriter, file)
 	if err != nil {
-		return fmt.Errorf("writeTarball: error copying %s to tar writer: %w", path, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error copying %s to tar writer", path))
 	}
 
 	return nil
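The first file shows the mechanical rewrite applied everywhere else in the commit: the hand-written caller prefix disappears from the format string, %w wrapping moves inside errwrap.Wrap, and any dynamic values shift into an fmt.Sprintf that builds just the message. Taken directly from the hunks above:

	// before
	return fmt.Errorf("compress: error writing %s to archive: %w", p, err)
	// after: the "compress" prefix is now derived inside errwrap.Wrap
	return errwrap.Wrap(err, fmt.Sprintf("error writing %s to archive", p))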
@@ -10,6 +10,7 @@ import (
 	"os/signal"
 	"syscall"
 
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 	"github.com/robfig/cron/v3"
 )
 
@@ -31,12 +32,12 @@ func newCommand() *command {
 func (c *command) runAsCommand() error {
 	configurations, err := sourceConfiguration(configStrategyEnv)
 	if err != nil {
-		return fmt.Errorf("runAsCommand: error loading env vars: %w", err)
+		return errwrap.Wrap(err, "error loading env vars")
 	}
 
 	for _, config := range configurations {
 		if err := runScript(config); err != nil {
-			return fmt.Errorf("runAsCommand: error running script: %w", err)
+			return errwrap.Wrap(err, "error running script")
 		}
 	}
 
@@ -59,12 +60,12 @@ func (c *command) runInForeground(opts foregroundOpts) error {
 	)
 
 	if err := c.schedule(configStrategyConfd); err != nil {
-		return fmt.Errorf("runInForeground: error scheduling: %w", err)
+		return errwrap.Wrap(err, "error scheduling")
 	}
 
 	if opts.profileCronExpression != "" {
 		if _, err := c.cr.AddFunc(opts.profileCronExpression, c.profile); err != nil {
-			return fmt.Errorf("runInForeground: error adding profiling job: %w", err)
+			return errwrap.Wrap(err, "error adding profiling job")
 		}
 	}
 
@@ -81,7 +82,7 @@ func (c *command) runInForeground(opts foregroundOpts) error {
 			return nil
 		case <-c.reload:
 			if err := c.schedule(configStrategyConfd); err != nil {
-				return fmt.Errorf("runInForeground: error reloading configuration: %w", err)
+				return errwrap.Wrap(err, "error reloading configuration")
 			}
 		}
 	}
@@ -96,7 +97,7 @@ func (c *command) schedule(strategy configStrategy) error {
 
 	configurations, err := sourceConfiguration(strategy)
 	if err != nil {
-		return fmt.Errorf("schedule: error sourcing configuration: %w", err)
+		return errwrap.Wrap(err, "error sourcing configuration")
 	}
 
 	for _, cfg := range configurations {
@@ -114,7 +115,7 @@ func (c *command) schedule(strategy configStrategy) error {
 				fmt.Sprintf(
 					"Unexpected error running schedule %s: %v",
 					config.BackupCronExpression,
-					err,
+					errwrap.Unwrap(err),
 				),
 				"error",
 				err,
@@ -123,7 +124,7 @@ func (c *command) schedule(strategy configStrategy) error {
 		})
 
 		if err != nil {
-			return fmt.Errorf("addJob: error adding schedule %s: %w", config.BackupCronExpression, err)
+			return errwrap.Wrap(err, fmt.Sprintf("error adding schedule %s", config.BackupCronExpression))
 		}
 		c.logger.Info(fmt.Sprintf("Successfully scheduled backup %s with expression %s", config.source, config.BackupCronExpression))
 		if ok := checkCronSchedule(config.BackupCronExpression); !ok {
@@ -132,7 +133,7 @@ func (c *command) schedule(strategy configStrategy) error {
 		)
 
 		if err != nil {
-			return fmt.Errorf("schedule: error scheduling: %w", err)
+			return errwrap.Wrap(err, "error scheduling")
 		}
 		c.schedules = append(c.schedules, id)
 	}
@@ -146,7 +147,7 @@ func (c *command) schedule(strategy configStrategy) error {
 func (c *command) must(err error) {
 	if err != nil {
 		c.logger.Error(
-			fmt.Sprintf("Fatal error running command: %v", err),
+			fmt.Sprintf("Fatal error running command: %v", errwrap.Unwrap(err)),
 			"error",
 			err,
 		)
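The scheduler and the must helper now log errwrap.Unwrap(err) in the human-readable message while still attaching the full wrapped error as the structured "error" attribute. Assuming Unwrap returns the innermost error, as in the sketch near the top, the printed line shows only the root cause while the attribute preserves the whole caller chain:

	// From the must hunk above; the exact behavior of Unwrap is an assumption.
	c.logger.Error(
		fmt.Sprintf("Fatal error running command: %v", errwrap.Unwrap(err)),
		"error", err,
	)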
@@ -11,6 +11,8 @@ import (
 	"regexp"
 	"strconv"
 	"time"
+
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 )
 
 // Config holds all configuration values that are expected to be set
@@ -92,7 +94,7 @@ func (c *CompressionType) Decode(v string) error {
 		*c = CompressionType(v)
 		return nil
 	default:
-		return fmt.Errorf("config: error decoding compression type %s", v)
+		return errwrap.Wrap(nil, fmt.Sprintf("error decoding compression type %s", v))
 	}
 }
 
@@ -115,7 +117,7 @@ func (c *CertDecoder) Decode(v string) error {
 	block, _ := pem.Decode(content)
 	cert, err := x509.ParseCertificate(block.Bytes)
 	if err != nil {
-		return fmt.Errorf("config: error parsing certificate: %w", err)
+		return errwrap.Wrap(err, "error parsing certificate")
 	}
 	*c = CertDecoder{Cert: cert}
 	return nil
@@ -131,7 +133,7 @@ func (r *RegexpDecoder) Decode(v string) error {
 	}
 	re, err := regexp.Compile(v)
 	if err != nil {
-		return fmt.Errorf("config: error compiling given regexp `%s`: %w", v, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error compiling given regexp `%s`", v))
 	}
 	*r = RegexpDecoder{Re: re}
 	return nil
@@ -143,10 +145,10 @@ type NaturalNumber int
 func (n *NaturalNumber) Decode(v string) error {
 	asInt, err := strconv.Atoi(v)
 	if err != nil {
-		return fmt.Errorf("config: error converting %s to int", v)
+		return errwrap.Wrap(nil, fmt.Sprintf("error converting %s to int", v))
 	}
 	if asInt <= 0 {
-		return fmt.Errorf("config: expected a natural number, got %d", asInt)
+		return errwrap.Wrap(nil, fmt.Sprintf("expected a natural number, got %d", asInt))
 	}
 	*n = NaturalNumber(asInt)
 	return nil
@@ -162,10 +164,10 @@ type WholeNumber int
 func (n *WholeNumber) Decode(v string) error {
 	asInt, err := strconv.Atoi(v)
 	if err != nil {
-		return fmt.Errorf("config: error converting %s to int", v)
+		return errwrap.Wrap(nil, fmt.Sprintf("error converting %s to int", v))
 	}
 	if asInt < 0 {
-		return fmt.Errorf("config: expected a whole, positive number, including zero. Got %d", asInt)
+		return errwrap.Wrap(nil, fmt.Sprintf("expected a whole, positive number, including zero. Got %d", asInt))
 	}
 	*n = WholeNumber(asInt)
 	return nil
@@ -191,12 +193,12 @@ func (c *Config) applyEnv() (func() error, error) {
 		for _, lookup := range lookups {
 			if !lookup.ok {
 				if err := os.Unsetenv(lookup.key); err != nil {
-					return fmt.Errorf("(*Config).applyEnv: error unsetting env var %s: %w", lookup.key, err)
+					return errwrap.Wrap(err, fmt.Sprintf("error unsetting env var %s", lookup.key))
 				}
 				continue
 			}
 			if err := os.Setenv(lookup.key, lookup.value); err != nil {
-				return fmt.Errorf("(*Config).applyEnv: error setting back env var %s: %w", lookup.key, err)
+				return errwrap.Wrap(err, fmt.Sprintf("error setting back env var %s", lookup.key))
 			}
 		}
 		return nil
@@ -206,7 +208,7 @@ func (c *Config) applyEnv() (func() error, error) {
 		current, ok := os.LookupEnv(key)
 		lookups = append(lookups, envVarLookup{ok: ok, key: key, value: current})
 		if err := os.Setenv(key, value); err != nil {
-			return unset, fmt.Errorf("(*Config).applyEnv: error setting env var: %w", err)
+			return unset, errwrap.Wrap(err, "error setting env var")
 		}
 	}
 	return unset, nil
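Note how the config decoders handle validation failures that have no underlying error to wrap: they pass nil as the first argument, so errwrap.Wrap acts as a caller-prefixed replacement for errors.New. Under the sketch near the top, a failed decode would behave roughly like this (the printed format is an assumption):

	// Hypothetical usage of the Decode method changed above.
	var n NaturalNumber
	if err := n.Decode("-3"); err != nil {
		fmt.Println(err) // e.g. "main.(*NaturalNumber).Decode: expected a natural number, got -3"
	}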
@@ -1,4 +1,4 @@
-// Copyright 2021-2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2024 - Offen Authors <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0
 
 package main
@@ -9,6 +9,7 @@ import (
 	"path/filepath"
 
 	"github.com/joho/godotenv"
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 	"github.com/offen/envconfig"
 )
 
@@ -33,11 +34,11 @@ func sourceConfiguration(strategy configStrategy) ([]*Config, error) {
 			if os.IsNotExist(err) {
 				return sourceConfiguration(configStrategyEnv)
 			}
-			return nil, fmt.Errorf("sourceConfiguration: error loading config files: %w", err)
+			return nil, errwrap.Wrap(err, "error loading config files")
 		}
 		return cs, nil
 	default:
-		return nil, fmt.Errorf("sourceConfiguration: received unknown config strategy: %v", strategy)
+		return nil, errwrap.Wrap(nil, fmt.Sprintf("received unknown config strategy: %v", strategy))
 	}
 }
 
@@ -68,7 +69,7 @@ func loadConfig(lookup envProxy) (*Config, error) {
 
 	var c = &Config{}
 	if err := envconfig.Process("", c); err != nil {
-		return nil, fmt.Errorf("loadConfig: failed to process configuration values: %w", err)
+		return nil, errwrap.Wrap(err, "failed to process configuration values")
 	}
 
 	return c, nil
@@ -77,7 +78,7 @@ func loadConfig(lookup envProxy) (*Config, error) {
 func loadConfigFromEnvVars() (*Config, error) {
 	c, err := loadConfig(os.LookupEnv)
 	if err != nil {
-		return nil, fmt.Errorf("loadEnvVars: error loading config from environment: %w", err)
+		return nil, errwrap.Wrap(err, "error loading config from environment")
 	}
 	c.source = "from environment"
 	return c, nil
@@ -89,7 +90,7 @@ func loadConfigsFromEnvFiles(directory string) ([]*Config, error) {
 		if os.IsNotExist(err) {
 			return nil, err
 		}
-		return nil, fmt.Errorf("loadEnvFiles: failed to read files from env directory: %w", err)
+		return nil, errwrap.Wrap(err, "failed to read files from env directory")
 	}
 
 	configs := []*Config{}
@@ -100,11 +101,11 @@ func loadConfigsFromEnvFiles(directory string) ([]*Config, error) {
 		p := filepath.Join(directory, item.Name())
 		f, err := os.ReadFile(p)
 		if err != nil {
-			return nil, fmt.Errorf("loadEnvFiles: error reading %s: %w", item.Name(), err)
+			return nil, errwrap.Wrap(err, fmt.Sprintf("error reading %s", item.Name()))
 		}
 		envFile, err := godotenv.Unmarshal(os.ExpandEnv(string(f)))
 		if err != nil {
-			return nil, fmt.Errorf("loadEnvFiles: error reading config file %s: %w", p, err)
+			return nil, errwrap.Wrap(err, fmt.Sprintf("error reading config file %s", p))
 		}
 		lookup := func(key string) (string, bool) {
 			val, ok := envFile[key]
@@ -115,7 +116,7 @@ func loadConfigsFromEnvFiles(directory string) ([]*Config, error) {
 		}
 		c, err := loadConfig(lookup)
 		if err != nil {
-			return nil, fmt.Errorf("loadEnvFiles: error loading config from file %s: %w", p, err)
+			return nil, errwrap.Wrap(err, fmt.Sprintf("error loading config from file %s", p))
 		}
 		c.source = item.Name()
 		c.additionalEnvVars = envFile
@@ -4,10 +4,10 @@
 package main
 
 import (
-	"fmt"
 	"os"
 	"path"
 
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 	"golang.org/x/sync/errgroup"
 )
 
@@ -16,7 +16,7 @@ import (
 func (s *script) copyArchive() error {
 	_, name := path.Split(s.file)
 	if stat, err := os.Stat(s.file); err != nil {
-		return fmt.Errorf("copyArchive: unable to stat backup file: %w", err)
+		return errwrap.Wrap(err, "unable to stat backup file")
 	} else {
 		size := stat.Size()
 		s.stats.BackupFile = BackupFileStats{
@@ -34,7 +34,7 @@ func (s *script) copyArchive() error {
 		})
 	}
 	if err := eg.Wait(); err != nil {
-		return fmt.Errorf("copyArchive: error copying archive: %w", err)
+		return errwrap.Wrap(err, "error copying archive")
 	}
 
 	return nil
@@ -8,6 +8,7 @@ import (
 	"io/fs"
 	"path/filepath"
 
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 	"github.com/otiai10/copy"
 )
 
@@ -27,7 +28,7 @@ func (s *script) createArchive() error {
 		// copy before compressing guard against a situation where backup folder's content are still growing.
 		s.registerHook(hookLevelPlumbing, func(error) error {
 			if err := remove(backupSources); err != nil {
-				return fmt.Errorf("createArchive: error removing snapshot: %w", err)
+				return errwrap.Wrap(err, "error removing snapshot")
 			}
 			s.logger.Info(
 				fmt.Sprintf("Removed snapshot `%s`.", backupSources),
@@ -38,7 +39,7 @@ func (s *script) createArchive() error {
 			PreserveTimes: true,
 			PreserveOwner: true,
 		}); err != nil {
-			return fmt.Errorf("createArchive: error creating snapshot: %w", err)
+			return errwrap.Wrap(err, "error creating snapshot")
 		}
 		s.logger.Info(
 			fmt.Sprintf("Created snapshot of `%s` at `%s`.", s.c.BackupSources, backupSources),
@@ -48,7 +49,7 @@ func (s *script) createArchive() error {
 	tarFile := s.file
 	s.registerHook(hookLevelPlumbing, func(error) error {
 		if err := remove(tarFile); err != nil {
-			return fmt.Errorf("createArchive: error removing tar file: %w", err)
+			return errwrap.Wrap(err, "error removing tar file")
 		}
 		s.logger.Info(
 			fmt.Sprintf("Removed tar file `%s`.", tarFile),
@@ -58,7 +59,7 @@ func (s *script) createArchive() error {
 
 	backupPath, err := filepath.Abs(stripTrailingSlashes(backupSources))
 	if err != nil {
-		return fmt.Errorf("createArchive: error getting absolute path: %w", err)
+		return errwrap.Wrap(err, "error getting absolute path")
 	}
 
 	var filesEligibleForBackup []string
@@ -73,11 +74,11 @@ func (s *script) createArchive() error {
 		filesEligibleForBackup = append(filesEligibleForBackup, path)
 		return nil
 	}); err != nil {
-		return fmt.Errorf("createArchive: error walking filesystem tree: %w", err)
+		return errwrap.Wrap(err, "error walking filesystem tree")
 	}
 
 	if err := createArchive(filesEligibleForBackup, backupSources, tarFile, s.c.BackupCompression.String(), s.c.GzipParallelism.Int()); err != nil {
-		return fmt.Errorf("createArchive: error compressing backup folder: %w", err)
+		return errwrap.Wrap(err, "error compressing backup folder")
 	}
 
 	s.logger.Info(
@@ -10,6 +10,7 @@ import (
 	"path"
 
 	openpgp "github.com/ProtonMail/go-crypto/openpgp/v2"
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 )
 
 // encryptArchive encrypts the backup file using PGP and the configured passphrase.
@@ -23,7 +24,7 @@ func (s *script) encryptArchive() error {
 	gpgFile := fmt.Sprintf("%s.gpg", s.file)
 	s.registerHook(hookLevelPlumbing, func(error) error {
 		if err := remove(gpgFile); err != nil {
-			return fmt.Errorf("encryptArchive: error removing gpg file: %w", err)
+			return errwrap.Wrap(err, "error removing gpg file")
 		}
 		s.logger.Info(
 			fmt.Sprintf("Removed GPG file `%s`.", gpgFile),
@@ -33,7 +34,7 @@ func (s *script) encryptArchive() error {
 
 	outFile, err := os.Create(gpgFile)
 	if err != nil {
-		return fmt.Errorf("encryptArchive: error opening out file: %w", err)
+		return errwrap.Wrap(err, "error opening out file")
 	}
 	defer outFile.Close()
 
@@ -42,17 +43,17 @@ func (s *script) encryptArchive() error {
 		FileName: name,
 	}, nil)
 	if err != nil {
-		return fmt.Errorf("encryptArchive: error encrypting backup file: %w", err)
+		return errwrap.Wrap(err, "error encrypting backup file")
 	}
 	defer dst.Close()
 
 	src, err := os.Open(s.file)
 	if err != nil {
-		return fmt.Errorf("encryptArchive: error opening backup file `%s`: %w", s.file, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error opening backup file `%s`", s.file))
 	}
 
 	if _, err := io.Copy(dst, src); err != nil {
-		return fmt.Errorf("encryptArchive: error writing ciphertext to file: %w", err)
+		return errwrap.Wrap(err, "error writing ciphertext to file")
 	}
 
 	s.file = gpgFile
@@ -19,6 +19,7 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/pkg/stdcopy"
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 	"golang.org/x/sync/errgroup"
 )
 
@@ -35,12 +36,12 @@ func (s *script) exec(containerRef string, command string, user string) ([]byte,
 		User: user,
 	})
 	if err != nil {
-		return nil, nil, fmt.Errorf("exec: error creating container exec: %w", err)
+		return nil, nil, errwrap.Wrap(err, "error creating container exec")
 	}
 
 	resp, err := s.cli.ContainerExecAttach(context.Background(), execID.ID, types.ExecStartCheck{})
 	if err != nil {
-		return nil, nil, fmt.Errorf("exec: error attaching container exec: %w", err)
+		return nil, nil, errwrap.Wrap(err, "error attaching container exec")
 	}
 	defer resp.Close()
 
@@ -53,25 +54,25 @@ func (s *script) exec(containerRef string, command string, user string) ([]byte,
 	}()
 
 	if err := <-outputDone; err != nil {
-		return nil, nil, fmt.Errorf("exec: error demultiplexing output: %w", err)
+		return nil, nil, errwrap.Wrap(err, "error demultiplexing output")
 	}
 
 	stdout, err := io.ReadAll(&outBuf)
 	if err != nil {
-		return nil, nil, fmt.Errorf("exec: error reading stdout: %w", err)
+		return nil, nil, errwrap.Wrap(err, "error reading stdout")
 	}
 	stderr, err := io.ReadAll(&errBuf)
 	if err != nil {
-		return nil, nil, fmt.Errorf("exec: error reading stderr: %w", err)
+		return nil, nil, errwrap.Wrap(err, "error reading stderr")
 	}
 
 	res, err := s.cli.ContainerExecInspect(context.Background(), execID.ID)
 	if err != nil {
-		return nil, nil, fmt.Errorf("exec: error inspecting container exec: %w", err)
+		return nil, nil, errwrap.Wrap(err, "error inspecting container exec")
 	}
 
 	if res.ExitCode > 0 {
-		return stdout, stderr, fmt.Errorf("exec: running command exited %d", res.ExitCode)
+		return stdout, stderr, errwrap.Wrap(nil, fmt.Sprintf("running command exited %d", res.ExitCode))
 	}
 
 	return stdout, stderr, nil
@@ -91,7 +92,7 @@ func (s *script) runLabeledCommands(label string) error {
 		Filters: filters.NewArgs(f...),
 	})
 	if err != nil {
-		return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
+		return errwrap.Wrap(err, "error querying for containers")
 	}
 
 	var hasDeprecatedContainers bool
@@ -104,7 +105,7 @@ func (s *script) runLabeledCommands(label string) error {
 			Filters: filters.NewArgs(f...),
 		})
 		if err != nil {
-			return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
+			return errwrap.Wrap(err, "error querying for containers")
 		}
 		if len(deprecatedContainers) != 0 {
 			hasDeprecatedContainers = true
@@ -121,7 +122,7 @@ func (s *script) runLabeledCommands(label string) error {
 			Filters: filters.NewArgs(f...),
 		})
 		if err != nil {
-			return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
+			return errwrap.Wrap(err, "error querying for containers")
 		}
 		if len(deprecatedContainers) != 0 {
 			hasDeprecatedContainers = true
@@ -164,14 +165,14 @@ func (s *script) runLabeledCommands(label string) error {
 				os.Stdout.Write(stdout)
 			}
 			if err != nil {
-				return fmt.Errorf("runLabeledCommands: error executing command: %w", err)
+				return errwrap.Wrap(err, "error executing command")
 			}
 			return nil
 		})
 	}
 
 	if err := g.Wait(); err != nil {
-		return fmt.Errorf("runLabeledCommands: error from errgroup: %w", err)
+		return errwrap.Wrap(err, "error from errgroup")
 	}
 	return nil
 }
@@ -191,12 +192,12 @@ func (s *script) withLabeledCommands(step lifecyclePhase, cb func() error) func(
 	}
 	return func() (err error) {
 		if err = s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-pre", step)); err != nil {
-			err = fmt.Errorf("(*script).withLabeledCommands: %s: error running pre commands: %w", step, err)
+			err = errwrap.Wrap(err, fmt.Sprintf("error running %s-pre commands", step))
			return
 		}
 		defer func() {
 			if derr := s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-post", step)); derr != nil {
-				err = errors.Join(err, fmt.Errorf("(*script).withLabeledCommands: error running %s-post commands: %w", step, derr))
+				err = errors.Join(err, errwrap.Wrap(derr, fmt.Sprintf("error running %s-post commands", step)))
 			}
 		}()
 		err = cb()
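withLabeledCommands above also illustrates the companion pattern for deferred cleanup used throughout the commit: a failure in the post-step commands must not overwrite an error the callback already returned, so the deferred closure combines both with errors.Join, wrapping only the new failure:

	// Pattern as it appears in the hunk above.
	defer func() {
		if derr := s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-post", step)); derr != nil {
			err = errors.Join(err, errwrap.Wrap(derr, fmt.Sprintf("error running %s-post commands", step)))
		}
	}()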
@@ -5,8 +5,9 @@ package main
 
 import (
 	"errors"
-	"fmt"
 	"sort"
 
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 )
 
 // hook contains a queued action that can be trigger them when the script
@@ -47,7 +48,7 @@ func (s *script) runHooks(err error) error {
 			continue
 		}
 		if actionErr := hook.action(err); actionErr != nil {
-			actionErrors = append(actionErrors, fmt.Errorf("runHooks: error running hook: %w", actionErr))
+			actionErrors = append(actionErrors, errwrap.Wrap(actionErr, "error running hook"))
 		}
 	}
 	if len(actionErrors) != 0 {
@@ -4,11 +4,11 @@
 package main
 
 import (
-	"errors"
 	"fmt"
 	"time"
 
 	"github.com/gofrs/flock"
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 )
 
 // lock opens a lockfile at the given location, keeping it locked until the
@@ -31,7 +31,7 @@ func (s *script) lock(lockfile string) (func() error, error) {
 	for {
 		acquired, err := fileLock.TryLock()
 		if err != nil {
-			return noop, fmt.Errorf("lock: error trying to lock: %w", err)
+			return noop, errwrap.Wrap(err, "error trying to lock")
 		}
 		if acquired {
 			if s.encounteredLock {
@@ -54,7 +54,7 @@ func (s *script) lock(lockfile string) (func() error, error) {
 		case <-retry.C:
 			continue
 		case <-deadline.C:
-			return noop, errors.New("lock: timed out waiting for lockfile to become available")
+			return noop, errwrap.Wrap(nil, "timed out waiting for lockfile to become available")
 		}
 	}
 }
@@ -14,6 +14,7 @@ import (
 	"time"
 
 	sTypes "github.com/containrrr/shoutrrr/pkg/types"
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 )
 
 //go:embed notifications.tmpl
@@ -37,16 +38,16 @@ func (s *script) notify(titleTemplate string, bodyTemplate string, err error) er
 
 	titleBuf := &bytes.Buffer{}
 	if err := s.template.ExecuteTemplate(titleBuf, titleTemplate, params); err != nil {
-		return fmt.Errorf("notify: error executing %s template: %w", titleTemplate, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error executing %s template", titleTemplate))
 	}
 
 	bodyBuf := &bytes.Buffer{}
 	if err := s.template.ExecuteTemplate(bodyBuf, bodyTemplate, params); err != nil {
-		return fmt.Errorf("notify: error executing %s template: %w", bodyTemplate, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error executing %s template", bodyTemplate))
 	}
 
 	if err := s.sendNotification(titleBuf.String(), bodyBuf.String()); err != nil {
-		return fmt.Errorf("notify: error notifying: %w", err)
+		return errwrap.Wrap(err, "error sending notification")
 	}
 	return nil
 }
@@ -70,7 +71,7 @@ func (s *script) sendNotification(title, body string) error {
 		}
 	}
 	if len(errs) != 0 {
-		return fmt.Errorf("sendNotification: error sending message: %w", errors.Join(errs...))
+		return errwrap.Wrap(errors.Join(errs...), "error sending message")
 	}
 	return nil
 }
@@ -9,6 +9,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 	"golang.org/x/sync/errgroup"
 )
 
@@ -47,7 +48,7 @@ func (s *script) pruneBackups() error {
 	}
 
 	if err := eg.Wait(); err != nil {
-		return fmt.Errorf("pruneBackups: error pruning backups: %w", err)
+		return errwrap.Wrap(err, "error pruning backups")
 	}
 
 	return nil
@@ -6,6 +6,8 @@ package main
 import (
 	"errors"
 	"fmt"
+
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 )
 
 // runScript instantiates a new script object and orchestrates a backup run.
@@ -15,7 +17,12 @@ import (
 func runScript(c *Config) (err error) {
 	defer func() {
 		if derr := recover(); derr != nil {
-			err = fmt.Errorf("runScript: unexpected panic running script: %v", derr)
+			asErr, ok := derr.(error)
+			if ok {
+				err = errwrap.Wrap(asErr, "unexpected panic running script")
+			} else {
+				err = errwrap.Wrap(nil, fmt.Sprintf("%v", derr))
+			}
 		}
 	}()
 
@@ -23,27 +30,27 @@ func runScript(c *Config) (err error) {
 
 	unlock, lockErr := s.lock("/var/lock/dockervolumebackup.lock")
 	if lockErr != nil {
-		err = fmt.Errorf("runScript: error acquiring file lock: %w", lockErr)
+		err = errwrap.Wrap(lockErr, "error acquiring file lock")
 		return
 	}
 	defer func() {
 		if derr := unlock(); derr != nil {
-			err = errors.Join(err, fmt.Errorf("runScript: error releasing file lock: %w", derr))
+			err = errors.Join(err, errwrap.Wrap(derr, "error releasing file lock"))
 		}
 	}()
 
 	unset, err := s.c.applyEnv()
 	if err != nil {
-		return fmt.Errorf("runScript: error applying env: %w", err)
+		return errwrap.Wrap(err, "error applying env")
 	}
 	defer func() {
 		if derr := unset(); derr != nil {
-			err = errors.Join(err, fmt.Errorf("runScript: error unsetting environment variables: %w", derr))
+			err = errors.Join(err, errwrap.Wrap(derr, "error unsetting environment variables"))
 		}
 	}()
 
 	if initErr := s.init(); initErr != nil {
-		err = fmt.Errorf("runScript: error instantiating script: %w", initErr)
+		err = errwrap.Wrap(initErr, "error instantiating script")
 		return
 	}
 
@@ -56,7 +63,7 @@ func runScript(c *Config) (err error) {
 	// similar).
 	defer func() {
 		if derr := restartContainersAndServices(); derr != nil {
-			err = errors.Join(err, fmt.Errorf("runScript: error restarting containers and services: %w", derr))
+			err = errors.Join(err, errwrap.Wrap(derr, "error restarting containers and services"))
 		}
 	}()
 	if err != nil {
@@ -82,19 +89,22 @@ func runScript(c *Config) (err error) {
 
 		if hookErr := s.runHooks(scriptErr); hookErr != nil {
 			if scriptErr != nil {
-				return fmt.Errorf(
-					"runScript: error %w executing the script followed by %w calling the registered hooks",
+				return errwrap.Wrap(
+					nil,
+					fmt.Sprintf(
+						"error %v executing the script followed by %v calling the registered hooks",
 					scriptErr,
 					hookErr,
+					),
 				)
 			}
-			return fmt.Errorf(
-				"runScript: the script ran successfully, but an error occurred calling the registered hooks: %w",
+			return errwrap.Wrap(
 				hookErr,
+				"the script ran successfully, but an error occurred calling the registered hooks",
 			)
 		}
 		if scriptErr != nil {
-			return fmt.Errorf("runScript: error running script: %w", scriptErr)
+			return errwrap.Wrap(scriptErr, "error running script")
 		}
 		return nil
 	}()
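The recover handler in runScript gains a type assertion: a panic value that already is an error keeps its chain intact inside errwrap.Wrap, while any other panic value is stringified first and attached to a fresh error via Wrap(nil, ...):

	// New recovery logic from the hunk above.
	defer func() {
		if derr := recover(); derr != nil {
			asErr, ok := derr.(error)
			if ok {
				err = errwrap.Wrap(asErr, "unexpected panic running script")
			} else {
				err = errwrap.Wrap(nil, fmt.Sprintf("%v", derr))
			}
		}
	}()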
@@ -12,6 +12,7 @@ import (
 	"text/template"
 	"time"
 
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 	"github.com/offen/docker-volume-backup/internal/storage"
 	"github.com/offen/docker-volume-backup/internal/storage/azure"
 	"github.com/offen/docker-volume-backup/internal/storage/dropbox"
@@ -80,14 +81,14 @@ func (s *script) init() error {
 
 	tmplFileName, tErr := template.New("extension").Parse(s.file)
 	if tErr != nil {
-		return fmt.Errorf("newScript: unable to parse backup file extension template: %w", tErr)
+		return errwrap.Wrap(tErr, "unable to parse backup file extension template")
 	}
 
 	var bf bytes.Buffer
 	if tErr := tmplFileName.Execute(&bf, map[string]string{
 		"Extension": fmt.Sprintf("tar.%s", s.c.BackupCompression),
 	}); tErr != nil {
-		return fmt.Errorf("newScript: error executing backup file extension template: %w", tErr)
+		return errwrap.Wrap(tErr, "error executing backup file extension template")
 	}
 	s.file = bf.String()
 
@@ -103,12 +104,12 @@ func (s *script) init() error {
 	if !os.IsNotExist(err) || dockerHostSet {
 		cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
 		if err != nil {
-			return fmt.Errorf("newScript: failed to create docker client")
+			return errwrap.Wrap(err, "failed to create docker client")
 		}
 		s.cli = cli
 		s.registerHook(hookLevelPlumbing, func(err error) error {
 			if err := s.cli.Close(); err != nil {
-				return fmt.Errorf("newScript: failed to close docker client: %w", err)
+				return errwrap.Wrap(err, "failed to close docker client")
 			}
 			return nil
 		})
@@ -118,8 +119,6 @@ func (s *script) init() error {
 		switch logType {
 		case storage.LogLevelWarning:
 			s.logger.Warn(fmt.Sprintf(msg, params...), "storage", context)
-		case storage.LogLevelError:
-			s.logger.Error(fmt.Sprintf(msg, params...), "storage", context)
 		default:
 			s.logger.Info(fmt.Sprintf(msg, params...), "storage", context)
 		}
@@ -141,7 +140,7 @@ func (s *script) init() error {
 		}
 		s3Backend, err := s3.NewStorageBackend(s3Config, logFunc)
 		if err != nil {
-			return fmt.Errorf("newScript: error creating s3 storage backend: %w", err)
+			return errwrap.Wrap(err, "error creating s3 storage backend")
 		}
 		s.storages = append(s.storages, s3Backend)
 	}
@@ -156,7 +155,7 @@ func (s *script) init() error {
 		}
 		webdavBackend, err := webdav.NewStorageBackend(webDavConfig, logFunc)
 		if err != nil {
-			return fmt.Errorf("newScript: error creating webdav storage backend: %w", err)
+			return errwrap.Wrap(err, "error creating webdav storage backend")
 		}
 		s.storages = append(s.storages, webdavBackend)
 	}
@@ -173,7 +172,7 @@ func (s *script) init() error {
 		}
 		sshBackend, err := ssh.NewStorageBackend(sshConfig, logFunc)
 		if err != nil {
-			return fmt.Errorf("newScript: error creating ssh storage backend: %w", err)
+			return errwrap.Wrap(err, "error creating ssh storage backend")
 		}
 		s.storages = append(s.storages, sshBackend)
 	}
@@ -197,7 +196,7 @@ func (s *script) init() error {
 		}
 		azureBackend, err := azure.NewStorageBackend(azureConfig, logFunc)
 		if err != nil {
-			return fmt.Errorf("newScript: error creating azure storage backend: %w", err)
+			return errwrap.Wrap(err, "error creating azure storage backend")
 		}
 		s.storages = append(s.storages, azureBackend)
 	}
@@ -214,7 +213,7 @@ func (s *script) init() error {
 		}
 		dropboxBackend, err := dropbox.NewStorageBackend(dropboxConfig, logFunc)
 		if err != nil {
-			return fmt.Errorf("newScript: error creating dropbox storage backend: %w", err)
+			return errwrap.Wrap(err, "error creating dropbox storage backend")
 		}
 		s.storages = append(s.storages, dropboxBackend)
 	}
@@ -240,14 +239,14 @@ func (s *script) init() error {
 
 	hookLevel, ok := hookLevels[s.c.NotificationLevel]
 	if !ok {
-		return fmt.Errorf("newScript: unknown NOTIFICATION_LEVEL %s", s.c.NotificationLevel)
+		return errwrap.Wrap(nil, fmt.Sprintf("unknown NOTIFICATION_LEVEL %s", s.c.NotificationLevel))
 	}
 	s.hookLevel = hookLevel
 
 	if len(s.c.NotificationURLs) > 0 {
 		sender, senderErr := shoutrrr.CreateSender(s.c.NotificationURLs...)
 		if senderErr != nil {
-			return fmt.Errorf("newScript: error creating sender: %w", senderErr)
+			return errwrap.Wrap(senderErr, "error creating sender")
 		}
 		s.sender = sender
 
@@ -255,13 +254,13 @@ func (s *script) init() error {
 		tmpl.Funcs(templateHelpers)
 		tmpl, err = tmpl.Parse(defaultNotifications)
 		if err != nil {
-			return fmt.Errorf("newScript: unable to parse default notifications templates: %w", err)
+			return errwrap.Wrap(err, "unable to parse default notifications templates")
 		}
 
 		if fi, err := os.Stat("/etc/dockervolumebackup/notifications.d"); err == nil && fi.IsDir() {
 			tmpl, err = tmpl.ParseGlob("/etc/dockervolumebackup/notifications.d/*.*")
 			if err != nil {
-				return fmt.Errorf("newScript: unable to parse user defined notifications templates: %w", err)
+				return errwrap.Wrap(err, "unable to parse user defined notifications templates")
 			}
 		}
 		s.template = tmpl
@ -1,3 +1,6 @@
// Copyright 2024 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
@ -15,24 +18,25 @@ import (
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
	"github.com/offen/docker-volume-backup/internal/errwrap"
)

func scaleService(cli *client.Client, serviceID string, replicas uint64) ([]string, error) {
	service, _, err := cli.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
	if err != nil {
		return nil, fmt.Errorf("scaleService: error inspecting service %s: %w", serviceID, err)
		return nil, errwrap.Wrap(err, fmt.Sprintf("error inspecting service %s", serviceID))
	}
	serviceMode := &service.Spec.Mode
	switch {
	case serviceMode.Replicated != nil:
		serviceMode.Replicated.Replicas = &replicas
	default:
		return nil, fmt.Errorf("scaleService: service to be scaled %s has to be in replicated mode", service.Spec.Name)
		return nil, errwrap.Wrap(nil, fmt.Sprintf("service to be scaled %s has to be in replicated mode", service.Spec.Name))
	}

	response, err := cli.ServiceUpdate(context.Background(), service.ID, service.Version, service.Spec, types.ServiceUpdateOptions{})
	if err != nil {
		return nil, fmt.Errorf("scaleService: error updating service: %w", err)
		return nil, errwrap.Wrap(err, "error updating service")
	}

	discardWriter := &noopWriteCloser{io.Discard}
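Note the two call shapes the refactor relies on: errwrap.Wrap(err, msg) wraps an underlying cause, while errwrap.Wrap(nil, msg) mints a new caller-prefixed error where fmt.Errorf or errors.New was used before. A minimal sketch of both shapes (inspect and the service name are hypothetical placeholders, not part of this commit):

package main

import (
	"fmt"

	"github.com/offen/docker-volume-backup/internal/errwrap"
)

// inspect is a hypothetical stand-in for a Docker service lookup.
func inspect(name string) (mode string, err error) {
	return "global", nil
}

// ensureReplicated shows both Wrap call shapes: wrapping an underlying
// cause, and creating a new caller-prefixed error from nil.
func ensureReplicated(name string) error {
	mode, err := inspect(name)
	if err != nil {
		return errwrap.Wrap(err, fmt.Sprintf("error inspecting service %s", name))
	}
	if mode != "replicated" {
		return errwrap.Wrap(nil, fmt.Sprintf("service %s has to be in replicated mode", name))
	}
	return nil
}

func main() {
	// prints e.g. "main.ensureReplicated: service pg has to be in replicated mode"
	fmt.Println(ensureReplicated("pg"))
}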
@ -51,11 +55,14 @@ func awaitContainerCountForService(cli *client.Client, serviceID string, count i
	for {
		select {
		case <-timeout.C:
			return fmt.Errorf(
			return errwrap.Wrap(
				"awaitContainerCount: timed out after waiting %s for service %s to reach desired container count of %d",
				nil,
				fmt.Sprintf(
					"timed out after waiting %s for service %s to reach desired container count of %d",
				timeoutAfter,
				serviceID,
				count,
				),
			)
		case <-poll.C:
			containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{
@ -65,7 +72,7 @@ func awaitContainerCountForService(cli *client.Client, serviceID string, count i
				}),
			})
			if err != nil {
				return fmt.Errorf("awaitContainerCount: error listing containers: %w", err)
				return errwrap.Wrap(err, "error listing containers")
			}
			if len(containers) == count {
				return nil
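The timeout/poll loop above is the standard select-over-two-timers pattern. A self-contained sketch of the same pattern using the new helper (awaitCondition, check, and the durations are illustrative assumptions, not the repository's actual code):

package main

import (
	"fmt"
	"time"

	"github.com/offen/docker-volume-backup/internal/errwrap"
)

// awaitCondition polls check once per second until it reports done,
// giving up after timeoutAfter. check is a hypothetical callback.
func awaitCondition(check func() (bool, error), timeoutAfter time.Duration) error {
	timeout := time.NewTimer(timeoutAfter)
	poll := time.NewTicker(time.Second)
	defer timeout.Stop()
	defer poll.Stop()
	for {
		select {
		case <-timeout.C:
			return errwrap.Wrap(nil, fmt.Sprintf("timed out after waiting %s", timeoutAfter))
		case <-poll.C:
			done, err := check()
			if err != nil {
				return errwrap.Wrap(err, "error checking condition")
			}
			if done {
				return nil
			}
		}
	}
}

func main() {
	start := time.Now()
	err := awaitCondition(func() (bool, error) {
		return time.Since(start) > 2*time.Second, nil
	}, 10*time.Second)
	fmt.Println(err) // <nil> once the condition is met
}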
@ -84,7 +91,7 @@ func (s *script) stopContainersAndServices() (func() error, error) {
	dockerInfo, err := s.cli.Info(context.Background())
	if err != nil {
		return noop, fmt.Errorf("(*script).stopContainersAndServices: error getting docker info: %w", err)
		return noop, errwrap.Wrap(err, "error getting docker info")
	}
	isDockerSwarm := dockerInfo.Swarm.LocalNodeState != "inactive"

@ -97,7 +104,7 @@ func (s *script) stopContainersAndServices() (func() error, error) {
			"Please use BACKUP_STOP_DURING_BACKUP_LABEL instead. Refer to the docs for an upgrade guide.",
		)
		if _, ok := os.LookupEnv("BACKUP_STOP_DURING_BACKUP_LABEL"); ok {
			return noop, errors.New("(*script).stopContainersAndServices: both BACKUP_STOP_DURING_BACKUP_LABEL and BACKUP_STOP_CONTAINER_LABEL have been set, cannot continue")
			return noop, errwrap.Wrap(nil, "both BACKUP_STOP_DURING_BACKUP_LABEL and BACKUP_STOP_CONTAINER_LABEL have been set, cannot continue")
		}
		labelValue = s.c.BackupStopContainerLabel
	}
@ -109,7 +116,7 @@ func (s *script) stopContainersAndServices() (func() error, error) {
	allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{})
	if err != nil {
		return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for containers: %w", err)
		return noop, errwrap.Wrap(err, "error querying for containers")
	}
	containersToStop, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
		Filters: filters.NewArgs(filters.KeyValuePair{
@ -118,7 +125,7 @@ func (s *script) stopContainersAndServices() (func() error, error) {
		}),
	})
	if err != nil {
		return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for containers to stop: %w", err)
		return noop, errwrap.Wrap(err, "error querying for containers to stop")
	}

	var allServices []swarm.Service
@ -126,7 +133,7 @@ func (s *script) stopContainersAndServices() (func() error, error) {
	if isDockerSwarm {
		allServices, err = s.cli.ServiceList(context.Background(), types.ServiceListOptions{})
		if err != nil {
			return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for services: %w", err)
			return noop, errwrap.Wrap(err, "error querying for services")
		}
		matchingServices, err := s.cli.ServiceList(context.Background(), types.ServiceListOptions{
			Filters: filters.NewArgs(filters.KeyValuePair{
@ -142,7 +149,7 @@ func (s *script) stopContainersAndServices() (func() error, error) {
		})
	}
	if err != nil {
		return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for services to scale down: %w", err)
		return noop, errwrap.Wrap(err, "error querying for services to scale down")
	}
}

@ -155,14 +162,17 @@ func (s *script) stopContainersAndServices() (func() error, error) {
		if swarmServiceID, ok := container.Labels["com.docker.swarm.service.id"]; ok {
			parentService, _, err := s.cli.ServiceInspectWithRaw(context.Background(), swarmServiceID, types.ServiceInspectOptions{})
			if err != nil {
				return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for parent service with ID %s: %w", swarmServiceID, err)
				return noop, errwrap.Wrap(err, fmt.Sprintf("error querying for parent service with ID %s", swarmServiceID))
			}
			for label := range parentService.Spec.Labels {
				if label == "docker-volume-backup.stop-during-backup" {
					return noop, fmt.Errorf(
					return noop, errwrap.Wrap(
						"(*script).stopContainersAndServices: container %s is labeled to stop but has parent service %s which is also labeled, cannot continue",
						nil,
						fmt.Sprintf(
							"container %s is labeled to stop but has parent service %s which is also labeled, cannot continue",
						container.Names[0],
						parentService.Spec.Name,
						),
					)
				}
			}
@ -245,10 +255,12 @@ func (s *script) stopContainersAndServices() (func() error, error) {
	var initialErr error
	allErrors := append(stopErrors, scaleDownErrors.value()...)
	if len(allErrors) != 0 {
		initialErr = fmt.Errorf(
		initialErr = errwrap.Wrap(
			"(*script).stopContainersAndServices: %d error(s) stopping containers: %w",
			len(allErrors),
			errors.Join(allErrors...),
			fmt.Sprintf(
				"%d error(s) stopping containers",
				len(allErrors),
			),
		)
	}
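Aggregated failures keep errors.Join as the wrapped cause, so callers can still match individual errors with errors.Is or errors.As while the outer message carries the count and the caller prefix. A hedged sketch (the error values are invented for illustration):

package main

import (
	"errors"
	"fmt"

	"github.com/offen/docker-volume-backup/internal/errwrap"
)

func stopAll() error {
	// hypothetical per-container failures
	allErrors := []error{
		errors.New("container app-1: failed to stop"),
		errors.New("container app-2: failed to stop"),
	}
	if len(allErrors) != 0 {
		return errwrap.Wrap(
			errors.Join(allErrors...),
			fmt.Sprintf("%d error(s) stopping containers", len(allErrors)),
		)
	}
	return nil
}

func main() {
	// The joined errors stay in the chain, so the individual failures
	// remain inspectable while the message carries the caller prefix.
	fmt.Println(stopAll())
}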
@ -268,7 +280,7 @@ func (s *script) stopContainersAndServices() (func() error, error) {
			if err != nil {
				restartErrors = append(
					restartErrors,
					fmt.Errorf("(*script).stopContainersAndServices: error looking up parent service: %w", err),
					errwrap.Wrap(err, "error looking up parent service"),
				)
				continue
			}
@ -311,10 +323,12 @@ func (s *script) stopContainersAndServices() (func() error, error) {
	allErrors := append(restartErrors, scaleUpErrors.value()...)
	if len(allErrors) != 0 {
		return fmt.Errorf(
		return errwrap.Wrap(
			"(*script).stopContainersAndServices: %d error(s) restarting containers and services: %w",
			len(allErrors),
			errors.Join(allErrors...),
			fmt.Sprintf(
				"%d error(s) restarting containers and services",
				len(allErrors),
			),
		)
	}

@ -11,6 +11,7 @@ import (
	"sync"
	"time"

	"github.com/offen/docker-volume-backup/internal/errwrap"
	"github.com/robfig/cron/v3"
)

@ -23,7 +24,7 @@ func remove(location string) error {
		if os.IsNotExist(err) {
			return nil
		}
		return fmt.Errorf("remove: error checking for existence of `%s`: %w", location, err)
		return errwrap.Wrap(err, fmt.Sprintf("error checking for existence of `%s`", location))
	}
	if fi.IsDir() {
		err = os.RemoveAll(location)
@ -31,7 +32,7 @@ func remove(location string) error {
		err = os.Remove(location)
	}
	if err != nil {
		return fmt.Errorf("remove: error removing `%s`: %w", location, err)
		return errwrap.Wrap(err, fmt.Sprintf("error removing `%s`", location))
	}
	return nil
}
@ -50,7 +51,7 @@ type bufferingWriter struct {

func (b *bufferingWriter) Write(p []byte) (n int, err error) {
	if n, err := b.buf.Write(p); err != nil {
		return n, fmt.Errorf("(*bufferingWriter).Write: error writing to buffer: %w", err)
		return n, errwrap.Wrap(err, "error writing to buffer")
	}
	return b.writer.Write(p)
}

43 internal/errwrap/wrap.go Normal file
@ -0,0 +1,43 @@
// Copyright 2024 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package errwrap

import (
	"errors"
	"fmt"
	"runtime"
	"strings"
)

// Wrap wraps the given error using the given message while prepending
// the name of the calling function, creating a poor man's stack trace.
func Wrap(err error, msg string) error {
	pc := make([]uintptr, 15)
	n := runtime.Callers(2, pc)
	frames := runtime.CallersFrames(pc[:n])
	frame, _ := frames.Next()
	// strip full import paths and just use the package name
	chunks := strings.Split(frame.Function, "/")
	withCaller := fmt.Sprintf("%s: %s", chunks[len(chunks)-1], msg)
	if err == nil {
		return errors.New(withCaller)
	}
	return fmt.Errorf("%s: %w", withCaller, err)
}

// Unwrap receives an error and returns the last error in the chain of
// wrapped errors.
func Unwrap(err error) error {
	if err == nil {
		return nil
	}
	for {
		u := errors.Unwrap(err)
		if u == nil {
			break
		}
		err = u
	}
	return err
}
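Taken together, Wrap builds up a chain of caller-prefixed messages while Unwrap walks the chain back to the root cause. A small self-contained sketch of the resulting behavior (the functions and the printed output are illustrative assumptions, not part of this commit):

package main

import (
	"errors"
	"fmt"

	"github.com/offen/docker-volume-backup/internal/errwrap"
)

func loadConfig() error {
	// hypothetical root cause
	return errwrap.Wrap(errors.New("file not found"), "error reading config")
}

func run() error {
	if err := loadConfig(); err != nil {
		return errwrap.Wrap(err, "error running backup")
	}
	return nil
}

func main() {
	err := run()
	// Each Wrap call prepends its caller, e.g.:
	// main.run: error running backup: main.loadConfig: error reading config: file not found
	fmt.Println(err)
	// Unwrap recovers the innermost error of the chain:
	fmt.Println(errwrap.Unwrap(err)) // file not found
}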
@ -18,6 +18,7 @@ import (
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
	"github.com/offen/docker-volume-backup/internal/errwrap"
	"github.com/offen/docker-volume-backup/internal/storage"
)

@ -40,11 +41,11 @@ type Config struct {
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
	endpointTemplate, err := template.New("endpoint").Parse(opts.Endpoint)
	if err != nil {
		return nil, fmt.Errorf("NewStorageBackend: error parsing endpoint template: %w", err)
		return nil, errwrap.Wrap(err, "error parsing endpoint template")
	}
	var ep bytes.Buffer
	if err := endpointTemplate.Execute(&ep, opts); err != nil {
		return nil, fmt.Errorf("NewStorageBackend: error executing endpoint template: %w", err)
		return nil, errwrap.Wrap(err, "error executing endpoint template")
	}
	normalizedEndpoint := fmt.Sprintf("%s/", strings.TrimSuffix(ep.String(), "/"))

@ -52,21 +53,21 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
	if opts.PrimaryAccountKey != "" {
		cred, err := azblob.NewSharedKeyCredential(opts.AccountName, opts.PrimaryAccountKey)
		if err != nil {
			return nil, fmt.Errorf("NewStorageBackend: error creating shared key Azure credential: %w", err)
			return nil, errwrap.Wrap(err, "error creating shared key Azure credential")
		}

		client, err = azblob.NewClientWithSharedKeyCredential(normalizedEndpoint, cred, nil)
		if err != nil {
			return nil, fmt.Errorf("NewStorageBackend: error creating Azure client: %w", err)
			return nil, errwrap.Wrap(err, "error creating Azure client")
		}
	} else {
		cred, err := azidentity.NewManagedIdentityCredential(nil)
		if err != nil {
			return nil, fmt.Errorf("NewStorageBackend: error creating managed identity credential: %w", err)
			return nil, errwrap.Wrap(err, "error creating managed identity credential")
		}
		client, err = azblob.NewClient(normalizedEndpoint, cred, nil)
		if err != nil {
			return nil, fmt.Errorf("NewStorageBackend: error creating Azure client: %w", err)
			return nil, errwrap.Wrap(err, "error creating Azure client")
		}
	}

@ -90,7 +91,7 @@ func (b *azureBlobStorage) Name() string {
func (b *azureBlobStorage) Copy(file string) error {
	fileReader, err := os.Open(file)
	if err != nil {
		return fmt.Errorf("(*azureBlobStorage).Copy: error opening file %s: %w", file, err)
		return errwrap.Wrap(err, fmt.Sprintf("error opening file %s", file))
	}
	_, err = b.client.UploadStream(
		context.Background(),
@ -100,7 +101,7 @@ func (b *azureBlobStorage) Copy(file string) error {
		nil,
	)
	if err != nil {
		return fmt.Errorf("(*azureBlobStorage).Copy: error uploading file %s: %w", file, err)
		return errwrap.Wrap(err, fmt.Sprintf("error uploading file %s", file))
	}
	return nil
}
@ -117,7 +118,7 @@ func (b *azureBlobStorage) Prune(deadline time.Time, pruningPrefix string) (*sto
	for pager.More() {
		resp, err := pager.NextPage(context.Background())
		if err != nil {
			return nil, fmt.Errorf("(*azureBlobStorage).Prune: error paging over blobs: %w", err)
			return nil, errwrap.Wrap(err, "error paging over blobs")
		}
		for _, v := range resp.Segment.BlobItems {
			totalCount++

@ -14,6 +14,7 @@ import (

	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
	"github.com/offen/docker-volume-backup/internal/errwrap"
	"github.com/offen/docker-volume-backup/internal/storage"
	"golang.org/x/oauth2"
)
@ -51,7 +52,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
	tkSource := conf.TokenSource(context.Background(), &oauth2.Token{RefreshToken: opts.RefreshToken})
	token, err := tkSource.Token()
	if err != nil {
		return nil, fmt.Errorf("(*dropboxStorage).NewStorageBackend: Error refreshing token: %w", err)
		return nil, errwrap.Wrap(err, "error refreshing token")
	}

	dbxConfig := dropbox.Config{
@ -95,29 +96,28 @@ func (b *dropboxStorage) Copy(file string) error {
		switch err := err.(type) {
		case files.CreateFolderV2APIError:
			if err.EndpointError.Path.Tag != files.WriteErrorConflict {
				return fmt.Errorf("(*dropboxStorage).Copy: Error creating directory '%s': %w", b.DestinationPath, err)
				return errwrap.Wrap(err, fmt.Sprintf("error creating directory '%s'", b.DestinationPath))
			}
			b.Log(storage.LogLevelInfo, b.Name(), "Destination path '%s' already exists, no new directory required.", b.DestinationPath)
		default:
			return fmt.Errorf("(*dropboxStorage).Copy: Error creating directory '%s': %w", b.DestinationPath, err)
			return errwrap.Wrap(err, fmt.Sprintf("error creating directory '%s'", b.DestinationPath))
		}
	}

	r, err := os.Open(file)
	if err != nil {
		return fmt.Errorf("(*dropboxStorage).Copy: Error opening the file to be uploaded: %w", err)
		return errwrap.Wrap(err, "error opening the file to be uploaded")
	}
	defer r.Close()

	// Start new upload session and get session id

	b.Log(storage.LogLevelInfo, b.Name(), "Starting upload session for backup '%s' at path '%s'.", file, b.DestinationPath)

	var sessionId string
	uploadSessionStartArg := files.NewUploadSessionStartArg()
	uploadSessionStartArg.SessionType = &files.UploadSessionType{Tagged: dropbox.Tagged{Tag: files.UploadSessionTypeConcurrent}}
	if res, err := b.client.UploadSessionStart(uploadSessionStartArg, nil); err != nil {
		return fmt.Errorf("(*dropboxStorage).Copy: Error starting the upload session: %w", err)
		return errwrap.Wrap(err, "error starting the upload session")
	} else {
		sessionId = res.SessionId
	}
@ -165,7 +165,7 @@ loop:
			bytesRead, err := r.Read(chunk)
			if err != nil {
				errorChn <- fmt.Errorf("(*dropboxStorage).Copy: Error reading the file to be uploaded: %w", err)
				errorChn <- errwrap.Wrap(err, "error reading the file to be uploaded")
				mu.Unlock()
				return
			}
@ -184,7 +184,7 @@ loop:
			mu.Unlock()

			if err := b.client.UploadSessionAppendV2(uploadSessionAppendArg, bytes.NewReader(chunk)); err != nil {
				errorChn <- fmt.Errorf("(*dropboxStorage).Copy: Error appending the file to the upload session: %w", err)
				errorChn <- errwrap.Wrap(err, "error appending the file to the upload session")
				return
			}
		}()
@ -198,7 +198,7 @@ loop:
		files.NewCommitInfo(filepath.Join(b.DestinationPath, name)),
	), nil)
	if err != nil {
		return fmt.Errorf("(*dropboxStorage).Copy: Error finishing the upload session: %w", err)
		return errwrap.Wrap(err, "error finishing the upload session")
	}

	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' at path '%s'.", file, b.DestinationPath)
@ -211,14 +211,14 @@ func (b *dropboxStorage) Prune(deadline time.Time, pruningPrefix string) (*stora
	var entries []files.IsMetadata
	res, err := b.client.ListFolder(files.NewListFolderArg(b.DestinationPath))
	if err != nil {
		return nil, fmt.Errorf("(*webDavStorage).Prune: Error looking up candidates from remote storage: %w", err)
		return nil, errwrap.Wrap(err, "error looking up candidates from remote storage")
	}
	entries = append(entries, res.Entries...)

	for res.HasMore {
		res, err = b.client.ListFolderContinue(files.NewListFolderContinueArg(res.Cursor))
		if err != nil {
			return nil, fmt.Errorf("(*webDavStorage).Prune: Error looking up candidates from remote storage: %w", err)
			return nil, errwrap.Wrap(err, "error looking up candidates from remote storage")
		}
		entries = append(entries, res.Entries...)
	}
@ -248,7 +248,7 @@ func (b *dropboxStorage) Prune(deadline time.Time, pruningPrefix string) (*stora
	pruneErr := b.DoPrune(b.Name(), len(matches), lenCandidates, deadline, func() error {
		for _, match := range matches {
			if _, err := b.client.DeleteV2(files.NewDeleteArg(filepath.Join(b.DestinationPath, match.Name))); err != nil {
				return fmt.Errorf("(*dropboxStorage).Prune: Error removing file from Dropbox storage: %w", err)
				return errwrap.Wrap(err, "error removing file from Dropbox storage")
			}
		}
		return nil

|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/offen/docker-volume-backup/internal/errwrap"
|
||||||
"github.com/offen/docker-volume-backup/internal/storage"
|
"github.com/offen/docker-volume-backup/internal/storage"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -47,7 +48,7 @@ func (b *localStorage) Copy(file string) error {
|
|||||||
_, name := path.Split(file)
|
_, name := path.Split(file)
|
||||||
|
|
||||||
if err := copyFile(file, path.Join(b.DestinationPath, name)); err != nil {
|
if err := copyFile(file, path.Join(b.DestinationPath, name)); err != nil {
|
||||||
return fmt.Errorf("(*localStorage).Copy: Error copying file to archive: %w", err)
|
return errwrap.Wrap(err, "error copying file to archive")
|
||||||
}
|
}
|
||||||
b.Log(storage.LogLevelInfo, b.Name(), "Stored copy of backup `%s` in `%s`.", file, b.DestinationPath)
|
b.Log(storage.LogLevelInfo, b.Name(), "Stored copy of backup `%s` in `%s`.", file, b.DestinationPath)
|
||||||
|
|
||||||
@ -57,7 +58,7 @@ func (b *localStorage) Copy(file string) error {
|
|||||||
os.Remove(symlink)
|
os.Remove(symlink)
|
||||||
}
|
}
|
||||||
if err := os.Symlink(name, symlink); err != nil {
|
if err := os.Symlink(name, symlink); err != nil {
|
||||||
return fmt.Errorf("(*localStorage).Copy: error creating latest symlink: %w", err)
|
return errwrap.Wrap(err, "error creating latest symlink")
|
||||||
}
|
}
|
||||||
b.Log(storage.LogLevelInfo, b.Name(), "Created/Updated symlink `%s` for latest backup.", b.latestSymlink)
|
b.Log(storage.LogLevelInfo, b.Name(), "Created/Updated symlink `%s` for latest backup.", b.latestSymlink)
|
||||||
}
|
}
|
||||||
@ -73,10 +74,12 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
|
|||||||
)
|
)
|
||||||
globMatches, err := filepath.Glob(globPattern)
|
globMatches, err := filepath.Glob(globPattern)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(
|
return nil, errwrap.Wrap(
|
||||||
"(*localStorage).Prune: Error looking up matching files using pattern %s: %w",
|
|
||||||
globPattern,
|
|
||||||
err,
|
err,
|
||||||
|
fmt.Sprintf(
|
||||||
|
"error looking up matching files using pattern %s",
|
||||||
|
globPattern,
|
||||||
|
),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -84,10 +87,12 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
|
|||||||
for _, candidate := range globMatches {
|
for _, candidate := range globMatches {
|
||||||
fi, err := os.Lstat(candidate)
|
fi, err := os.Lstat(candidate)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(
|
return nil, errwrap.Wrap(
|
||||||
"(*localStorage).Prune: Error calling Lstat on file %s: %w",
|
|
||||||
candidate,
|
|
||||||
err,
|
err,
|
||||||
|
fmt.Sprintf(
|
||||||
|
"error calling Lstat on file %s",
|
||||||
|
candidate,
|
||||||
|
),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -100,10 +105,12 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
|
|||||||
for _, candidate := range candidates {
|
for _, candidate := range candidates {
|
||||||
fi, err := os.Stat(candidate)
|
fi, err := os.Stat(candidate)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(
|
return nil, errwrap.Wrap(
|
||||||
"(*localStorage).Prune: Error calling stat on file %s: %w",
|
|
||||||
candidate,
|
|
||||||
err,
|
err,
|
||||||
|
fmt.Sprintf(
|
||||||
|
"error calling stat on file %s",
|
||||||
|
candidate,
|
||||||
|
),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
if fi.ModTime().Before(deadline) {
|
if fi.ModTime().Before(deadline) {
|
||||||
@ -124,10 +131,12 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if len(removeErrors) != 0 {
|
if len(removeErrors) != 0 {
|
||||||
return fmt.Errorf(
|
return errwrap.Wrap(
|
||||||
"(*localStorage).Prune: %d error(s) deleting files, starting with: %w",
|
|
||||||
len(removeErrors),
|
|
||||||
errors.Join(removeErrors...),
|
errors.Join(removeErrors...),
|
||||||
|
fmt.Sprintf(
|
||||||
|
"%d error(s) deleting files",
|
||||||
|
len(removeErrors),
|
||||||
|
),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -15,6 +15,7 @@ import (
|
|||||||
|
|
||||||
"github.com/minio/minio-go/v7"
|
"github.com/minio/minio-go/v7"
|
||||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||||
|
"github.com/offen/docker-volume-backup/internal/errwrap"
|
||||||
"github.com/offen/docker-volume-backup/internal/storage"
|
"github.com/offen/docker-volume-backup/internal/storage"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -53,7 +54,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
|
|||||||
} else if opts.IamRoleEndpoint != "" {
|
} else if opts.IamRoleEndpoint != "" {
|
||||||
creds = credentials.NewIAM(opts.IamRoleEndpoint)
|
creds = credentials.NewIAM(opts.IamRoleEndpoint)
|
||||||
} else {
|
} else {
|
||||||
return nil, errors.New("NewStorageBackend: AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
|
return nil, errwrap.Wrap(nil, "AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
|
||||||
}
|
}
|
||||||
|
|
||||||
options := minio.Options{
|
options := minio.Options{
|
||||||
@ -63,12 +64,12 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
|
|||||||
|
|
||||||
transport, err := minio.DefaultTransport(true)
|
transport, err := minio.DefaultTransport(true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("NewStorageBackend: failed to create default minio transport: %w", err)
|
return nil, errwrap.Wrap(err, "failed to create default minio transport")
|
||||||
}
|
}
|
||||||
|
|
||||||
if opts.EndpointInsecure {
|
if opts.EndpointInsecure {
|
||||||
if !options.Secure {
|
if !options.Secure {
|
||||||
return nil, errors.New("NewStorageBackend: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
|
return nil, errwrap.Wrap(nil, "AWS_ENDPOINT_INSECURE = true is only meaningful for https")
|
||||||
}
|
}
|
||||||
transport.TLSClientConfig.InsecureSkipVerify = true
|
transport.TLSClientConfig.InsecureSkipVerify = true
|
||||||
} else if opts.CACert != nil {
|
} else if opts.CACert != nil {
|
||||||
@ -81,7 +82,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
|
|||||||
|
|
||||||
mc, err := minio.New(opts.Endpoint, &options)
|
mc, err := minio.New(opts.Endpoint, &options)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("NewStorageBackend: error setting up minio client: %w", err)
|
return nil, errwrap.Wrap(err, "error setting up minio client")
|
||||||
}
|
}
|
||||||
|
|
||||||
return &s3Storage{
|
return &s3Storage{
|
||||||
@ -112,12 +113,12 @@ func (b *s3Storage) Copy(file string) error {
|
|||||||
if b.partSize > 0 {
|
if b.partSize > 0 {
|
||||||
srcFileInfo, err := os.Stat(file)
|
srcFileInfo, err := os.Stat(file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("(*s3Storage).Copy: error reading the local file: %w", err)
|
return errwrap.Wrap(err, "error reading the local file")
|
||||||
}
|
}
|
||||||
|
|
||||||
_, partSize, _, err := minio.OptimalPartInfo(srcFileInfo.Size(), uint64(b.partSize*1024*1024))
|
_, partSize, _, err := minio.OptimalPartInfo(srcFileInfo.Size(), uint64(b.partSize*1024*1024))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("(*s3Storage).Copy: error computing the optimal s3 part size: %w", err)
|
return errwrap.Wrap(err, "error computing the optimal s3 part size")
|
||||||
}
|
}
|
||||||
|
|
||||||
putObjectOptions.PartSize = uint64(partSize)
|
putObjectOptions.PartSize = uint64(partSize)
|
||||||
@ -125,14 +126,17 @@ func (b *s3Storage) Copy(file string) error {
|
|||||||
|
|
||||||
if _, err := b.client.FPutObject(context.Background(), b.bucket, filepath.Join(b.DestinationPath, name), file, putObjectOptions); err != nil {
|
if _, err := b.client.FPutObject(context.Background(), b.bucket, filepath.Join(b.DestinationPath, name), file, putObjectOptions); err != nil {
|
||||||
if errResp := minio.ToErrorResponse(err); errResp.Message != "" {
|
if errResp := minio.ToErrorResponse(err); errResp.Message != "" {
|
||||||
return fmt.Errorf(
|
return errwrap.Wrap(
|
||||||
"(*s3Storage).Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d",
|
nil,
|
||||||
|
fmt.Sprintf(
|
||||||
|
"error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d",
|
||||||
errResp.Message,
|
errResp.Message,
|
||||||
errResp.Code,
|
errResp.Code,
|
||||||
errResp.StatusCode,
|
errResp.StatusCode,
|
||||||
|
),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: %w", err)
|
return errwrap.Wrap(err, "error uploading backup to remote storage")
|
||||||
}
|
}
|
||||||
|
|
||||||
b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to bucket `%s`.", file, b.bucket)
|
b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to bucket `%s`.", file, b.bucket)
|
||||||
@ -152,9 +156,9 @@ func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.Pr
|
|||||||
for candidate := range candidates {
|
for candidate := range candidates {
|
||||||
lenCandidates++
|
lenCandidates++
|
||||||
if candidate.Err != nil {
|
if candidate.Err != nil {
|
||||||
return nil, fmt.Errorf(
|
return nil, errwrap.Wrap(
|
||||||
"(*s3Storage).Prune: error looking up candidates from remote storage! %w",
|
|
||||||
candidate.Err,
|
candidate.Err,
|
||||||
|
"error looking up candidates from remote storage",
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
if candidate.LastModified.Before(deadline) {
|
if candidate.LastModified.Before(deadline) {
|
||||||
|
@ -4,7 +4,6 @@
|
|||||||
package ssh
|
package ssh
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
@ -13,6 +12,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/offen/docker-volume-backup/internal/errwrap"
|
||||||
"github.com/offen/docker-volume-backup/internal/storage"
|
"github.com/offen/docker-volume-backup/internal/storage"
|
||||||
"github.com/pkg/sftp"
|
"github.com/pkg/sftp"
|
||||||
"golang.org/x/crypto/ssh"
|
"golang.org/x/crypto/ssh"
|
||||||
@ -47,20 +47,20 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
|
|||||||
if _, err := os.Stat(opts.IdentityFile); err == nil {
|
if _, err := os.Stat(opts.IdentityFile); err == nil {
|
||||||
key, err := os.ReadFile(opts.IdentityFile)
|
key, err := os.ReadFile(opts.IdentityFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.New("NewStorageBackend: error reading the private key")
|
return nil, errwrap.Wrap(nil, "error reading the private key")
|
||||||
}
|
}
|
||||||
|
|
||||||
var signer ssh.Signer
|
var signer ssh.Signer
|
||||||
if opts.IdentityPassphrase != "" {
|
if opts.IdentityPassphrase != "" {
|
||||||
signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(opts.IdentityPassphrase))
|
signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(opts.IdentityPassphrase))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.New("NewStorageBackend: error parsing the encrypted private key")
|
return nil, errwrap.Wrap(nil, "error parsing the encrypted private key")
|
||||||
}
|
}
|
||||||
authMethods = append(authMethods, ssh.PublicKeys(signer))
|
authMethods = append(authMethods, ssh.PublicKeys(signer))
|
||||||
} else {
|
} else {
|
||||||
signer, err = ssh.ParsePrivateKey(key)
|
signer, err = ssh.ParsePrivateKey(key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.New("NewStorageBackend: error parsing the private key")
|
return nil, errwrap.Wrap(nil, "error parsing the private key")
|
||||||
}
|
}
|
||||||
authMethods = append(authMethods, ssh.PublicKeys(signer))
|
authMethods = append(authMethods, ssh.PublicKeys(signer))
|
||||||
}
|
}
|
||||||
@ -74,7 +74,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
|
|||||||
sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", opts.HostName, opts.Port), sshClientConfig)
|
sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", opts.HostName, opts.Port), sshClientConfig)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("NewStorageBackend: error creating ssh client: %w", err)
|
return nil, errwrap.Wrap(err, "error creating ssh client")
|
||||||
}
|
}
|
||||||
_, _, err = sshClient.SendRequest("keepalive", false, nil)
|
_, _, err = sshClient.SendRequest("keepalive", false, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -87,7 +87,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
|
|||||||
sftp.MaxConcurrentRequestsPerFile(64),
|
sftp.MaxConcurrentRequestsPerFile(64),
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("NewStorageBackend: error creating sftp client: %w", err)
|
return nil, errwrap.Wrap(err, "error creating sftp client")
|
||||||
}
|
}
|
||||||
|
|
||||||
return &sshStorage{
|
return &sshStorage{
|
||||||
@ -111,13 +111,13 @@ func (b *sshStorage) Copy(file string) error {
|
|||||||
source, err := os.Open(file)
|
source, err := os.Open(file)
|
||||||
_, name := path.Split(file)
|
_, name := path.Split(file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("(*sshStorage).Copy: error reading the file to be uploaded: %w", err)
|
return errwrap.Wrap(err, " error reading the file to be uploaded")
|
||||||
}
|
}
|
||||||
defer source.Close()
|
defer source.Close()
|
||||||
|
|
||||||
destination, err := b.sftpClient.Create(filepath.Join(b.DestinationPath, name))
|
destination, err := b.sftpClient.Create(filepath.Join(b.DestinationPath, name))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("(*sshStorage).Copy: error creating file: %w", err)
|
return errwrap.Wrap(err, "error creating file")
|
||||||
}
|
}
|
||||||
defer destination.Close()
|
defer destination.Close()
|
||||||
|
|
||||||
@ -127,27 +127,27 @@ func (b *sshStorage) Copy(file string) error {
|
|||||||
if err == io.EOF {
|
if err == io.EOF {
|
||||||
tot, err := destination.Write(chunk[:num])
|
tot, err := destination.Write(chunk[:num])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("(*sshStorage).Copy: error uploading the file: %w", err)
|
return errwrap.Wrap(err, "error uploading the file")
|
||||||
}
|
}
|
||||||
|
|
||||||
if tot != len(chunk[:num]) {
|
if tot != len(chunk[:num]) {
|
||||||
return errors.New("(*sshStorage).Copy: failed to write stream")
|
return errwrap.Wrap(nil, "failed to write stream")
|
||||||
}
|
}
|
||||||
|
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("(*sshStorage).Copy: error uploading the file: %w", err)
|
return errwrap.Wrap(err, "error uploading the file")
|
||||||
}
|
}
|
||||||
|
|
||||||
tot, err := destination.Write(chunk[:num])
|
tot, err := destination.Write(chunk[:num])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("(*sshStorage).Copy: error uploading the file: %w", err)
|
return errwrap.Wrap(err, "error uploading the file")
|
||||||
}
|
}
|
||||||
|
|
||||||
if tot != len(chunk[:num]) {
|
if tot != len(chunk[:num]) {
|
||||||
return fmt.Errorf("(*sshStorage).Copy: failed to write stream")
|
return errwrap.Wrap(nil, "failed to write stream")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -160,7 +160,7 @@ func (b *sshStorage) Copy(file string) error {
|
|||||||
func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
|
func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
|
||||||
candidates, err := b.sftpClient.ReadDir(b.DestinationPath)
|
candidates, err := b.sftpClient.ReadDir(b.DestinationPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("(*sshStorage).Prune: error reading directory: %w", err)
|
return nil, errwrap.Wrap(err, "error reading directory")
|
||||||
}
|
}
|
||||||
|
|
||||||
var matches []string
|
var matches []string
|
||||||
@ -181,7 +181,7 @@ func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.P
|
|||||||
pruneErr := b.DoPrune(b.Name(), len(matches), len(candidates), deadline, func() error {
|
pruneErr := b.DoPrune(b.Name(), len(matches), len(candidates), deadline, func() error {
|
||||||
for _, match := range matches {
|
for _, match := range matches {
|
||||||
if err := b.sftpClient.Remove(filepath.Join(b.DestinationPath, match)); err != nil {
|
if err := b.sftpClient.Remove(filepath.Join(b.DestinationPath, match)); err != nil {
|
||||||
return fmt.Errorf("(*sshStorage).Prune: error removing file: %w", err)
|
return errwrap.Wrap(err, "error removing file")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -4,8 +4,9 @@
package storage

import (
	"fmt"
	"time"

	"github.com/offen/docker-volume-backup/internal/errwrap"
)

// Backend is an interface for defining functions which all storage providers support.
@ -26,7 +27,6 @@ type LogLevel int
const (
	LogLevelInfo LogLevel = iota
	LogLevelWarning
	LogLevelError
)

type Log func(logType LogLevel, context string, msg string, params ...any)
@ -47,7 +47,7 @@ func (b *StorageBackend) DoPrune(context string, lenMatches, lenCandidates int,
	formattedDeadline, err := deadline.Local().MarshalText()
	if err != nil {
		return fmt.Errorf("(*StorageBackend).DoPrune: error marshaling deadline: %w", err)
		return errwrap.Wrap(err, "error marshaling deadline")
	}
	b.Log(LogLevelInfo, context,
		"Pruned %d out of %d backups as they were older than the given deadline of %s.",

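With LogLevelError gone, a Log implementation only needs to distinguish the two remaining levels. A minimal sketch of a function satisfying the Log type above (logging via the standard library logger is an assumption, not part of this commit):

package main

import (
	"fmt"
	"log"

	"github.com/offen/docker-volume-backup/internal/storage"
)

// logFunc satisfies the storage.Log function type shown above. Writing
// to the standard library logger is an illustrative choice only.
func logFunc(level storage.LogLevel, context string, msg string, params ...any) {
	prefix := "INFO"
	if level == storage.LogLevelWarning {
		prefix = "WARN"
	}
	log.Printf("[%s] %s: %s", prefix, context, fmt.Sprintf(msg, params...))
}

func main() {
	logFunc(storage.LogLevelInfo, "local", "Stored copy of backup `%s`.", "/archive/backup.tar.gz")
}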
@ -4,7 +4,6 @@
package webdav

import (
	"errors"
	"fmt"
	"io/fs"
	"net/http"
@ -14,6 +13,7 @@ import (
	"strings"
	"time"

	"github.com/offen/docker-volume-backup/internal/errwrap"
	"github.com/offen/docker-volume-backup/internal/storage"
	"github.com/studio-b12/gowebdav"
)
@ -36,14 +36,14 @@ type Config struct {
// NewStorageBackend creates and initializes a new WebDav storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
	if opts.Username == "" || opts.Password == "" {
		return nil, errors.New("NewStorageBackend: WEBDAV_URL is defined, but no credentials were provided")
		return nil, errwrap.Wrap(nil, "WEBDAV_URL is defined, but no credentials were provided")
	} else {
		webdavClient := gowebdav.NewClient(opts.URL, opts.Username, opts.Password)

		if opts.URLInsecure {
			defaultTransport, ok := http.DefaultTransport.(*http.Transport)
			if !ok {
				return nil, errors.New("NewStorageBackend: unexpected error when asserting type for http.DefaultTransport")
				return nil, errwrap.Wrap(nil, "unexpected error when asserting type for http.DefaultTransport")
			}
			webdavTransport := defaultTransport.Clone()
			webdavTransport.TLSClientConfig.InsecureSkipVerify = opts.URLInsecure
@ -69,16 +69,16 @@ func (b *webDavStorage) Name() string {
func (b *webDavStorage) Copy(file string) error {
	_, name := path.Split(file)
	if err := b.client.MkdirAll(b.DestinationPath, 0644); err != nil {
		return fmt.Errorf("(*webDavStorage).Copy: error creating directory '%s' on server: %w", b.DestinationPath, err)
		return errwrap.Wrap(err, fmt.Sprintf("error creating directory '%s' on server", b.DestinationPath))
	}

	r, err := os.Open(file)
	if err != nil {
		return fmt.Errorf("(*webDavStorage).Copy: error opening the file to be uploaded: %w", err)
		return errwrap.Wrap(err, "error opening the file to be uploaded")
	}

	if err := b.client.WriteStream(filepath.Join(b.DestinationPath, name), r, 0644); err != nil {
		return fmt.Errorf("(*webDavStorage).Copy: error uploading the file: %w", err)
		return errwrap.Wrap(err, "error uploading the file")
	}
	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' to '%s' at path '%s'.", file, b.url, b.DestinationPath)

@ -89,7 +89,7 @@ func (b *webDavStorage) Copy(file string) error {
func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
	candidates, err := b.client.ReadDir(b.DestinationPath)
	if err != nil {
		return nil, fmt.Errorf("(*webDavStorage).Prune: error looking up candidates from remote storage: %w", err)
		return nil, errwrap.Wrap(err, "error looking up candidates from remote storage")
	}
	var matches []fs.FileInfo
	var lenCandidates int
@ -111,7 +111,7 @@ func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storag
	pruneErr := b.DoPrune(b.Name(), len(matches), lenCandidates, deadline, func() error {
		for _, match := range matches {
			if err := b.client.Remove(filepath.Join(b.DestinationPath, match.Name())); err != nil {
				return fmt.Errorf("(*webDavStorage).Prune: error removing file: %w", err)
				return errwrap.Wrap(err, "error removing file")
			}
		}
		return nil