Mirror of https://github.com/offen/docker-volume-backup.git
improve logging messages
Commit d195e8967f (parent 188c14c00f)
@@ -1,6 +1,3 @@
-# Copyright 2020 - Offen Authors <hioffen@posteo.de>
-# SPDX-License-Identifier: Apache-2.0
-
 # EditorConfig is awesome: http://EditorConfig.org
 
 # top-most EditorConfig file
@@ -31,23 +31,16 @@ import (
 )
 
 func main() {
-    unlock, err := lock("/var/dockervolumebackup.lock")
-    if err != nil {
-        panic(err)
-    }
+    unlock := lock("/var/dockervolumebackup.lock")
     defer unlock()
 
     s := &script{}
-    must(s.init)()
-    err = s.stopContainersAndRun(s.takeBackup)
-    if err != nil {
-        panic(err)
-    }
-    must(s.encryptBackup)()
-    must(s.copyBackup)()
-    must(s.cleanBackup)()
-    must(s.pruneOldBackups)()
+    s.must(s.init())
+    s.must(s.stopContainersAndRun(s.takeBackup))
+    s.must(s.encryptBackup())
+    s.must(s.copyBackup())
+    s.must(s.cleanBackup())
+    s.must(s.pruneOldBackups())
 }
 
 type script struct {
@@ -58,14 +51,17 @@ type script struct {
     file string
     bucket string
     archive string
+    sources string
+    passphrase string
 }
 
-// lock opens a lockfile, keeping it open until the caller invokes the returned
-// release func.
-func lock(lockfile string) (func() error, error) {
+// lock opens a lockfile at the given location, keeping it locked until the
+// caller invokes the returned release func. When invoked while the file is
+// still locked the function panics.
+func lock(lockfile string) func() error {
     lf, err := os.OpenFile(lockfile, os.O_CREATE, os.ModeAppend)
     if err != nil {
-        return nil, fmt.Errorf("lock: error opening lock file: %w", err)
+        panic(err)
     }
     return func() error {
         if err := lf.Close(); err != nil {
@@ -75,7 +71,7 @@ func lock(lockfile string) (func() error, error) {
             return fmt.Errorf("lock: error removing lock file: %w", err)
         }
         return nil
-    }, nil
+    }
 }
 
 // init creates all resources needed for the script to perform actions against
@@ -120,6 +116,8 @@ func (s *script) init() error {
     }
     s.file = path.Join("/tmp", file)
     s.archive = os.Getenv("BACKUP_ARCHIVE")
+    s.sources = os.Getenv("BACKUP_SOURCES")
+    s.passphrase = os.Getenv("GPG_PASSPHRASE")
     return nil
 }
 
@@ -137,21 +135,27 @@ func (s *script) stopContainersAndRun(thunk func() error) error {
         return fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err)
     }
 
+    containerLabel := fmt.Sprintf(
+        "docker-volume-backup.stop-during-backup=%s",
+        os.Getenv("BACKUP_STOP_CONTAINER_LABEL"),
+    )
     containersToStop, err := s.cli.ContainerList(s.ctx, types.ContainerListOptions{
         Quiet: true,
         Filters: filters.NewArgs(filters.KeyValuePair{
             Key: "label",
-            Value: fmt.Sprintf(
-                "docker-volume-backup.stop-during-backup=%s",
-                os.Getenv("BACKUP_STOP_CONTAINER_LABEL"),
-            ),
+            Value: containerLabel,
         }),
     })
 
     if err != nil {
         return fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err)
     }
-    s.logger.Infof("Stopping %d out of %d running containers\n", len(containersToStop), len(allContainers))
+    s.logger.Infof(
+        "Stopping %d containers labeled `%s` out of %d running containers.",
+        len(containersToStop),
+        containerLabel,
+        len(allContainers),
+    )
 
     var stoppedContainers []types.Container
     var errors []error
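
For context on the hunk above, here is a minimal standalone sketch (not part of this diff) of how the extracted containerLabel value is put together. The label value used here is purely illustrative; the script reads it from BACKUP_STOP_CONTAINER_LABEL at runtime:

package main

import (
    "fmt"
    "os"
)

func main() {
    // Illustrative value only; whatever BACKUP_STOP_CONTAINER_LABEL is set to
    // ends up on the right-hand side of the label filter.
    if os.Getenv("BACKUP_STOP_CONTAINER_LABEL") == "" {
        os.Setenv("BACKUP_STOP_CONTAINER_LABEL", "true")
    }
    containerLabel := fmt.Sprintf(
        "docker-volume-backup.stop-during-backup=%s",
        os.Getenv("BACKUP_STOP_CONTAINER_LABEL"),
    )
    fmt.Println(containerLabel) // e.g. docker-volume-backup.stop-during-backup=true
}

Since the same containerLabel string is now interpolated into the "Stopping %d containers labeled ..." log line, the effective label value is visible in the logs as well.
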
@@ -237,10 +241,10 @@ func (s *script) takeBackup() error {
         return fmt.Errorf("takeBackup: error formatting filename template: %w", err)
     }
     s.file = strings.TrimSpace(string(outBytes))
-    if err := targz.Compress(os.Getenv("BACKUP_SOURCES"), s.file); err != nil {
+    if err := targz.Compress(s.sources, s.file); err != nil {
         return fmt.Errorf("takeBackup: error compressing backup folder: %w", err)
     }
-    s.logger.Infof("Successfully created backup from %s at %s", os.Getenv("BACKUP_SOURCES"), s.file)
+    s.logger.Infof("Successfully created backup of `%s` at `%s`.", s.sources, s.file)
     return nil
 }
 
@@ -248,14 +252,13 @@ func (s *script) takeBackup() error {
 // In case no passphrase is given it returns early, leaving the backup file
 // untouched.
 func (s *script) encryptBackup() error {
-    passphrase := os.Getenv("GPG_PASSPHRASE")
-    if passphrase == "" {
+    if s.passphrase == "" {
         return nil
     }
 
     buf := bytes.NewBuffer(nil)
     _, name := path.Split(s.file)
-    pt, err := openpgp.SymmetricallyEncrypt(buf, []byte(passphrase), &openpgp.FileHints{
+    pt, err := openpgp.SymmetricallyEncrypt(buf, []byte(s.passphrase), &openpgp.FileHints{
         IsBinary: true,
         FileName: name,
     }, nil)
@@ -284,7 +287,7 @@ func (s *script) encryptBackup() error {
         return fmt.Errorf("encryptBackup: error removing unencrpyted backup: %w", err)
     }
     s.file = gpgFile
-    s.logger.Info("Successfully encrypted backup using given passphrase.")
+    s.logger.Infof("Successfully encrypted backup using given passphrase, saving as `%s`.", s.file)
     return nil
 }
 
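
As context for the encryptBackup hunks above, a minimal sketch of symmetric encryption with golang.org/x/crypto/openpgp, mirroring the call the script makes with s.passphrase. The passphrase, payload and file name below are placeholders, not values from the project:

package main

import (
    "bytes"
    "fmt"
    "io"

    "golang.org/x/crypto/openpgp"
)

func main() {
    passphrase := "example-passphrase" // stand-in for GPG_PASSPHRASE
    buf := bytes.NewBuffer(nil)
    // SymmetricallyEncrypt returns a WriteCloser; everything written to it is
    // encrypted with the passphrase and ends up in buf once it is closed.
    pt, err := openpgp.SymmetricallyEncrypt(buf, []byte(passphrase), &openpgp.FileHints{
        IsBinary: true,
        FileName: "backup.tar.gz", // stand-in for the backup file name
    }, nil)
    if err != nil {
        panic(err)
    }
    if _, err := io.WriteString(pt, "backup contents"); err != nil {
        panic(err)
    }
    if err := pt.Close(); err != nil {
        panic(err)
    }
    fmt.Printf("encrypted %d bytes\n", buf.Len())
}
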
@@ -299,7 +302,7 @@ func (s *script) copyBackup() error {
         if err != nil {
             return fmt.Errorf("copyBackup: error uploading backup to remote storage: %w", err)
         }
-        s.logger.Infof("Successfully uploaded backup %s to bucket %s", s.file, s.bucket)
+        s.logger.Infof("Successfully uploaded a copy of backup `%s` to bucket `%s`", s.file, s.bucket)
     }
 
     if s.archive != "" {
@@ -308,7 +311,7 @@ func (s *script) copyBackup() error {
                 return fmt.Errorf("copyBackup: error copying file to local archive: %w", err)
             }
         }
-        s.logger.Infof("Successfully stored copy of backup %s in local archive %s", s.file, s.archive)
+        s.logger.Infof("Successfully stored copy of backup `%s` in local archive `%s`", s.file, s.archive)
     }
     return nil
 }
@@ -334,11 +337,12 @@ func (s *script) pruneOldBackups() error {
     if err != nil {
         return fmt.Errorf("pruneOldBackups: error parsing BACKUP_RETENTION_DAYS as int: %w", err)
     }
-    sleepFor, err := time.ParseDuration(os.Getenv("BACKUP_PRUNING_LEEWAY"))
+    leeway := os.Getenv("BACKUP_PRUNING_LEEWAY")
+    sleepFor, err := time.ParseDuration(leeway)
     if err != nil {
         return fmt.Errorf("pruneBackups: error parsing given leeway value: %w", err)
     }
-    s.logger.Infof("Sleeping for %s before pruning backups.", os.Getenv("BACKUP_PRUNING_LEEWAY"))
+    s.logger.Infof("Sleeping for %s before pruning backups.", leeway)
     time.Sleep(sleepFor)
 
     s.logger.Infof("Trying to prune backups older than %d days now.", retentionDays)
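
The leeway value is parsed with the standard library's time.ParseDuration, so BACKUP_PRUNING_LEEWAY accepts Go duration strings such as "30s", "1m" or "1h30m". A minimal sketch, with an illustrative value:

package main

import (
    "fmt"
    "time"
)

func main() {
    leeway := "1m" // illustrative value for BACKUP_PRUNING_LEEWAY
    sleepFor, err := time.ParseDuration(leeway)
    if err != nil {
        // in the script, an invalid value surfaces as a pruneBackups error
        panic(err)
    }
    fmt.Printf("Sleeping for %s before pruning backups.\n", leeway)
    _ = sleepFor // the script calls time.Sleep(sleepFor) here
}
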
@@ -391,7 +395,10 @@ func (s *script) pruneOldBackups() error {
                 lenCandidates,
             )
         } else if len(matches) != 0 && len(matches) == lenCandidates {
-            s.logger.Warnf("The current configuration would delete all %d remote backups. Refusing to do so.", len(matches))
+            s.logger.Warnf(
+                "The current configuration would delete all %d remote backup copies. Refusing to do so, please check your configuration.",
+                len(matches),
+            )
         } else {
             s.logger.Infof("None of %d remote backups were pruned.", lenCandidates)
         }
@@ -443,7 +450,10 @@ func (s *script) pruneOldBackups() error {
                 len(candidates),
             )
         } else if len(matches) != 0 && len(matches) == len(candidates) {
-            s.logger.Warnf("The current configuration would delete all %d local backups. Refusing to do so.", len(matches))
+            s.logger.Warnf(
+                "The current configuration would delete all %d local backup copies. Refusing to do so, please check your configuration.",
+                len(matches),
+            )
         } else {
             s.logger.Infof("None of %d local backups were pruned.", len(candidates))
         }
@@ -451,11 +461,13 @@ func (s *script) pruneOldBackups() error {
     return nil
 }
 
-func must(f func() error) func() {
-    return func() {
-        if err := f(); err != nil {
+func (s *script) must(err error) {
+    if err != nil {
+        if s.logger == nil {
             panic(err)
         }
+        s.logger.Errorf("Fatal error running backup: %s", err)
+        os.Exit(1)
     }
 }
 
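
A minimal standalone sketch of how the reworked must helper behaves: before a logger exists it panics, afterwards it logs the error and exits with a non-zero status. The logger type below is a stand-in with an Errorf method, not the logger the project actually uses:

package main

import (
    "errors"
    "fmt"
    "os"
)

// logger is a hypothetical stand-in; any type with an Errorf method fits the pattern.
type logger struct{}

func (l *logger) Errorf(format string, args ...interface{}) {
    fmt.Fprintf(os.Stderr, format+"\n", args...)
}

type script struct {
    logger *logger
}

func (s *script) must(err error) {
    if err != nil {
        if s.logger == nil {
            // before init has set up logging, fall back to panicking
            panic(err)
        }
        s.logger.Errorf("Fatal error running backup: %s", err)
        os.Exit(1)
    }
}

func main() {
    s := &script{logger: &logger{}}
    s.must(nil)                        // no-op on success
    s.must(errors.New("some failure")) // logs the error and exits with code 1
}
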