Mirror of https://github.com/offen/docker-volume-backup.git, synced 2024-11-10 00:30:29 +01:00
Print context in log field instead of prepending to message (#260)
* Print context in log field instead of prepending to message
* Log messages on pruning do not need a description anymore
* Remove redundant information from logs and errors
parent 88a4794083 · commit 3d7677f02a
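At its core the change swaps string concatenation for a structured log attribute: the per-storage context is no longer baked into the message text but travels as a separate key. A minimal, self-contained sketch of the before/after call shapes, assuming the logger is a *slog.Logger as the diff below suggests (variable names and the sample output are illustrative):

package main

import (
	"fmt"
	"log/slog"
	"os"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
	context, msg := "S3", "Removed %d backups."

	// Before: context prepended to the message string.
	logger.Warn(fmt.Sprintf("["+context+"] "+msg, 2))
	// time=... level=WARN msg="[S3] Removed 2 backups."

	// After: context carried in a dedicated "storage" field.
	logger.Warn(fmt.Sprintf(msg, 2), "storage", context)
	// time=... level=WARN msg="Removed 2 backups." storage=S3
}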
@@ -126,11 +126,11 @@ func newScript() (*script, error) {
 	logFunc := func(logType storage.LogLevel, context string, msg string, params ...any) {
 		switch logType {
 		case storage.LogLevelWarning:
-			s.logger.Warn(fmt.Sprintf("["+context+"] "+msg, params...))
+			s.logger.Warn(fmt.Sprintf(msg, params...), "storage", context)
 		case storage.LogLevelError:
-			s.logger.Error(fmt.Sprintf("["+context+"] "+msg, params...))
+			s.logger.Error(fmt.Sprintf(msg, params...), "storage", context)
 		default:
-			s.logger.Info(fmt.Sprintf("["+context+"] "+msg, params...))
+			s.logger.Info(fmt.Sprintf(msg, params...), "storage", context)
 		}
 	}
 
@@ -132,7 +132,7 @@ func (b *azureBlobStorage) Prune(deadline time.Time, pruningPrefix string) (*sto
 		Pruned: uint(len(matches)),
 	}
 
-	if err := b.DoPrune(b.Name(), len(matches), int(totalCount), "Azure Blob Storage backup(s)", func() error {
+	if err := b.DoPrune(b.Name(), len(matches), int(totalCount), func() error {
 		wg := sync.WaitGroup{}
 		wg.Add(len(matches))
 		var errs []error
@@ -95,11 +95,11 @@ func (b *dropboxStorage) Copy(file string) error {
 		switch err := err.(type) {
 		case files.CreateFolderV2APIError:
 			if err.EndpointError.Path.Tag != files.WriteErrorConflict {
-				return fmt.Errorf("(*dropboxStorage).Copy: Error creating directory '%s' in Dropbox: %w", b.DestinationPath, err)
+				return fmt.Errorf("(*dropboxStorage).Copy: Error creating directory '%s': %w", b.DestinationPath, err)
 			}
-			b.Log(storage.LogLevelInfo, b.Name(), "Destination path '%s' already exists in Dropbox, no new directory required.", b.DestinationPath)
+			b.Log(storage.LogLevelInfo, b.Name(), "Destination path '%s' already exists, no new directory required.", b.DestinationPath)
 		default:
-			return fmt.Errorf("(*dropboxStorage).Copy: Error creating directory '%s' in Dropbox: %w", b.DestinationPath, err)
+			return fmt.Errorf("(*dropboxStorage).Copy: Error creating directory '%s': %w", b.DestinationPath, err)
 		}
 	}
 
@@ -111,7 +111,7 @@ func (b *dropboxStorage) Copy(file string) error {
 
 	// Start new upload session and get session id
 
-	b.Log(storage.LogLevelInfo, b.Name(), "Starting upload session for backup '%s' to Dropbox at path '%s'.", file, b.DestinationPath)
+	b.Log(storage.LogLevelInfo, b.Name(), "Starting upload session for backup '%s' at path '%s'.", file, b.DestinationPath)
 
 	var sessionId string
 	uploadSessionStartArg := files.NewUploadSessionStartArg()
@@ -201,7 +201,7 @@ loop:
 		return fmt.Errorf("(*dropboxStorage).Copy: Error finishing the upload session: %w", err)
 	}
 
-	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' to Dropbox at path '%s'.", file, b.DestinationPath)
+	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' at path '%s'.", file, b.DestinationPath)
 
 	return nil
 }
@@ -245,7 +245,7 @@ func (b *dropboxStorage) Prune(deadline time.Time, pruningPrefix string) (*stora
 		Pruned: uint(len(matches)),
 	}
 
-	if err := b.DoPrune(b.Name(), len(matches), lenCandidates, "Dropbox backup(s)", func() error {
+	if err := b.DoPrune(b.Name(), len(matches), lenCandidates, func() error {
 		for _, match := range matches {
 			if _, err := b.client.DeleteV2(files.NewDeleteArg(filepath.Join(b.DestinationPath, match.Name))); err != nil {
 				return fmt.Errorf("(*dropboxStorage).Prune: Error removing file from Dropbox storage: %w", err)
@@ -47,9 +47,9 @@ func (b *localStorage) Copy(file string) error {
 	_, name := path.Split(file)
 
 	if err := copyFile(file, path.Join(b.DestinationPath, name)); err != nil {
-		return fmt.Errorf("(*localStorage).Copy: Error copying file to local archive: %w", err)
+		return fmt.Errorf("(*localStorage).Copy: Error copying file to archive: %w", err)
 	}
-	b.Log(storage.LogLevelInfo, b.Name(), "Stored copy of backup `%s` in local archive `%s`.", file, b.DestinationPath)
+	b.Log(storage.LogLevelInfo, b.Name(), "Stored copy of backup `%s` in `%s`.", file, b.DestinationPath)
 
 	if b.latestSymlink != "" {
 		symlink := path.Join(b.DestinationPath, b.latestSymlink)
@@ -116,7 +116,7 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
 		Pruned: uint(len(matches)),
 	}
 
-	if err := b.DoPrune(b.Name(), len(matches), len(candidates), "local backup(s)", func() error {
+	if err := b.DoPrune(b.Name(), len(matches), len(candidates), func() error {
 		var removeErrors []error
 		for _, match := range matches {
 			if err := os.Remove(match); err != nil {
@@ -125,7 +125,7 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
 		}
 		if len(removeErrors) != 0 {
 			return fmt.Errorf(
-				"(*localStorage).Prune: %d error(s) deleting local files, starting with: %w",
+				"(*localStorage).Prune: %d error(s) deleting files, starting with: %w",
 				len(removeErrors),
 				errors.Join(removeErrors...),
 			)
@@ -125,7 +125,12 @@ func (b *s3Storage) Copy(file string) error {
 
 	if _, err := b.client.FPutObject(context.Background(), b.bucket, filepath.Join(b.DestinationPath, name), file, putObjectOptions); err != nil {
 		if errResp := minio.ToErrorResponse(err); errResp.Message != "" {
-			return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode)
+			return fmt.Errorf(
+				"(*s3Storage).Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d",
+				errResp.Message,
+				errResp.Code,
+				errResp.StatusCode,
+			)
 		}
 		return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: %w", err)
 	}
@@ -148,7 +153,7 @@ func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.Pr
 		lenCandidates++
 		if candidate.Err != nil {
 			return nil, fmt.Errorf(
-				"(*s3Storage).Prune: Error looking up candidates from remote storage! %w",
+				"(*s3Storage).Prune: error looking up candidates from remote storage! %w",
 				candidate.Err,
 			)
 		}
@@ -162,7 +167,7 @@ func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.Pr
 		Pruned: uint(len(matches)),
 	}
 
-	if err := b.DoPrune(b.Name(), len(matches), lenCandidates, "remote backup(s)", func() error {
+	if err := b.DoPrune(b.Name(), len(matches), lenCandidates, func() error {
 		objectsCh := make(chan minio.ObjectInfo)
 		go func() {
 			for _, match := range matches {
@@ -75,7 +75,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
 	sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", opts.HostName, opts.Port), sshClientConfig)
 
 	if err != nil {
-		return nil, fmt.Errorf("NewStorageBackend: Error creating ssh client: %w", err)
+		return nil, fmt.Errorf("NewStorageBackend: error creating ssh client: %w", err)
 	}
 	_, _, err = sshClient.SendRequest("keepalive", false, nil)
 	if err != nil {
@@ -108,13 +108,13 @@ func (b *sshStorage) Copy(file string) error {
 	source, err := os.Open(file)
 	_, name := path.Split(file)
 	if err != nil {
-		return fmt.Errorf("(*sshStorage).Copy: Error reading the file to be uploaded: %w", err)
+		return fmt.Errorf("(*sshStorage).Copy: error reading the file to be uploaded: %w", err)
 	}
 	defer source.Close()
 
 	destination, err := b.sftpClient.Create(filepath.Join(b.DestinationPath, name))
 	if err != nil {
-		return fmt.Errorf("(*sshStorage).Copy: Error creating file on SSH storage: %w", err)
+		return fmt.Errorf("(*sshStorage).Copy: error creating file: %w", err)
 	}
 	defer destination.Close()
 
@@ -124,7 +124,7 @@ func (b *sshStorage) Copy(file string) error {
 		if err == io.EOF {
 			tot, err := destination.Write(chunk[:num])
 			if err != nil {
-				return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage: %w", err)
+				return fmt.Errorf("(*sshStorage).Copy: error uploading the file: %w", err)
 			}
 
 			if tot != len(chunk[:num]) {
@@ -135,12 +135,12 @@ func (b *sshStorage) Copy(file string) error {
 		}
 
 		if err != nil {
-			return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage: %w", err)
+			return fmt.Errorf("(*sshStorage).Copy: error uploading the file: %w", err)
 		}
 
 		tot, err := destination.Write(chunk[:num])
 		if err != nil {
-			return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage: %w", err)
+			return fmt.Errorf("(*sshStorage).Copy: error uploading the file: %w", err)
 		}
 
 		if tot != len(chunk[:num]) {
@@ -148,7 +148,7 @@ func (b *sshStorage) Copy(file string) error {
 		}
 	}
 
-	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, b.hostName, b.DestinationPath)
+	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to '%s' at path '%s'.", file, b.hostName, b.DestinationPath)
 
 	return nil
 }
@@ -157,7 +157,7 @@ func (b *sshStorage) Copy(file string) error {
 func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
 	candidates, err := b.sftpClient.ReadDir(b.DestinationPath)
 	if err != nil {
-		return nil, fmt.Errorf("(*sshStorage).Prune: Error reading directory from SSH storage: %w", err)
+		return nil, fmt.Errorf("(*sshStorage).Prune: error reading directory: %w", err)
 	}
 
 	var matches []string
@@ -175,10 +175,10 @@ func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.P
 		Pruned: uint(len(matches)),
 	}
 
-	if err := b.DoPrune(b.Name(), len(matches), len(candidates), "SSH backup(s)", func() error {
+	if err := b.DoPrune(b.Name(), len(matches), len(candidates), func() error {
 		for _, match := range matches {
 			if err := b.sftpClient.Remove(filepath.Join(b.DestinationPath, match)); err != nil {
-				return fmt.Errorf("(*sshStorage).Prune: Error removing file from SSH storage: %w", err)
+				return fmt.Errorf("(*sshStorage).Prune: error removing file: %w", err)
 			}
 		}
 		return nil
@@ -39,23 +39,22 @@ type PruneStats struct {
 
 // DoPrune holds general control flow that applies to any kind of storage.
 // Callers can pass in a thunk that performs the actual deletion of files.
-func (b *StorageBackend) DoPrune(context string, lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error {
+func (b *StorageBackend) DoPrune(context string, lenMatches, lenCandidates int, doRemoveFiles func() error) error {
 	if lenMatches != 0 && lenMatches != lenCandidates {
 		if err := doRemoveFiles(); err != nil {
 			return err
 		}
 		b.Log(LogLevelInfo, context,
-			"Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.",
+			"Pruned %d out of %d backups as their age exceeded the configured retention period of %d days.",
 			lenMatches,
 			lenCandidates,
-			description,
 			b.RetentionDays,
 		)
 	} else if lenMatches != 0 && lenMatches == lenCandidates {
-		b.Log(LogLevelWarning, context, "The current configuration would delete all %d existing %s.", lenMatches, description)
+		b.Log(LogLevelWarning, context, "The current configuration would delete all %d existing backups.", lenMatches)
 		b.Log(LogLevelWarning, context, "Refusing to do so, please check your configuration.")
 	} else {
-		b.Log(LogLevelInfo, context, "None of %d existing %s were pruned.", lenCandidates, description)
+		b.Log(LogLevelInfo, context, "None of %d existing backups were pruned.", lenCandidates)
 	}
 	return nil
 }
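The doc comment in the hunk above describes DoPrune as shared control flow around a deletion thunk. Below is a minimal, self-contained sketch of that flow after the description parameter was dropped; the function, names, and printed messages are simplified stand-ins for illustration, not the project's actual API:

package main

import "fmt"

// doPrune mirrors the branching shown in the hunk above, with logging
// reduced to fmt.Printf so the example runs on its own.
func doPrune(lenMatches, lenCandidates int, doRemoveFiles func() error) error {
	if lenMatches != 0 && lenMatches != lenCandidates {
		if err := doRemoveFiles(); err != nil {
			return err
		}
		fmt.Printf("Pruned %d out of %d backups.\n", lenMatches, lenCandidates)
	} else if lenMatches != 0 && lenMatches == lenCandidates {
		fmt.Printf("The current configuration would delete all %d existing backups.\n", lenMatches)
		fmt.Println("Refusing to do so, please check your configuration.")
	} else {
		fmt.Printf("None of %d existing backups were pruned.\n", lenCandidates)
	}
	return nil
}

func main() {
	matches := []string{"backup-2023-12-31.tar.gz"}
	// The thunk performs the backend-specific deletion of matched files.
	doPrune(len(matches), 3, func() error {
		return nil
	})
}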
@@ -69,18 +69,18 @@ func (b *webDavStorage) Name() string {
 func (b *webDavStorage) Copy(file string) error {
 	_, name := path.Split(file)
 	if err := b.client.MkdirAll(b.DestinationPath, 0644); err != nil {
-		return fmt.Errorf("(*webDavStorage).Copy: Error creating directory '%s' on WebDAV server: %w", b.DestinationPath, err)
+		return fmt.Errorf("(*webDavStorage).Copy: error creating directory '%s' on server: %w", b.DestinationPath, err)
 	}
 
 	r, err := os.Open(file)
 	if err != nil {
-		return fmt.Errorf("(*webDavStorage).Copy: Error opening the file to be uploaded: %w", err)
+		return fmt.Errorf("(*webDavStorage).Copy: error opening the file to be uploaded: %w", err)
 	}
 
 	if err := b.client.WriteStream(filepath.Join(b.DestinationPath, name), r, 0644); err != nil {
-		return fmt.Errorf("(*webDavStorage).Copy: Error uploading the file to WebDAV server: %w", err)
+		return fmt.Errorf("(*webDavStorage).Copy: error uploading the file: %w", err)
 	}
-	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' to WebDAV URL '%s' at path '%s'.", file, b.url, b.DestinationPath)
+	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' to '%s' at path '%s'.", file, b.url, b.DestinationPath)
 
 	return nil
 }
@@ -89,7 +89,7 @@ func (b *webDavStorage) Copy(file string) error {
 func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
 	candidates, err := b.client.ReadDir(b.DestinationPath)
 	if err != nil {
-		return nil, fmt.Errorf("(*webDavStorage).Prune: Error looking up candidates from remote storage: %w", err)
+		return nil, fmt.Errorf("(*webDavStorage).Prune: error looking up candidates from remote storage: %w", err)
 	}
 	var matches []fs.FileInfo
 	var lenCandidates int
@@ -108,10 +108,10 @@ func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storag
 		Pruned: uint(len(matches)),
 	}
 
-	if err := b.DoPrune(b.Name(), len(matches), lenCandidates, "WebDAV backup(s)", func() error {
+	if err := b.DoPrune(b.Name(), len(matches), lenCandidates, func() error {
 		for _, match := range matches {
 			if err := b.client.Remove(filepath.Join(b.DestinationPath, match.Name())); err != nil {
-				return fmt.Errorf("(*webDavStorage).Prune: Error removing file from WebDAV storage: %w", err)
+				return fmt.Errorf("(*webDavStorage).Prune: error removing file: %w", err)
 			}
 		}
 		return nil