Print context in log field instead of prepending to message (#260)

* Print context in log field instead of prepending to message

* Log messages on pruning do not need a description anymore

* Remove redundant information from logs and errors
Frederik Ring 2023-08-25 12:44:43 +02:00 committed by GitHub
parent 88a4794083
commit 3d7677f02a
8 changed files with 43 additions and 39 deletions
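For readers skimming the diff, the gist of the change: the storage name used to be interpolated into the log message string, and now travels as a structured key-value attribute on the log record. A minimal sketch, assuming the logger is Go's log/slog with a text handler (the handler setup, the storage name and the example message below are illustrative, not taken from this commit):

    package main

    import (
    	"fmt"
    	"log/slog"
    	"os"
    )

    func main() {
    	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))

    	// Before: context prepended to the message itself, emitting roughly
    	//   level=INFO msg="[S3] Removed 2 backup(s)."
    	logger.Info(fmt.Sprintf("["+"S3"+"] "+"Removed %d backup(s).", 2))

    	// After: context attached as a separate field, emitting roughly
    	//   level=INFO msg="Removed 2 backup(s)." storage=S3
    	logger.Info(fmt.Sprintf("Removed %d backup(s).", 2), "storage", "S3")
    }

With the field variant, a text or JSON handler can filter and group log lines by the storage field without parsing message strings.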

View File

@@ -126,11 +126,11 @@ func newScript() (*script, error) {
logFunc := func(logType storage.LogLevel, context string, msg string, params ...any) {
switch logType {
case storage.LogLevelWarning:
s.logger.Warn(fmt.Sprintf("["+context+"] "+msg, params...))
s.logger.Warn(fmt.Sprintf(msg, params...), "storage", context)
case storage.LogLevelError:
s.logger.Error(fmt.Sprintf("["+context+"] "+msg, params...))
s.logger.Error(fmt.Sprintf(msg, params...), "storage", context)
default:
s.logger.Info(fmt.Sprintf("["+context+"] "+msg, params...))
s.logger.Info(fmt.Sprintf(msg, params...), "storage", context)
}
}

View File

@@ -132,7 +132,7 @@ func (b *azureBlobStorage) Prune(deadline time.Time, pruningPrefix string) (*sto
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), int(totalCount), "Azure Blob Storage backup(s)", func() error {
if err := b.DoPrune(b.Name(), len(matches), int(totalCount), func() error {
wg := sync.WaitGroup{}
wg.Add(len(matches))
var errs []error

View File

@@ -95,11 +95,11 @@ func (b *dropboxStorage) Copy(file string) error {
switch err := err.(type) {
case files.CreateFolderV2APIError:
if err.EndpointError.Path.Tag != files.WriteErrorConflict {
return fmt.Errorf("(*dropboxStorage).Copy: Error creating directory '%s' in Dropbox: %w", b.DestinationPath, err)
return fmt.Errorf("(*dropboxStorage).Copy: Error creating directory '%s': %w", b.DestinationPath, err)
}
b.Log(storage.LogLevelInfo, b.Name(), "Destination path '%s' already exists in Dropbox, no new directory required.", b.DestinationPath)
b.Log(storage.LogLevelInfo, b.Name(), "Destination path '%s' already exists, no new directory required.", b.DestinationPath)
default:
return fmt.Errorf("(*dropboxStorage).Copy: Error creating directory '%s' in Dropbox: %w", b.DestinationPath, err)
return fmt.Errorf("(*dropboxStorage).Copy: Error creating directory '%s': %w", b.DestinationPath, err)
}
}
@@ -111,7 +111,7 @@ func (b *dropboxStorage) Copy(file string) error {
// Start new upload session and get session id
b.Log(storage.LogLevelInfo, b.Name(), "Starting upload session for backup '%s' to Dropbox at path '%s'.", file, b.DestinationPath)
b.Log(storage.LogLevelInfo, b.Name(), "Starting upload session for backup '%s' at path '%s'.", file, b.DestinationPath)
var sessionId string
uploadSessionStartArg := files.NewUploadSessionStartArg()
@@ -201,7 +201,7 @@ loop:
return fmt.Errorf("(*dropboxStorage).Copy: Error finishing the upload session: %w", err)
}
b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' to Dropbox at path '%s'.", file, b.DestinationPath)
b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' at path '%s'.", file, b.DestinationPath)
return nil
}
@@ -245,7 +245,7 @@ func (b *dropboxStorage) Prune(deadline time.Time, pruningPrefix string) (*stora
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), lenCandidates, "Dropbox backup(s)", func() error {
if err := b.DoPrune(b.Name(), len(matches), lenCandidates, func() error {
for _, match := range matches {
if _, err := b.client.DeleteV2(files.NewDeleteArg(filepath.Join(b.DestinationPath, match.Name))); err != nil {
return fmt.Errorf("(*dropboxStorage).Prune: Error removing file from Dropbox storage: %w", err)

View File

@@ -47,9 +47,9 @@ func (b *localStorage) Copy(file string) error {
_, name := path.Split(file)
if err := copyFile(file, path.Join(b.DestinationPath, name)); err != nil {
return fmt.Errorf("(*localStorage).Copy: Error copying file to local archive: %w", err)
return fmt.Errorf("(*localStorage).Copy: Error copying file to archive: %w", err)
}
b.Log(storage.LogLevelInfo, b.Name(), "Stored copy of backup `%s` in local archive `%s`.", file, b.DestinationPath)
b.Log(storage.LogLevelInfo, b.Name(), "Stored copy of backup `%s` in `%s`.", file, b.DestinationPath)
if b.latestSymlink != "" {
symlink := path.Join(b.DestinationPath, b.latestSymlink)
@@ -116,7 +116,7 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), len(candidates), "local backup(s)", func() error {
if err := b.DoPrune(b.Name(), len(matches), len(candidates), func() error {
var removeErrors []error
for _, match := range matches {
if err := os.Remove(match); err != nil {
@@ -125,7 +125,7 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
}
if len(removeErrors) != 0 {
return fmt.Errorf(
"(*localStorage).Prune: %d error(s) deleting local files, starting with: %w",
"(*localStorage).Prune: %d error(s) deleting files, starting with: %w",
len(removeErrors),
errors.Join(removeErrors...),
)

View File

@@ -125,7 +125,12 @@ func (b *s3Storage) Copy(file string) error {
if _, err := b.client.FPutObject(context.Background(), b.bucket, filepath.Join(b.DestinationPath, name), file, putObjectOptions); err != nil {
if errResp := minio.ToErrorResponse(err); errResp.Message != "" {
return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode)
return fmt.Errorf(
"(*s3Storage).Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d",
errResp.Message,
errResp.Code,
errResp.StatusCode,
)
}
return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: %w", err)
}
@@ -148,7 +153,7 @@ func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.Pr
lenCandidates++
if candidate.Err != nil {
return nil, fmt.Errorf(
"(*s3Storage).Prune: Error looking up candidates from remote storage! %w",
"(*s3Storage).Prune: error looking up candidates from remote storage! %w",
candidate.Err,
)
}
@@ -162,7 +167,7 @@ func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.Pr
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), lenCandidates, "remote backup(s)", func() error {
if err := b.DoPrune(b.Name(), len(matches), lenCandidates, func() error {
objectsCh := make(chan minio.ObjectInfo)
go func() {
for _, match := range matches {

View File

@@ -75,7 +75,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", opts.HostName, opts.Port), sshClientConfig)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: Error creating ssh client: %w", err)
return nil, fmt.Errorf("NewStorageBackend: error creating ssh client: %w", err)
}
_, _, err = sshClient.SendRequest("keepalive", false, nil)
if err != nil {
@@ -108,13 +108,13 @@ func (b *sshStorage) Copy(file string) error {
source, err := os.Open(file)
_, name := path.Split(file)
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error reading the file to be uploaded: %w", err)
return fmt.Errorf("(*sshStorage).Copy: error reading the file to be uploaded: %w", err)
}
defer source.Close()
destination, err := b.sftpClient.Create(filepath.Join(b.DestinationPath, name))
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error creating file on SSH storage: %w", err)
return fmt.Errorf("(*sshStorage).Copy: error creating file: %w", err)
}
defer destination.Close()
@@ -124,7 +124,7 @@ func (b *sshStorage) Copy(file string) error {
if err == io.EOF {
tot, err := destination.Write(chunk[:num])
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage: %w", err)
return fmt.Errorf("(*sshStorage).Copy: error uploading the file: %w", err)
}
if tot != len(chunk[:num]) {
@@ -135,12 +135,12 @@ func (b *sshStorage) Copy(file string) error {
}
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage: %w", err)
return fmt.Errorf("(*sshStorage).Copy: error uploading the file: %w", err)
}
tot, err := destination.Write(chunk[:num])
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage: %w", err)
return fmt.Errorf("(*sshStorage).Copy: error uploading the file: %w", err)
}
if tot != len(chunk[:num]) {
@@ -148,7 +148,7 @@ func (b *sshStorage) Copy(file string) error {
}
}
b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, b.hostName, b.DestinationPath)
b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to '%s' at path '%s'.", file, b.hostName, b.DestinationPath)
return nil
}
@@ -157,7 +157,7 @@ func (b *sshStorage) Copy(file string) error {
func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
candidates, err := b.sftpClient.ReadDir(b.DestinationPath)
if err != nil {
return nil, fmt.Errorf("(*sshStorage).Prune: Error reading directory from SSH storage: %w", err)
return nil, fmt.Errorf("(*sshStorage).Prune: error reading directory: %w", err)
}
var matches []string
@@ -175,10 +175,10 @@ func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.P
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), len(candidates), "SSH backup(s)", func() error {
if err := b.DoPrune(b.Name(), len(matches), len(candidates), func() error {
for _, match := range matches {
if err := b.sftpClient.Remove(filepath.Join(b.DestinationPath, match)); err != nil {
return fmt.Errorf("(*sshStorage).Prune: Error removing file from SSH storage: %w", err)
return fmt.Errorf("(*sshStorage).Prune: error removing file: %w", err)
}
}
return nil

View File

@@ -39,23 +39,22 @@ type PruneStats struct {
// DoPrune holds general control flow that applies to any kind of storage.
// Callers can pass in a thunk that performs the actual deletion of files.
func (b *StorageBackend) DoPrune(context string, lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error {
func (b *StorageBackend) DoPrune(context string, lenMatches, lenCandidates int, doRemoveFiles func() error) error {
if lenMatches != 0 && lenMatches != lenCandidates {
if err := doRemoveFiles(); err != nil {
return err
}
b.Log(LogLevelInfo, context,
"Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.",
"Pruned %d out of %d backups as their age exceeded the configured retention period of %d days.",
lenMatches,
lenCandidates,
description,
b.RetentionDays,
)
} else if lenMatches != 0 && lenMatches == lenCandidates {
b.Log(LogLevelWarning, context, "The current configuration would delete all %d existing %s.", lenMatches, description)
b.Log(LogLevelWarning, context, "The current configuration would delete all %d existing backups.", lenMatches)
b.Log(LogLevelWarning, context, "Refusing to do so, please check your configuration.")
} else {
b.Log(LogLevelInfo, context, "None of %d existing %s were pruned.", lenCandidates, description)
b.Log(LogLevelInfo, context, "None of %d existing backups were pruned.", lenCandidates)
}
return nil
}
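The doc comment retained in this hunk describes the contract: DoPrune owns the shared control flow (prune, refuse when the configuration would delete everything, or do nothing, plus the log line), while each backend only supplies the deletion closure. A hypothetical caller under the new four-argument signature; myBackend, findExpired and removeAll are illustrative names, not part of this commit:

    // Sketch of a backend wiring its Prune method through the revised DoPrune.
    func (b *myBackend) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
    	matches, candidates := b.findExpired(deadline, pruningPrefix) // hypothetical helper
    	stats := &storage.PruneStats{Pruned: uint(len(matches))}
    	if err := b.DoPrune(b.Name(), len(matches), len(candidates), func() error {
    		return b.removeAll(matches) // deletion thunk; DoPrune decides whether it runs
    	}); err != nil {
    		return stats, err
    	}
    	return stats, nil
    }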

View File

@@ -69,18 +69,18 @@ func (b *webDavStorage) Name() string {
func (b *webDavStorage) Copy(file string) error {
_, name := path.Split(file)
if err := b.client.MkdirAll(b.DestinationPath, 0644); err != nil {
return fmt.Errorf("(*webDavStorage).Copy: Error creating directory '%s' on WebDAV server: %w", b.DestinationPath, err)
return fmt.Errorf("(*webDavStorage).Copy: error creating directory '%s' on server: %w", b.DestinationPath, err)
}
r, err := os.Open(file)
if err != nil {
return fmt.Errorf("(*webDavStorage).Copy: Error opening the file to be uploaded: %w", err)
return fmt.Errorf("(*webDavStorage).Copy: error opening the file to be uploaded: %w", err)
}
if err := b.client.WriteStream(filepath.Join(b.DestinationPath, name), r, 0644); err != nil {
return fmt.Errorf("(*webDavStorage).Copy: Error uploading the file to WebDAV server: %w", err)
return fmt.Errorf("(*webDavStorage).Copy: error uploading the file: %w", err)
}
b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' to WebDAV URL '%s' at path '%s'.", file, b.url, b.DestinationPath)
b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' to '%s' at path '%s'.", file, b.url, b.DestinationPath)
return nil
}
@@ -89,7 +89,7 @@ func (b *webDavStorage) Copy(file string) error {
func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
candidates, err := b.client.ReadDir(b.DestinationPath)
if err != nil {
return nil, fmt.Errorf("(*webDavStorage).Prune: Error looking up candidates from remote storage: %w", err)
return nil, fmt.Errorf("(*webDavStorage).Prune: error looking up candidates from remote storage: %w", err)
}
var matches []fs.FileInfo
var lenCandidates int
@@ -108,10 +108,10 @@ func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storag
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), lenCandidates, "WebDAV backup(s)", func() error {
if err := b.DoPrune(b.Name(), len(matches), lenCandidates, func() error {
for _, match := range matches {
if err := b.client.Remove(filepath.Join(b.DestinationPath, match.Name())); err != nil {
return fmt.Errorf("(*webDavStorage).Prune: Error removing file from WebDAV storage: %w", err)
return fmt.Errorf("(*webDavStorage).Prune: error removing file: %w", err)
}
}
return nil