diff --git a/.dockerignore b/.dockerignore index 9daeafb..246a56f 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1 +1,19 @@ -test +# Ignore everything +* + +# Exceptions: +# Note: Wildcards for directories like * or ** don't work (yet) with exclamation marks! + +!cmd/backup/*.go +!cmd/backup/*.tmpl + +!internal/storage/*.go +!internal/storage/local/*.go +!internal/storage/s3/*.go +!internal/storage/ssh/*.go +!internal/storage/webdav/*.go +!internal/utilities/*.go + +!Dockerfile +!entrypoint.sh +!go.* \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 8d64968..193034b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,9 +4,8 @@ FROM golang:1.18-alpine as builder WORKDIR /app -COPY go.mod go.sum ./ +COPY . . RUN go mod download -COPY cmd/backup ./cmd/backup/ WORKDIR /app/cmd/backup RUN go build -o backup . diff --git a/cmd/backup/config.go b/cmd/backup/config.go index e12d682..9326d43 100644 --- a/cmd/backup/config.go +++ b/cmd/backup/config.go @@ -12,6 +12,15 @@ import ( // Config holds all configuration values that are expected to be set // by users. type Config struct { + AwsS3BucketName string `split_words:"true"` + AwsS3Path string `split_words:"true"` + AwsEndpoint string `split_words:"true" default:"s3.amazonaws.com"` + AwsEndpointProto string `split_words:"true" default:"https"` + AwsEndpointInsecure bool `split_words:"true"` + AwsStorageClass string `split_words:"true"` + AwsAccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID"` + AwsSecretAccessKey string `split_words:"true"` + AwsIamRoleEndpoint string `split_words:"true"` BackupSources string `split_words:"true" default:"/backup"` BackupFilename string `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.tar.gz"` BackupFilenameExpand bool `split_words:"true"` @@ -23,15 +32,6 @@ type Config struct { BackupStopContainerLabel string `split_words:"true" default:"true"` BackupFromSnapshot bool `split_words:"true"` BackupExcludeRegexp RegexpDecoder `split_words:"true"` - AwsS3BucketName string `split_words:"true"` - AwsS3Path string `split_words:"true"` - AwsEndpoint string `split_words:"true" default:"s3.amazonaws.com"` - AwsEndpointProto string `split_words:"true" default:"https"` - AwsEndpointInsecure bool `split_words:"true"` - AwsStorageClass string `split_words:"true"` - AwsAccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID"` - AwsSecretAccessKey string `split_words:"true"` - AwsIamRoleEndpoint string `split_words:"true"` GpgPassphrase string `split_words:"true"` NotificationURLs []string `envconfig:"NOTIFICATION_URLS"` NotificationLevel string `split_words:"true" default:"error"` diff --git a/cmd/backup/hooks.go b/cmd/backup/hooks.go index ed81679..777f396 100644 --- a/cmd/backup/hooks.go +++ b/cmd/backup/hooks.go @@ -6,6 +6,8 @@ package main import ( "fmt" "sort" + + "github.com/offen/docker-volume-backup/internal/utilities" ) // hook contains a queued action that can be trigger them when the script @@ -50,7 +52,7 @@ func (s *script) runHooks(err error) error { } } if len(actionErrors) != 0 { - return join(actionErrors...) + return utilities.Join(actionErrors...) 
} return nil } diff --git a/cmd/backup/lock.go b/cmd/backup/lock.go index 2bb5a79..e333964 100644 --- a/cmd/backup/lock.go +++ b/cmd/backup/lock.go @@ -9,6 +9,7 @@ import ( "time" "github.com/gofrs/flock" + "github.com/offen/docker-volume-backup/internal/utilities" ) // lock opens a lockfile at the given location, keeping it locked until the @@ -31,7 +32,7 @@ func (s *script) lock(lockfile string) (func() error, error) { for { acquired, err := fileLock.TryLock() if err != nil { - return noop, fmt.Errorf("lock: error trying lock: %w", err) + return utilities.Noop, fmt.Errorf("lock: error trying lock: %w", err) } if acquired { if s.encounteredLock { @@ -52,7 +53,7 @@ func (s *script) lock(lockfile string) (func() error, error) { case <-retry.C: continue case <-deadline.C: - return noop, errors.New("lock: timed out waiting for lockfile to become available") + return utilities.Noop, errors.New("lock: timed out waiting for lockfile to become available") } } } diff --git a/cmd/backup/notifications.go b/cmd/backup/notifications.go index 80af395..67b560c 100644 --- a/cmd/backup/notifications.go +++ b/cmd/backup/notifications.go @@ -12,6 +12,7 @@ import ( "time" sTypes "github.com/containrrr/shoutrrr/pkg/types" + "github.com/offen/docker-volume-backup/internal/utilities" ) //go:embed notifications.tmpl @@ -68,7 +69,7 @@ func (s *script) sendNotification(title, body string) error { } } if len(errs) != 0 { - return fmt.Errorf("sendNotification: error sending message: %w", join(errs...)) + return fmt.Errorf("sendNotification: error sending message: %w", utilities.Join(errs...)) } return nil } diff --git a/cmd/backup/script.go b/cmd/backup/script.go index cd08361..ba17ffe 100644 --- a/cmd/backup/script.go +++ b/cmd/backup/script.go @@ -5,19 +5,22 @@ package main import ( "context" - "errors" "fmt" "io" "io/fs" - "io/ioutil" - "net/http" "os" "path" "path/filepath" - "strings" "text/template" "time" + "github.com/offen/docker-volume-backup/internal/storage" + "github.com/offen/docker-volume-backup/internal/storage/local" + "github.com/offen/docker-volume-backup/internal/storage/s3" + "github.com/offen/docker-volume-backup/internal/storage/ssh" + "github.com/offen/docker-volume-backup/internal/storage/webdav" + "github.com/offen/docker-volume-backup/internal/utilities" + "github.com/containrrr/shoutrrr" "github.com/containrrr/shoutrrr/pkg/router" "github.com/docker/docker/api/types" @@ -26,29 +29,21 @@ import ( "github.com/docker/docker/client" "github.com/kelseyhightower/envconfig" "github.com/leekchan/timeutil" - "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" "github.com/otiai10/copy" - "github.com/pkg/sftp" "github.com/sirupsen/logrus" - "github.com/studio-b12/gowebdav" "golang.org/x/crypto/openpgp" - "golang.org/x/crypto/ssh" ) // script holds all the stateful information required to orchestrate a // single backup run. type script struct { - cli *client.Client - minioClient *minio.Client - webdavClient *gowebdav.Client - sshClient *ssh.Client - sftpClient *sftp.Client - logger *logrus.Logger - sender *router.ServiceRouter - template *template.Template - hooks []hook - hookLevel hookLevel + cli *client.Client + storages []storage.Backend + logger *logrus.Logger + sender *router.ServiceRouter + template *template.Template + hooks []hook + hookLevel hookLevel file string stats *Stats @@ -63,7 +58,7 @@ type script struct { // reading from env vars or other configuration sources is expected to happen // in this method. 
func newScript() (*script, error) { - stdOut, logBuffer := buffer(os.Stdout) + stdOut, logBuffer := utilities.Buffer(os.Stdout) s := &script{ c: &Config{}, logger: &logrus.Logger{ @@ -75,7 +70,12 @@ func newScript() (*script, error) { stats: &Stats{ StartTime: time.Now(), LogOutput: logBuffer, - Storages: StoragesStats{}, + Storages: map[string]StorageStats{ + "S3": {}, + "WebDav": {}, + "SSH": {}, + "Local": {}, + }, }, } @@ -107,114 +107,56 @@ func newScript() (*script, error) { s.cli = cli } + logFunc := func(logType storage.LogType, context string, msg string, params ...interface{}) error { + var allParams []interface{} + allParams = append(allParams, context) + allParams = append(allParams, params...) + + switch logType { + case storage.INFO: + s.logger.Infof("[%s] "+msg, allParams...) + return nil + case storage.WARNING: + s.logger.Warnf("[%s] "+msg, allParams...) + return nil + case storage.ERROR: + return fmt.Errorf("[%s] "+msg, allParams...) + default: + s.logger.Warnf("[%s] "+msg, allParams...) + return nil + } + } + if s.c.AwsS3BucketName != "" { - var creds *credentials.Credentials - if s.c.AwsAccessKeyID != "" && s.c.AwsSecretAccessKey != "" { - creds = credentials.NewStaticV4( - s.c.AwsAccessKeyID, - s.c.AwsSecretAccessKey, - "", - ) - } else if s.c.AwsIamRoleEndpoint != "" { - creds = credentials.NewIAM(s.c.AwsIamRoleEndpoint) + if s3Backend, err := s3.NewStorageBackend(s.c.AwsEndpoint, s.c.AwsAccessKeyID, s.c.AwsSecretAccessKey, s.c.AwsIamRoleEndpoint, + s.c.AwsEndpointProto, s.c.AwsEndpointInsecure, s.c.AwsS3Path, s.c.AwsS3BucketName, s.c.AwsStorageClass, logFunc); err != nil { + return nil, err } else { - return nil, errors.New("newScript: AWS_S3_BUCKET_NAME is defined, but no credentials were provided") + s.storages = append(s.storages, s3Backend) } - - options := minio.Options{ - Creds: creds, - Secure: s.c.AwsEndpointProto == "https", - } - - if s.c.AwsEndpointInsecure { - if !options.Secure { - return nil, errors.New("newScript: AWS_ENDPOINT_INSECURE = true is only meaningful for https") - } - - transport, err := minio.DefaultTransport(true) - if err != nil { - return nil, fmt.Errorf("newScript: failed to create default minio transport") - } - transport.TLSClientConfig.InsecureSkipVerify = true - options.Transport = transport - } - - mc, err := minio.New(s.c.AwsEndpoint, &options) - if err != nil { - return nil, fmt.Errorf("newScript: error setting up minio client: %w", err) - } - s.minioClient = mc } if s.c.WebdavUrl != "" { - if s.c.WebdavUsername == "" || s.c.WebdavPassword == "" { - return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided") + if webdavBackend, err := webdav.NewStorageBackend(s.c.WebdavUrl, s.c.WebdavPath, s.c.WebdavUsername, s.c.WebdavPassword, + s.c.WebdavUrlInsecure, logFunc); err != nil { + return nil, err } else { - webdavClient := gowebdav.NewClient(s.c.WebdavUrl, s.c.WebdavUsername, s.c.WebdavPassword) - s.webdavClient = webdavClient - if s.c.WebdavUrlInsecure { - defaultTransport, ok := http.DefaultTransport.(*http.Transport) - if !ok { - return nil, errors.New("newScript: unexpected error when asserting type for http.DefaultTransport") - } - webdavTransport := defaultTransport.Clone() - webdavTransport.TLSClientConfig.InsecureSkipVerify = s.c.WebdavUrlInsecure - s.webdavClient.SetTransport(webdavTransport) - } + s.storages = append(s.storages, webdavBackend) } } if s.c.SSHHostName != "" { - var authMethods []ssh.AuthMethod - - if s.c.SSHPassword != "" { - authMethods = append(authMethods, 
ssh.Password(s.c.SSHPassword)) - } - - if _, err := os.Stat(s.c.SSHIdentityFile); err == nil { - key, err := ioutil.ReadFile(s.c.SSHIdentityFile) - if err != nil { - return nil, errors.New("newScript: error reading the private key") - } - - var signer ssh.Signer - if s.c.SSHIdentityPassphrase != "" { - signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(s.c.SSHIdentityPassphrase)) - if err != nil { - return nil, errors.New("newScript: error parsing the encrypted private key") - } - authMethods = append(authMethods, ssh.PublicKeys(signer)) - } else { - signer, err = ssh.ParsePrivateKey(key) - if err != nil { - return nil, errors.New("newScript: error parsing the private key") - } - authMethods = append(authMethods, ssh.PublicKeys(signer)) - } - } - - sshClientConfig := &ssh.ClientConfig{ - User: s.c.SSHUser, - Auth: authMethods, - HostKeyCallback: ssh.InsecureIgnoreHostKey(), - } - sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", s.c.SSHHostName, s.c.SSHPort), sshClientConfig) - s.sshClient = sshClient - if err != nil { - return nil, fmt.Errorf("newScript: error creating ssh client: %w", err) - } - _, _, err = s.sshClient.SendRequest("keepalive", false, nil) - if err != nil { + if sshBackend, err := ssh.NewStorageBackend(s.c.SSHHostName, s.c.SSHPort, s.c.SSHUser, s.c.SSHPassword, s.c.SSHIdentityFile, + s.c.SSHIdentityPassphrase, s.c.SSHRemotePath, logFunc); err != nil { return nil, err - } - - sftpClient, err := sftp.NewClient(sshClient) - s.sftpClient = sftpClient - if err != nil { - return nil, fmt.Errorf("newScript: error creating sftp client: %w", err) + } else { + s.storages = append(s.storages, sshBackend) } } + localBackend := local.NewStorageBackend(s.c.BackupArchive, s.c.BackupLatestSymlink, logFunc) + s.storages = append(s.storages, localBackend) + if s.c.EmailNotificationRecipient != "" { emailURL := fmt.Sprintf( "smtp://%s:%s@%s:%d/?from=%s&to=%s", @@ -286,14 +228,14 @@ func newScript() (*script, error) { // restart everything that has been stopped. 
func (s *script) stopContainers() (func() error, error) { if s.cli == nil { - return noop, nil + return utilities.Noop, nil } allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{ Quiet: true, }) if err != nil { - return noop, fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err) + return utilities.Noop, fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err) } containerLabel := fmt.Sprintf( @@ -309,11 +251,11 @@ func (s *script) stopContainers() (func() error, error) { }) if err != nil { - return noop, fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err) + return utilities.Noop, fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err) } if len(containersToStop) == 0 { - return noop, nil + return utilities.Noop, nil } s.logger.Infof( @@ -338,7 +280,7 @@ func (s *script) stopContainers() (func() error, error) { stopError = fmt.Errorf( "stopContainersAndRun: %d error(s) stopping containers: %w", len(stopErrors), - join(stopErrors...), + utilities.Join(stopErrors...), ) } @@ -389,7 +331,7 @@ func (s *script) stopContainers() (func() error, error) { return fmt.Errorf( "stopContainersAndRun: %d error(s) restarting containers and services: %w", len(restartErrors), - join(restartErrors...), + utilities.Join(restartErrors...), ) } s.logger.Infof( @@ -415,7 +357,7 @@ func (s *script) createArchive() error { backupSources = filepath.Join("/tmp", s.c.BackupSources) // copy before compressing guard against a situation where backup folder's content are still growing. s.registerHook(hookLevelPlumbing, func(error) error { - if err := remove(backupSources); err != nil { + if err := utilities.Remove(backupSources); err != nil { return fmt.Errorf("takeBackup: error removing snapshot: %w", err) } s.logger.Infof("Removed snapshot `%s`.", backupSources) @@ -432,7 +374,7 @@ func (s *script) createArchive() error { tarFile := s.file s.registerHook(hookLevelPlumbing, func(error) error { - if err := remove(tarFile); err != nil { + if err := utilities.Remove(tarFile); err != nil { return fmt.Errorf("takeBackup: error removing tar file: %w", err) } s.logger.Infof("Removed tar file `%s`.", tarFile) @@ -477,7 +419,7 @@ func (s *script) encryptArchive() error { gpgFile := fmt.Sprintf("%s.gpg", s.file) s.registerHook(hookLevelPlumbing, func(error) error { - if err := remove(gpgFile); err != nil { + if err := utilities.Remove(gpgFile); err != nil { return fmt.Errorf("encryptBackup: error removing gpg file: %w", err) } s.logger.Infof("Removed GPG file `%s`.", gpgFile) @@ -485,20 +427,20 @@ func (s *script) encryptArchive() error { }) outFile, err := os.Create(gpgFile) - defer outFile.Close() if err != nil { return fmt.Errorf("encryptBackup: error opening out file: %w", err) } + defer outFile.Close() _, name := path.Split(s.file) dst, err := openpgp.SymmetricallyEncrypt(outFile, []byte(s.c.GpgPassphrase), &openpgp.FileHints{ IsBinary: true, FileName: name, }, nil) - defer dst.Close() if err != nil { return fmt.Errorf("encryptBackup: error encrypting backup file: %w", err) } + defer dst.Close() src, err := os.Open(s.file) if err != nil { @@ -529,93 +471,12 @@ func (s *script) copyArchive() error { } } - if s.minioClient != nil { - if _, err := s.minioClient.FPutObject(context.Background(), s.c.AwsS3BucketName, filepath.Join(s.c.AwsS3Path, name), s.file, minio.PutObjectOptions{ - ContentType: "application/tar+gzip", - StorageClass: s.c.AwsStorageClass, - }); err != nil { - errResp := 
minio.ToErrorResponse(err) - return fmt.Errorf("copyBackup: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode) - } - s.logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`.", s.file, s.c.AwsS3BucketName) - } - - if s.webdavClient != nil { - bytes, err := os.ReadFile(s.file) - if err != nil { - return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err) - } - if err := s.webdavClient.MkdirAll(s.c.WebdavPath, 0644); err != nil { - return fmt.Errorf("copyBackup: error creating directory '%s' on WebDAV server: %w", s.c.WebdavPath, err) - } - if err := s.webdavClient.Write(filepath.Join(s.c.WebdavPath, name), bytes, 0644); err != nil { - return fmt.Errorf("copyBackup: error uploading the file to WebDAV server: %w", err) - } - s.logger.Infof("Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", s.file, s.c.WebdavUrl, s.c.WebdavPath) - } - - if s.sshClient != nil { - source, err := os.Open(s.file) - if err != nil { - return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err) - } - defer source.Close() - - destination, err := s.sftpClient.Create(filepath.Join(s.c.SSHRemotePath, name)) - if err != nil { - return fmt.Errorf("copyBackup: error creating file on SSH storage: %w", err) - } - defer destination.Close() - - chunk := make([]byte, 1000000) - for { - num, err := source.Read(chunk) - if err == io.EOF { - tot, err := destination.Write(chunk[:num]) - if err != nil { - return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err) - } - - if tot != len(chunk[:num]) { - return fmt.Errorf("sshClient: failed to write stream") - } - - break - } - - if err != nil { - return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err) - } - - tot, err := destination.Write(chunk[:num]) - if err != nil { - return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err) - } - - if tot != len(chunk[:num]) { - return fmt.Errorf("sshClient: failed to write stream") - } - } - - s.logger.Infof("Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", s.file, s.c.SSHHostName, s.c.SSHRemotePath) - } - - if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) { - if err := copyFile(s.file, path.Join(s.c.BackupArchive, name)); err != nil { - return fmt.Errorf("copyBackup: error copying file to local archive: %w", err) - } - s.logger.Infof("Stored copy of backup `%s` in local archive `%s`.", s.file, s.c.BackupArchive) - if s.c.BackupLatestSymlink != "" { - symlink := path.Join(s.c.BackupArchive, s.c.BackupLatestSymlink) - if _, err := os.Lstat(symlink); err == nil { - os.Remove(symlink) - } - if err := os.Symlink(name, symlink); err != nil { - return fmt.Errorf("copyBackup: error creating latest symlink: %w", err) - } - s.logger.Infof("Created/Updated symlink `%s` for latest backup.", s.c.BackupLatestSymlink) + for _, backend := range s.storages { + if err := backend.Copy(s.file); err != nil { + return err } } + return nil } @@ -629,208 +490,18 @@ func (s *script) pruneBackups() error { deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays)).Add(s.c.BackupPruningLeeway) - // doPrune holds general control flow that applies to any kind of storage. - // Callers can pass in a thunk that performs the actual deletion of files. 
- var doPrune = func(lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error { - if lenMatches != 0 && lenMatches != lenCandidates { - if err := doRemoveFiles(); err != nil { - return err + for _, backend := range s.storages { + if stats, err := backend.Prune(deadline, s.c.BackupPruningPrefix); err == nil { + s.stats.Storages[backend.Name()] = StorageStats{ + Total: stats.Total, + Pruned: stats.Pruned, } - s.logger.Infof( - "Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.", - lenMatches, - lenCandidates, - description, - s.c.BackupRetentionDays, - ) - } else if lenMatches != 0 && lenMatches == lenCandidates { - s.logger.Warnf("The current configuration would delete all %d existing %s.", lenMatches, description) - s.logger.Warn("Refusing to do so, please check your configuration.") + } else { - s.logger.Infof("None of %d existing %s were pruned.", lenCandidates, description) + return err } - return nil } - if s.minioClient != nil { - candidates := s.minioClient.ListObjects(context.Background(), s.c.AwsS3BucketName, minio.ListObjectsOptions{ - WithMetadata: true, - Prefix: filepath.Join(s.c.AwsS3Path, s.c.BackupPruningPrefix), - Recursive: true, - }) - - var matches []minio.ObjectInfo - var lenCandidates int - for candidate := range candidates { - lenCandidates++ - if candidate.Err != nil { - return fmt.Errorf( - "pruneBackups: error looking up candidates from remote storage: %w", - candidate.Err, - ) - } - if candidate.LastModified.Before(deadline) { - matches = append(matches, candidate) - } - } - - s.stats.Storages.S3 = StorageStats{ - Total: uint(lenCandidates), - Pruned: uint(len(matches)), - } - - doPrune(len(matches), lenCandidates, "remote backup(s)", func() error { - objectsCh := make(chan minio.ObjectInfo) - go func() { - for _, match := range matches { - objectsCh <- match - } - close(objectsCh) - }() - errChan := s.minioClient.RemoveObjects(context.Background(), s.c.AwsS3BucketName, objectsCh, minio.RemoveObjectsOptions{}) - var removeErrors []error - for result := range errChan { - if result.Err != nil { - removeErrors = append(removeErrors, result.Err) - } - } - if len(removeErrors) != 0 { - return join(removeErrors...) 
- } - return nil - }) - } - - if s.webdavClient != nil { - candidates, err := s.webdavClient.ReadDir(s.c.WebdavPath) - if err != nil { - return fmt.Errorf("pruneBackups: error looking up candidates from remote storage: %w", err) - } - var matches []fs.FileInfo - var lenCandidates int - for _, candidate := range candidates { - if !strings.HasPrefix(candidate.Name(), s.c.BackupPruningPrefix) { - continue - } - lenCandidates++ - if candidate.ModTime().Before(deadline) { - matches = append(matches, candidate) - } - } - - s.stats.Storages.WebDAV = StorageStats{ - Total: uint(lenCandidates), - Pruned: uint(len(matches)), - } - - doPrune(len(matches), lenCandidates, "WebDAV backup(s)", func() error { - for _, match := range matches { - if err := s.webdavClient.Remove(filepath.Join(s.c.WebdavPath, match.Name())); err != nil { - return fmt.Errorf("pruneBackups: error removing file from WebDAV storage: %w", err) - } - } - return nil - }) - } - - if s.sshClient != nil { - candidates, err := s.sftpClient.ReadDir(s.c.SSHRemotePath) - if err != nil { - return fmt.Errorf("pruneBackups: error reading directory from SSH storage: %w", err) - } - - var matches []string - for _, candidate := range candidates { - if !strings.HasPrefix(candidate.Name(), s.c.BackupPruningPrefix) { - continue - } - if candidate.ModTime().Before(deadline) { - matches = append(matches, candidate.Name()) - } - } - - s.stats.Storages.SSH = StorageStats{ - Total: uint(len(candidates)), - Pruned: uint(len(matches)), - } - - doPrune(len(matches), len(candidates), "SSH backup(s)", func() error { - for _, match := range matches { - if err := s.sftpClient.Remove(filepath.Join(s.c.SSHRemotePath, match)); err != nil { - return fmt.Errorf("pruneBackups: error removing file from SSH storage: %w", err) - } - } - return nil - }) - } - - if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) { - globPattern := path.Join( - s.c.BackupArchive, - fmt.Sprintf("%s*", s.c.BackupPruningPrefix), - ) - globMatches, err := filepath.Glob(globPattern) - if err != nil { - return fmt.Errorf( - "pruneBackups: error looking up matching files using pattern %s: %w", - globPattern, - err, - ) - } - - var candidates []string - for _, candidate := range globMatches { - fi, err := os.Lstat(candidate) - if err != nil { - return fmt.Errorf( - "pruneBackups: error calling Lstat on file %s: %w", - candidate, - err, - ) - } - - if fi.Mode()&os.ModeSymlink != os.ModeSymlink { - candidates = append(candidates, candidate) - } - } - - var matches []string - for _, candidate := range candidates { - fi, err := os.Stat(candidate) - if err != nil { - return fmt.Errorf( - "pruneBackups: error calling stat on file %s: %w", - candidate, - err, - ) - } - if fi.ModTime().Before(deadline) { - matches = append(matches, candidate) - } - } - - s.stats.Storages.Local = StorageStats{ - Total: uint(len(candidates)), - Pruned: uint(len(matches)), - } - - doPrune(len(matches), len(candidates), "local backup(s)", func() error { - var removeErrors []error - for _, match := range matches { - if err := os.Remove(match); err != nil { - removeErrors = append(removeErrors, err) - } - } - if len(removeErrors) != 0 { - return fmt.Errorf( - "pruneBackups: %d error(s) deleting local files, starting with: %w", - len(removeErrors), - join(removeErrors...), - ) - } - return nil - }) - } return nil } diff --git a/cmd/backup/stats.go b/cmd/backup/stats.go index bf8e46e..fbb5e11 100644 --- a/cmd/backup/stats.go +++ b/cmd/backup/stats.go @@ -30,14 +30,6 @@ type StorageStats struct { PruneErrors uint } -// 
StoragesStats stats about each possible archival location (Local, WebDAV, SSH, S3) -type StoragesStats struct { - Local StorageStats - WebDAV StorageStats - SSH StorageStats - S3 StorageStats -} - // Stats global stats regarding script execution type Stats struct { StartTime time.Time @@ -47,5 +39,5 @@ type Stats struct { LogOutput *bytes.Buffer Containers ContainersStats BackupFile BackupFileStats - Storages StoragesStats + Storages map[string]StorageStats } diff --git a/internal/storage/local/local.go b/internal/storage/local/local.go new file mode 100644 index 0000000..5d9158b --- /dev/null +++ b/internal/storage/local/local.go @@ -0,0 +1,131 @@ +package local + +import ( + "fmt" + "os" + "path" + "path/filepath" + "time" + + "github.com/offen/docker-volume-backup/internal/storage" + "github.com/offen/docker-volume-backup/internal/utilities" +) + +type localStorage struct { + *storage.StorageBackend + latestSymlink string +} + +// NewStorageBackend creates and initializes a new local storage backend. +func NewStorageBackend(archivePath string, latestSymlink string, logFunc storage.Log) storage.Backend { + return &localStorage{ + StorageBackend: &storage.StorageBackend{ + DestinationPath: archivePath, + Log: logFunc, + }, + latestSymlink: latestSymlink, + } +} + +// Name return the name of the storage backend +func (b *localStorage) Name() string { + return "Local" +} + +// Copy copies the given file to the local storage backend. +func (b *localStorage) Copy(file string) error { + if _, err := os.Stat(b.DestinationPath); os.IsNotExist(err) { + return nil + } + + _, name := path.Split(file) + + if err := utilities.CopyFile(file, path.Join(b.DestinationPath, name)); err != nil { + return b.Log(storage.ERROR, b.Name(), "Copy: Error copying file to local archive! %w", err) + } + b.Log(storage.INFO, b.Name(), "Stored copy of backup `%s` in local archive `%s`.", file, b.DestinationPath) + + if b.latestSymlink != "" { + symlink := path.Join(b.DestinationPath, b.latestSymlink) + if _, err := os.Lstat(symlink); err == nil { + os.Remove(symlink) + } + if err := os.Symlink(name, symlink); err != nil { + return b.Log(storage.ERROR, b.Name(), "Copy: error creating latest symlink! %w", err) + } + b.Log(storage.INFO, b.Name(), "Created/Updated symlink `%s` for latest backup.", b.latestSymlink) + } + + return nil +} + +// Prune rotates away backups according to the configuration and provided deadline for the local storage backend. +func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) { + globPattern := path.Join( + b.DestinationPath, + fmt.Sprintf("%s*", pruningPrefix), + ) + globMatches, err := filepath.Glob(globPattern) + if err != nil { + return nil, b.Log(storage.ERROR, b.Name(), + "Prune: Error looking up matching files using pattern %s! %w", + globPattern, + err, + ) + } + + var candidates []string + for _, candidate := range globMatches { + fi, err := os.Lstat(candidate) + if err != nil { + return nil, b.Log(storage.ERROR, b.Name(), + "Prune: Error calling Lstat on file %s! %w", + candidate, + err, + ) + } + + if fi.Mode()&os.ModeSymlink != os.ModeSymlink { + candidates = append(candidates, candidate) + } + } + + var matches []string + for _, candidate := range candidates { + fi, err := os.Stat(candidate) + if err != nil { + return nil, b.Log(storage.ERROR, b.Name(), + "Prune: Error calling stat on file %s! 
%w", + candidate, + err, + ) + } + if fi.ModTime().Before(deadline) { + matches = append(matches, candidate) + } + } + + stats := &storage.PruneStats{ + Total: uint(len(candidates)), + Pruned: uint(len(matches)), + } + + b.DoPrune(b.Name(), len(matches), len(candidates), "local backup(s)", func() error { + var removeErrors []error + for _, match := range matches { + if err := os.Remove(match); err != nil { + removeErrors = append(removeErrors, err) + } + } + if len(removeErrors) != 0 { + return b.Log(storage.ERROR, b.Name(), + "Prune: %d error(s) deleting local files, starting with: %w", + len(removeErrors), + utilities.Join(removeErrors...), + ) + } + return nil + }) + + return stats, nil +} diff --git a/internal/storage/s3/s3.go b/internal/storage/s3/s3.go new file mode 100644 index 0000000..cb162ea --- /dev/null +++ b/internal/storage/s3/s3.go @@ -0,0 +1,145 @@ +package s3 + +import ( + "context" + "errors" + "path" + "path/filepath" + "time" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/offen/docker-volume-backup/internal/storage" + "github.com/offen/docker-volume-backup/internal/utilities" +) + +type s3Storage struct { + *storage.StorageBackend + client *minio.Client + bucket string + storageClass string +} + +// NewStorageBackend creates and initializes a new S3/Minio storage backend. +func NewStorageBackend(endpoint string, accessKeyId string, secretAccessKey string, iamRoleEndpoint string, endpointProto string, endpointInsecure bool, + remotePath string, bucket string, storageClass string, logFunc storage.Log) (storage.Backend, error) { + + var creds *credentials.Credentials + if accessKeyId != "" && secretAccessKey != "" { + creds = credentials.NewStaticV4( + accessKeyId, + secretAccessKey, + "", + ) + } else if iamRoleEndpoint != "" { + creds = credentials.NewIAM(iamRoleEndpoint) + } else { + return nil, errors.New("newScript: AWS_S3_BUCKET_NAME is defined, but no credentials were provided") + } + + options := minio.Options{ + Creds: creds, + Secure: endpointProto == "https", + } + + if endpointInsecure { + if !options.Secure { + return nil, errors.New("newScript: AWS_ENDPOINT_INSECURE = true is only meaningful for https") + } + + transport, err := minio.DefaultTransport(true) + if err != nil { + return nil, logFunc(storage.ERROR, "S3", "NewScript: failed to create default minio transport") + } + transport.TLSClientConfig.InsecureSkipVerify = true + options.Transport = transport + } + + mc, err := minio.New(endpoint, &options) + if err != nil { + return nil, logFunc(storage.ERROR, "S3", "NewScript: error setting up minio client: %w", err) + } + + return &s3Storage{ + StorageBackend: &storage.StorageBackend{ + DestinationPath: remotePath, + Log: logFunc, + }, + client: mc, + bucket: bucket, + storageClass: storageClass, + }, nil +} + +// Name returns the name of the storage backend +func (v *s3Storage) Name() string { + return "S3" +} + +// Copy copies the given file to the S3/Minio storage backend. 
+func (b *s3Storage) Copy(file string) error { + _, name := path.Split(file) + + if _, err := b.client.FPutObject(context.Background(), b.bucket, filepath.Join(b.DestinationPath, name), file, minio.PutObjectOptions{ + ContentType: "application/tar+gzip", + StorageClass: b.storageClass, + }); err != nil { + errResp := minio.ToErrorResponse(err) + return b.Log(storage.ERROR, b.Name(), "Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode) + } + b.Log(storage.INFO, b.Name(), "Uploaded a copy of backup `%s` to bucket `%s`.", file, b.bucket) + + return nil +} + +// Prune rotates away backups according to the configuration and provided deadline for the S3/Minio storage backend. +func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) { + candidates := b.client.ListObjects(context.Background(), b.bucket, minio.ListObjectsOptions{ + WithMetadata: true, + Prefix: filepath.Join(b.DestinationPath, pruningPrefix), + Recursive: true, + }) + + var matches []minio.ObjectInfo + var lenCandidates int + for candidate := range candidates { + lenCandidates++ + if candidate.Err != nil { + return nil, b.Log(storage.ERROR, b.Name(), + "Prune: Error looking up candidates from remote storage! %w", + candidate.Err, + ) + } + if candidate.LastModified.Before(deadline) { + matches = append(matches, candidate) + } + } + + stats := &storage.PruneStats{ + Total: uint(lenCandidates), + Pruned: uint(len(matches)), + } + + b.DoPrune(b.Name(), len(matches), lenCandidates, "remote backup(s)", func() error { + objectsCh := make(chan minio.ObjectInfo) + go func() { + for _, match := range matches { + objectsCh <- match + } + close(objectsCh) + }() + errChan := b.client.RemoveObjects(context.Background(), b.bucket, objectsCh, minio.RemoveObjectsOptions{}) + var removeErrors []error + for result := range errChan { + if result.Err != nil { + removeErrors = append(removeErrors, result.Err) + } + } + if len(removeErrors) != 0 { + return utilities.Join(removeErrors...) + } + return nil + }) + + return stats, nil +} diff --git a/internal/storage/ssh/ssh.go b/internal/storage/ssh/ssh.go new file mode 100644 index 0000000..c7dad70 --- /dev/null +++ b/internal/storage/ssh/ssh.go @@ -0,0 +1,176 @@ +package ssh + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" + "time" + + "github.com/offen/docker-volume-backup/internal/storage" + "github.com/pkg/sftp" + "golang.org/x/crypto/ssh" +) + +type sshStorage struct { + *storage.StorageBackend + client *ssh.Client + sftpClient *sftp.Client + hostName string +} + +// NewStorageBackend creates and initializes a new SSH storage backend. 
+func NewStorageBackend(hostName string, port string, user string, password string, identityFile string, identityPassphrase string, remotePath string, + logFunc storage.Log) (storage.Backend, error) { + + var authMethods []ssh.AuthMethod + + if password != "" { + authMethods = append(authMethods, ssh.Password(password)) + } + + if _, err := os.Stat(identityFile); err == nil { + key, err := ioutil.ReadFile(identityFile) + if err != nil { + return nil, errors.New("newScript: error reading the private key") + } + + var signer ssh.Signer + if identityPassphrase != "" { + signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(identityPassphrase)) + if err != nil { + return nil, errors.New("newScript: error parsing the encrypted private key") + } + authMethods = append(authMethods, ssh.PublicKeys(signer)) + } else { + signer, err = ssh.ParsePrivateKey(key) + if err != nil { + return nil, errors.New("newScript: error parsing the private key") + } + authMethods = append(authMethods, ssh.PublicKeys(signer)) + } + } + + sshClientConfig := &ssh.ClientConfig{ + User: user, + Auth: authMethods, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", hostName, port), sshClientConfig) + + if err != nil { + return nil, logFunc(storage.ERROR, "SSH", "NewScript: Error creating ssh client! %w", err) + } + _, _, err = sshClient.SendRequest("keepalive", false, nil) + if err != nil { + return nil, err + } + + sftpClient, err := sftp.NewClient(sshClient) + if err != nil { + return nil, logFunc(storage.ERROR, "SSH", "NewScript: error creating sftp client! %w", err) + } + + return &sshStorage{ + StorageBackend: &storage.StorageBackend{ + DestinationPath: remotePath, + Log: logFunc, + }, + client: sshClient, + sftpClient: sftpClient, + hostName: hostName, + }, nil +} + +// Name returns the name of the storage backend +func (b *sshStorage) Name() string { + return "SSH" +} + +// Copy copies the given file to the SSH storage backend. +func (b *sshStorage) Copy(file string) error { + source, err := os.Open(file) + _, name := path.Split(file) + if err != nil { + return b.Log(storage.ERROR, b.Name(), "Copy: Error reading the file to be uploaded! %w", err) + } + defer source.Close() + + destination, err := b.sftpClient.Create(filepath.Join(b.DestinationPath, name)) + if err != nil { + return b.Log(storage.ERROR, b.Name(), "Copy: Error creating file on SSH storage! %w", err) + } + defer destination.Close() + + chunk := make([]byte, 1000000) + for { + num, err := source.Read(chunk) + if err == io.EOF { + tot, err := destination.Write(chunk[:num]) + if err != nil { + return b.Log(storage.ERROR, b.Name(), "Copy: Error uploading the file to SSH storage! %w", err) + } + + if tot != len(chunk[:num]) { + return b.Log(storage.ERROR, b.Name(), "sshClient: failed to write stream") + } + + break + } + + if err != nil { + return b.Log(storage.ERROR, b.Name(), "Copy: Error uploading the file to SSH storage! %w", err) + } + + tot, err := destination.Write(chunk[:num]) + if err != nil { + return b.Log(storage.ERROR, b.Name(), "Copy: Error uploading the file to SSH storage! %w", err) + } + + if tot != len(chunk[:num]) { + return b.Log(storage.ERROR, b.Name(), "sshClient: failed to write stream") + } + } + + b.Log(storage.INFO, b.Name(), "Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, b.hostName, b.DestinationPath) + + return nil +} + +// Prune rotates away backups according to the configuration and provided deadline for the SSH storage backend. 
+func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
+	candidates, err := b.sftpClient.ReadDir(b.DestinationPath)
+	if err != nil {
+		return nil, b.Log(storage.ERROR, b.Name(), "Prune: Error reading directory from SSH storage! %w", err)
+	}
+
+	var matches []string
+	for _, candidate := range candidates {
+		if !strings.HasPrefix(candidate.Name(), pruningPrefix) {
+			continue
+		}
+		if candidate.ModTime().Before(deadline) {
+			matches = append(matches, candidate.Name())
+		}
+	}
+
+	stats := &storage.PruneStats{
+		Total:  uint(len(candidates)),
+		Pruned: uint(len(matches)),
+	}
+
+	b.DoPrune(b.Name(), len(matches), len(candidates), "SSH backup(s)", func() error {
+		for _, match := range matches {
+			if err := b.sftpClient.Remove(filepath.Join(b.DestinationPath, match)); err != nil {
+				return b.Log(storage.ERROR, b.Name(), "Prune: Error removing file from SSH storage! %w", err)
+			}
+		}
+		return nil
+	})
+
+	return stats, nil
+}
diff --git a/internal/storage/storage.go b/internal/storage/storage.go
new file mode 100644
index 0000000..d33d2f4
--- /dev/null
+++ b/internal/storage/storage.go
@@ -0,0 +1,58 @@
+package storage
+
+import (
+	"time"
+)
+
+// Backend is an interface for defining functions which all storage providers support.
+type Backend interface {
+	Copy(file string) error
+	Prune(deadline time.Time, pruningPrefix string) (*PruneStats, error)
+	Name() string
+}
+
+// StorageBackend is a generic type of storage. It holds the properties that are common to all storage types.
+type StorageBackend struct {
+	DestinationPath string
+	RetentionDays   int
+	Log             Log
+}
+
+type LogType string
+
+const (
+	INFO    LogType = "INFO"
+	WARNING LogType = "WARNING"
+	ERROR   LogType = "ERROR"
+)
+
+type Log func(logType LogType, context string, msg string, params ...interface{}) error
+
+// PruneStats is a wrapper struct for returning stats after pruning
+type PruneStats struct {
+	Total  uint
+	Pruned uint
+}
+
+// DoPrune holds general control flow that applies to any kind of storage.
+// Callers can pass in a thunk that performs the actual deletion of files.
+func (b *StorageBackend) DoPrune(context string, lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error {
+	if lenMatches != 0 && lenMatches != lenCandidates {
+		if err := doRemoveFiles(); err != nil {
+			return err
+		}
+		b.Log(INFO, context,
+			"Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.",
+			lenMatches,
+			lenCandidates,
+			description,
+			b.RetentionDays,
+		)
+	} else if lenMatches != 0 && lenMatches == lenCandidates {
+		b.Log(WARNING, context, "The current configuration would delete all %d existing %s.", lenMatches, description)
+		b.Log(WARNING, context, "Refusing to do so, please check your configuration.")
+	} else {
+		b.Log(INFO, context, "None of %d existing %s were pruned.", lenCandidates, description)
+	}
+	return nil
+}
diff --git a/internal/storage/webdav/webdav.go b/internal/storage/webdav/webdav.go
new file mode 100644
index 0000000..e2d7fd7
--- /dev/null
+++ b/internal/storage/webdav/webdav.go
@@ -0,0 +1,109 @@
+package webdav
+
+import (
+	"errors"
+	"io/fs"
+	"net/http"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/offen/docker-volume-backup/internal/storage"
+	"github.com/studio-b12/gowebdav"
+)
+
+type webDavStorage struct {
+	*storage.StorageBackend
+	client *gowebdav.Client
+	url    string
+}
+
+// NewStorageBackend creates and initializes a new WebDav storage backend.
+func NewStorageBackend(url string, remotePath string, username string, password string, urlInsecure bool,
+	logFunc storage.Log) (storage.Backend, error) {
+
+	if username == "" || password == "" {
+		return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided")
+	} else {
+		webdavClient := gowebdav.NewClient(url, username, password)
+
+		if urlInsecure {
+			defaultTransport, ok := http.DefaultTransport.(*http.Transport)
+			if !ok {
+				return nil, errors.New("newScript: unexpected error when asserting type for http.DefaultTransport")
+			}
+			webdavTransport := defaultTransport.Clone()
+			webdavTransport.TLSClientConfig.InsecureSkipVerify = urlInsecure
+			webdavClient.SetTransport(webdavTransport)
+		}
+
+		return &webDavStorage{
+			StorageBackend: &storage.StorageBackend{
+				DestinationPath: remotePath,
+				Log:             logFunc,
+			},
+			client: webdavClient,
+			url:    url,
+		}, nil
+	}
+}
+
+// Name returns the name of the storage backend
+func (b *webDavStorage) Name() string {
+	return "WebDav"
+}
+
+// Copy copies the given file to the WebDav storage backend.
+func (b *webDavStorage) Copy(file string) error {
+	bytes, err := os.ReadFile(file)
+	_, name := path.Split(file)
+	if err != nil {
+		return b.Log(storage.ERROR, b.Name(), "Copy: Error reading the file to be uploaded! %w", err)
+	}
+	if err := b.client.MkdirAll(b.DestinationPath, 0644); err != nil {
+		return b.Log(storage.ERROR, b.Name(), "Copy: Error creating directory '%s' on WebDAV server! %w", b.DestinationPath, err)
+	}
+	if err := b.client.Write(filepath.Join(b.DestinationPath, name), bytes, 0644); err != nil {
+		return b.Log(storage.ERROR, b.Name(), "Copy: Error uploading the file to WebDAV server! %w", err)
+	}
+	b.Log(storage.INFO, b.Name(), "Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", file, b.url, b.DestinationPath)
+
+	return nil
+}
+
+// Prune rotates away backups according to the configuration and provided deadline for the WebDav storage backend.
+func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
+	candidates, err := b.client.ReadDir(b.DestinationPath)
+	if err != nil {
+		return nil, b.Log(storage.ERROR, b.Name(), "Prune: Error looking up candidates from remote storage! %w", err)
+	}
+	var matches []fs.FileInfo
+	var lenCandidates int
+	for _, candidate := range candidates {
+		if !strings.HasPrefix(candidate.Name(), pruningPrefix) {
+			continue
+		}
+		lenCandidates++
+		if candidate.ModTime().Before(deadline) {
+			matches = append(matches, candidate)
+		}
+	}
+
+	stats := &storage.PruneStats{
+		Total:  uint(lenCandidates),
+		Pruned: uint(len(matches)),
+	}
+
+	b.DoPrune(b.Name(), len(matches), lenCandidates, "WebDAV backup(s)", func() error {
+		for _, match := range matches {
+			if err := b.client.Remove(filepath.Join(b.DestinationPath, match.Name())); err != nil {
+				return b.Log(storage.ERROR, b.Name(), "Prune: Error removing file from WebDAV storage! %w", err)
+			}
+		}
+		return nil
+	})
+
+	return stats, nil
+}
diff --git a/cmd/backup/util.go b/internal/utilities/util.go
similarity index 88%
rename from cmd/backup/util.go
rename to internal/utilities/util.go
index fd80da6..5d55a2d 100644
--- a/cmd/backup/util.go
+++ b/internal/utilities/util.go
@@ -1,7 +1,7 @@
 // Copyright 2022 - Offen Authors
 // SPDX-License-Identifier: MPL-2.0
 
-package main
+package utilities
 
 import (
 	"bytes"
@@ -12,10 +12,10 @@ import (
 	"strings"
 )
 
-var noop = func() error { return nil }
+var Noop = func() error { return nil }
 
 // copy creates a copy of the file located at `dst` at `src`.
-func copyFile(src, dst string) error { +func CopyFile(src, dst string) error { in, err := os.Open(src) if err != nil { return err @@ -36,7 +36,7 @@ func copyFile(src, dst string) error { } // join takes a list of errors and joins them into a single error -func join(errs ...error) error { +func Join(errs ...error) error { if len(errs) == 1 { return errs[0] } @@ -51,7 +51,7 @@ func join(errs ...error) error { } // remove removes the given file or directory from disk. -func remove(location string) error { +func Remove(location string) error { fi, err := os.Lstat(location) if err != nil { if os.IsNotExist(err) { @@ -72,7 +72,7 @@ func remove(location string) error { // buffer takes an io.Writer and returns a wrapped version of the // writer that writes to both the original target as well as the returned buffer -func buffer(w io.Writer) (io.Writer, *bytes.Buffer) { +func Buffer(w io.Writer) (io.Writer, *bytes.Buffer) { buffering := &bufferingWriter{buf: bytes.Buffer{}, writer: w} return buffering, &buffering.buf }
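
For reference, a minimal sketch of what any additional backend now has to provide under the new storage.Backend interface. The noop package below is purely hypothetical and not part of this patch; it only shows how a backend embeds storage.StorageBackend, reports through the injected storage.Log function, and defers the shared pruning control flow to DoPrune.

package noop

import (
	"time"

	"github.com/offen/docker-volume-backup/internal/storage"
)

type noopStorage struct {
	*storage.StorageBackend
}

// NewStorageBackend mirrors the constructor shape of the real backends.
func NewStorageBackend(logFunc storage.Log) storage.Backend {
	return &noopStorage{
		StorageBackend: &storage.StorageBackend{
			DestinationPath: "",
			Log:             logFunc,
		},
	}
}

// Name returns the name of the storage backend.
func (b *noopStorage) Name() string {
	return "Noop"
}

// Copy would upload the given file; here it only reports the call.
func (b *noopStorage) Copy(file string) error {
	b.Log(storage.INFO, b.Name(), "Would store a copy of `%s`.", file)
	return nil
}

// Prune would rotate away backups older than the deadline; here there is
// never anything to delete, so it reports zero candidates and zero matches.
func (b *noopStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
	stats := &storage.PruneStats{Total: 0, Pruned: 0}
	if err := b.DoPrune(b.Name(), 0, 0, "noop backup(s)", func() error { return nil }); err != nil {
		return nil, err
	}
	return stats, nil
}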
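
And a sketch of the consuming side, mirroring how cmd/backup/script.go wires things up after this change: the caller adapts its logger to the storage.Log type, registers whichever backends are configured, and copies and prunes through the interface alone. The archive path, backup file, prefix and seven-day retention below are placeholder values, not defaults from the patch.

package main

import (
	"fmt"
	"os"
	"time"

	"github.com/offen/docker-volume-backup/internal/storage"
	"github.com/offen/docker-volume-backup/internal/storage/local"
)

func main() {
	// Adapt plain stderr logging to the storage.Log function type; the real
	// script builds the same kind of adapter around its logrus logger.
	logFunc := func(logType storage.LogType, context string, msg string, params ...interface{}) error {
		allParams := append([]interface{}{context}, params...)
		if logType == storage.ERROR {
			return fmt.Errorf("[%s] "+msg, allParams...)
		}
		fmt.Fprintf(os.Stderr, "[%s] "+msg+"\n", allParams...)
		return nil
	}

	// Register whichever backends are configured; only the local backend is
	// shown here since its constructor cannot fail.
	var backends []storage.Backend
	backends = append(backends, local.NewStorageBackend("/archive", "backup-latest.tar.gz", logFunc))

	// Copy and prune through the interface alone, without knowing which
	// concrete storage sits behind each backend.
	deadline := time.Now().AddDate(0, 0, -7)
	for _, backend := range backends {
		if err := backend.Copy("/tmp/backup.tar.gz"); err != nil {
			fmt.Fprintln(os.Stderr, err)
			return
		}
		if stats, err := backend.Prune(deadline, "backup-"); err == nil {
			fmt.Fprintf(os.Stderr, "[%s] pruned %d of %d backup(s)\n", backend.Name(), stats.Pruned, stats.Total)
		}
	}
}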