implement lock file to ensure backup runs are mutually exclusive

Frederik Ring 2021-08-22 11:02:10 +02:00
parent 0c6ac05789
commit 4d9482a8b4


@@ -6,7 +6,10 @@ import (
     "fmt"
     "io"
     "os"
+    "os/exec"
     "path"
+    "strings"
+    "time"

     "github.com/docker/docker/api/types"
     "github.com/docker/docker/api/types/filters"
@@ -21,17 +24,25 @@ import (
 )

 func main() {
     s := &script{}
-    s.lock()
+    must(s.lock)()
     defer s.unlock()
     must(s.init)()
+    fmt.Println("Successfully initialized resources.")
     must(s.stopContainers)()
+    fmt.Println("Successfully stopped containers.")
     must(s.takeBackup)()
+    fmt.Println("Successfully took backup.")
     must(s.restartContainers)()
+    fmt.Println("Successfully restarted containers.")
     must(s.encryptBackup)()
+    fmt.Println("Successfully encrypted backup.")
     must(s.copyBackup)()
+    fmt.Println("Successfully copied backup.")
     must(s.cleanBackup)()
-    must(s.pruneBackups)()
+    fmt.Println("Successfully cleaned local backup.")
+    must(s.pruneOldBackups)()
+    fmt.Println("Successfully pruned old backup.")
 }

 type script struct {
@@ -39,11 +50,28 @@ type script struct {
     cli               *client.Client
     mc                *minio.Client
     stoppedContainers []types.Container
+    releaseLock       func() error
     file              string
 }

-func (s *script) lock() {}
-func (s *script) unlock() {}
+func (s *script) lock() error {
+    lf, err := os.OpenFile("/var/dockervolumebackup.lock", os.O_CREATE, os.ModeAppend)
+    if err != nil {
+        return fmt.Errorf("lock: error opening lock file: %w", err)
+    }
+    s.releaseLock = lf.Close
+    return nil
+}
+
+func (s *script) unlock() error {
+    if err := s.releaseLock(); err != nil {
+        return fmt.Errorf("unlock: error releasing file lock: %w", err)
+    }
+    if err := os.Remove("/var/dockervolumebackup.lock"); err != nil {
+        return fmt.Errorf("unlock: error removing lock file: %w", err)
+    }
+    return nil
+}

 func (s *script) init() error {
     s.ctx = context.Background()
@@ -85,7 +113,14 @@ func (s *script) stopContainers() error {
     if s.cli == nil {
         return nil
     }
-    stoppedContainers, err := s.cli.ContainerList(s.ctx, types.ContainerListOptions{
+    allContainers, err := s.cli.ContainerList(s.ctx, types.ContainerListOptions{
+        Quiet: true,
+    })
+    if err != nil {
+        return fmt.Errorf("stopContainers: error querying for containers: %w", err)
+    }
+
+    containersToStop, err := s.cli.ContainerList(s.ctx, types.ContainerListOptions{
         Quiet: true,
         Filters: filters.NewArgs(filters.KeyValuePair{
             Key: "label",
@@ -96,9 +131,9 @@ func (s *script) stopContainers() error {
     if err != nil {
         return fmt.Errorf("stopContainers: error querying for containers to stop: %w", err)
     }
-    fmt.Printf("Stopping %d containers\n", len(stoppedContainers))
-    if len(stoppedContainers) != 0 {
+    fmt.Printf("Stopping %d out of %d running containers\n", len(containersToStop), len(allContainers))
+    if len(containersToStop) != 0 {
         fmt.Println("Stopping containers")
         for _, container := range s.stoppedContainers {
             if err := s.cli.ContainerStop(s.ctx, container.ID, nil); err != nil {
@@ -107,15 +142,21 @@ func (s *script) stopContainers() error {
         }
     }
-    s.stoppedContainers = stoppedContainers
+    s.stoppedContainers = containersToStop
     return nil
 }

 func (s *script) takeBackup() error {
-    file := os.Getenv("BACKUP_FILENAME")
-    if file == "" {
+    if os.Getenv("BACKUP_FILENAME") == "" {
         return errors.New("takeBackup: BACKUP_FILENAME not given")
     }
+
+    outBytes, err := exec.Command("date", fmt.Sprintf("+%s", os.Getenv("BACKUP_FILENAME"))).Output()
+    if err != nil {
+        return fmt.Errorf("takeBackup: error formatting filename template: %w", err)
+    }
+    file := fmt.Sprintf("/tmp/%s", strings.TrimSpace(string(outBytes)))

     s.file = file
     if err := targz.Compress(os.Getenv("BACKUP_SOURCES"), s.file); err != nil {
         return fmt.Errorf("takeBackup: error compressing backup folder: %w", err)
@@ -124,7 +165,6 @@ func (s *script) takeBackup() error {
 }

 func (s *script) restartContainers() error {
-    fmt.Println("Starting containers/services back up")
     servicesRequiringUpdate := map[string]struct{}{}
     for _, container := range s.stoppedContainers {
         if swarmServiceName, ok := container.Labels["com.docker.swarm.service.name"]; ok {
@@ -156,6 +196,8 @@ func (s *script) restartContainers() error {
             )
         }
     }
+
+    s.stoppedContainers = []types.Container{}
     return nil
 }

@@ -194,17 +236,28 @@ func (s *script) cleanBackup() error {
     return nil
 }

-func (s *script) pruneBackups() error {
+func (s *script) pruneOldBackups() error {
     retention := os.Getenv("BACKUP_RETENTION_DAYS")
     if retention == "" {
         return nil
     }
-    return errors.New("pruneBackups: not implemented yet")
+    sleepFor, err := time.ParseDuration(os.Getenv("BACKUP_PRUNING_LEEWAY"))
+    if err != nil {
+        return fmt.Errorf("pruneBackups: error parsing given leeway value: %w", err)
+    }
+    time.Sleep(sleepFor)
+
+    if bucket := os.Getenv("AWS_S3_BUCKET_NAME"); bucket != "" {
+    }
+
+    if archive := os.Getenv("BACKUP_ARCHIVE"); archive != "" {
+    }
+    return nil
 }

 func fileExists(location string) (bool, error) {
     _, err := os.Stat(location)
-    if err != nil && err != os.ErrNotExist {
+    if err != nil && !os.IsNotExist(err) {
         return false, err
     }
     return err == nil, nil
@@ -229,10 +282,10 @@ func copy(src, dst string) error {
     if err != nil {
         return err
     }
-    defer out.Close()

     _, err = io.Copy(out, in)
     if err != nil {
+        out.Close()
         return err
     }
     return out.Close()
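
For reference, the lock-file idea named in the commit message can be seen in isolation below. This is a minimal, self-contained sketch, not code from the repository: the lock path is reused from the diff above, while the acquireLock helper, the O_EXCL flag, and the 0o644 permissions are illustrative assumptions (the commit's own lock() opens the file with O_CREATE only and removes it again in unlock()).

package main

import (
    "errors"
    "fmt"
    "os"
)

// Same path as in the diff above; any writable location works for the sketch.
const lockFile = "/var/dockervolumebackup.lock"

// acquireLock is a hypothetical helper: O_CREATE|O_EXCL makes the open fail
// when the file already exists, so a second concurrent run refuses to start.
func acquireLock() (release func() error, err error) {
    f, err := os.OpenFile(lockFile, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0o644)
    if err != nil {
        if errors.Is(err, os.ErrExist) {
            return nil, fmt.Errorf("another backup run appears to be in progress: %w", err)
        }
        return nil, err
    }
    // Releasing closes the handle and removes the file so the next run can acquire it.
    return func() error {
        if err := f.Close(); err != nil {
            return err
        }
        return os.Remove(lockFile)
    }, nil
}

func main() {
    release, err := acquireLock()
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    defer release()
    fmt.Println("running backup while holding the lock")
}

Opening with O_CREATE|O_EXCL turns "is another run already active?" into a single atomic system call, so the check and the creation of the lock file cannot race.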