// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"sync"
)

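// noop is a no-op function that does nothing and always returns a nil error.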
var noop = func() error { return nil }

// remove removes the given file or directory from disk.
func remove(location string) error {
	fi, err := os.Lstat(location)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return fmt.Errorf("remove: error checking for existence of `%s`: %w", location, err)
	}
	if fi.IsDir() {
		err = os.RemoveAll(location)
	} else {
		err = os.Remove(location)
	}
	if err != nil {
		return fmt.Errorf("remove: error removing `%s`: %w", location, err)
	}
	return nil
}

// buffer takes an io.Writer and returns a wrapped version of the writer
// that writes to both the original target as well as the returned buffer.
func buffer(w io.Writer) (io.Writer, *bytes.Buffer) {
	buffering := &bufferingWriter{buf: bytes.Buffer{}, writer: w}
	return buffering, &buffering.buf
}

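// bufferingWriter wraps another io.Writer and keeps a copy of everything
// written to it in an internal buffer.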
type bufferingWriter struct {
	buf    bytes.Buffer
	writer io.Writer
}

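// Write writes p to the internal buffer before passing it on to the
// wrapped writer.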
func (b *bufferingWriter) Write(p []byte) (n int, err error) {
	if n, err := b.buf.Write(p); err != nil {
		return n, fmt.Errorf("(*bufferingWriter).Write: error writing to buffer: %w", err)
	}
	return b.writer.Write(p)
}

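// noopWriteCloser turns an io.Writer into an io.WriteCloser whose Close is
// a no-op.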
type noopWriteCloser struct {
	io.Writer
}

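// Close implements io.Closer without doing anything.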
func (noopWriteCloser) Close() error {
	return nil
}

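// handledSwarmService pairs a swarm service's ID with the replica count it
// had before being handled, so the service can later be scaled back to it.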
type handledSwarmService struct {
	serviceID           string
	initialReplicaCount uint64
}

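// concurrentSlice is a slice that can safely be appended to from multiple
// goroutines.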
type concurrentSlice[T any] struct {
	val []T
	sync.Mutex
}

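// append adds an element to the slice while holding the mutex.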
func (c *concurrentSlice[T]) append(v T) {
	c.Lock()
	defer c.Unlock()
	c.val = append(c.val, v)
}

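// value returns the current contents of the slice.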
func (c *concurrentSlice[T]) value() []T {
	return c.val
}