adapt repo layout to go

Frederik Ring 2021-08-22 18:07:32 +02:00
parent 435583168b
commit da9458724f
4 changed files with 13 additions and 192 deletions

View File

@@ -5,8 +5,8 @@ FROM golang:1.17-alpine as builder

 WORKDIR /app

 COPY go.mod go.sum ./
-COPY src/main.go ./src/main.go
-RUN go build -o backup src/main.go
+COPY cmd/backup/main.go ./cmd/backup/main.go
+RUN go build -o backup cmd/backup/main.go

 FROM alpine:3.14
@@ -16,7 +16,7 @@ RUN apk add --update ca-certificates

 COPY --from=builder /app/backup /usr/bin/backup
-COPY src/entrypoint.sh /root/
+COPY ./entrypoint.sh /root/
 RUN chmod +x entrypoint.sh

 ENTRYPOINT ["/root/entrypoint.sh"]

View File

@@ -1,3 +1,6 @@
+// Copyright 2021 - Offen Authors <hioffen@posteo.de>
+// SPDX-License-Identifier: MPL-2.0
+
 package main

 import (
@@ -28,7 +31,7 @@ import (
 )

 func main() {
-	unlock, err := lock()
+	unlock, err := lock("/var/dockervolumebackup.lock")
 	if err != nil {
 		panic(err)
 	}
@@ -57,10 +60,9 @@ type script struct {
 	archive string
 }

-// lock opens a lock file without releasing it and returns a function that
-// can be called once the lock shall be released again.
-func lock() (func() error, error) {
-	lockfile := "/var/dockervolumebackup.lock"
+// lock opens a lockfile, keeping it open until the caller invokes the returned
+// release func.
+func lock(lockfile string) (func() error, error) {
 	lf, err := os.OpenFile(lockfile, os.O_CREATE, os.ModeAppend)
 	if err != nil {
 		return nil, fmt.Errorf("lock: error opening lock file: %w", err)
@@ -316,7 +318,7 @@ func (s *script) cleanBackup() error {
 	if err := os.Remove(s.file); err != nil {
 		return fmt.Errorf("cleanBackup: error removing file: %w", err)
 	}
-	s.logger.Info("Successfully cleaned local backup.")
+	s.logger.Info("Successfully cleaned up local artifacts.")
 	return nil
 }
@@ -391,7 +393,7 @@ func (s *script) pruneOldBackups() error {
 		} else if len(matches) != 0 && len(matches) == lenCandidates {
 			s.logger.Warnf("The current configuration would delete all %d remote backups. Refusing to do so.", len(matches))
 		} else {
-			s.logger.Info("No remote backups were pruned.")
+			s.logger.Infof("None of %d remote backups were pruned.", lenCandidates)
 		}
 	}
@@ -443,7 +445,7 @@ func (s *script) pruneOldBackups() error {
 		} else if len(matches) != 0 && len(matches) == len(candidates) {
 			s.logger.Warnf("The current configuration would delete all %d local backups. Refusing to do so.", len(matches))
 		} else {
-			s.logger.Info("No local backups were pruned.")
+			s.logger.Infof("None of %d local backups were pruned.", len(candidates))
 		}
 	}

 	return nil
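
Both hunks above touch the same safety valve, once for remote and once for local storage: if the retention rule would match every existing backup, nothing is deleted. A self-contained illustration of that decision logic (pruneGuard is a hypothetical name; the real code logs through s.logger inside pruneOldBackups):

package main

import "fmt"

// pruneGuard mirrors the three outcomes of the check: nothing matched,
// everything matched (refuse), or a proper subset matched (prune).
func pruneGuard(matches, candidates int) string {
	switch {
	case matches == 0:
		return fmt.Sprintf("None of %d backups were pruned.", candidates)
	case matches == candidates:
		return fmt.Sprintf("The current configuration would delete all %d backups. Refusing to do so.", matches)
	default:
		return fmt.Sprintf("Pruned %d out of %d backups.", matches, candidates)
	}
}

func main() {
	fmt.Println(pruneGuard(0, 5)) // nothing expired yet
	fmt.Println(pruneGuard(5, 5)) // would wipe everything, guarded
	fmt.Println(pruneGuard(2, 5)) // normal pruning
}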

View File

@@ -1,181 +0,0 @@
-#!/bin/sh
-
-# Copyright 2021 - Offen Authors <hioffen@posteo.de>
-# SPDX-License-Identifier: MPL-2.0
-
-# Portions of this file are taken from github.com/futurice/docker-volume-backup
-# See NOTICE for information about authors and licensing.
-
-source env.sh
-
-function info {
-  echo -e "\n[INFO] $1\n"
-}
-
-info "Preparing backup"
-
-DOCKER_SOCK="/var/run/docker.sock"
-
-if [ -S "$DOCKER_SOCK" ]; then
-  TEMPFILE="$(mktemp)"
-  docker ps -q \
-    --filter "label=docker-volume-backup.stop-during-backup=$BACKUP_STOP_CONTAINER_LABEL" \
-    > "$TEMPFILE"
-  CONTAINERS_TO_STOP="$(cat $TEMPFILE | tr '\n' ' ')"
-  CONTAINERS_TO_STOP_TOTAL="$(cat $TEMPFILE | wc -l)"
-  CONTAINERS_TOTAL="$(docker ps -q | wc -l)"
-  rm "$TEMPFILE"
-  echo "$CONTAINERS_TOTAL containers running on host in total."
-  echo "$CONTAINERS_TO_STOP_TOTAL containers marked to be stopped during backup."
-else
-  CONTAINERS_TO_STOP_TOTAL="0"
-  CONTAINERS_TOTAL="0"
-  echo "Cannot access \"$DOCKER_SOCK\", won't look for containers to stop."
-fi
-
-if [ "$CONTAINERS_TO_STOP_TOTAL" != "0" ]; then
-  info "Stopping containers"
-  docker stop $CONTAINERS_TO_STOP
-fi
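
For context on where this logic went: the Go rewrite talks to the Docker API directly instead of shelling out to docker ps and docker stop. A rough sketch of that step against the 2021-era Docker SDK for Go (function name and flow are illustrative assumptions, not the project's actual code):

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

// stopLabeled lists running containers carrying the given label and stops
// them, returning their IDs so they can be restarted after the backup.
func stopLabeled(ctx context.Context, label string) ([]string, error) {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		return nil, err
	}
	containers, err := cli.ContainerList(ctx, types.ContainerListOptions{
		Filters: filters.NewArgs(filters.Arg("label", label)),
	})
	if err != nil {
		return nil, err
	}
	stopped := make([]string, 0, len(containers))
	for _, c := range containers {
		// nil timeout means the daemon's default grace period applies.
		if err := cli.ContainerStop(ctx, c.ID, nil); err != nil {
			return nil, fmt.Errorf("stopping %s: %w", c.ID, err)
		}
		stopped = append(stopped, c.ID)
	}
	return stopped, nil
}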
info "Creating backup"
BACKUP_FILENAME="$(date +"$BACKUP_FILENAME")"
tar -czvf "$BACKUP_FILENAME" $BACKUP_SOURCES # allow the var to expand, in case we have multiple sources
if [ ! -z "$GPG_PASSPHRASE" ]; then
info "Encrypting backup"
gpg --symmetric --cipher-algo aes256 --batch --passphrase "$GPG_PASSPHRASE" \
-o "${BACKUP_FILENAME}.gpg" $BACKUP_FILENAME
rm $BACKUP_FILENAME
BACKUP_FILENAME="${BACKUP_FILENAME}.gpg"
fi
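
The tar -czvf call above is the heart of the backup; in Go the same thing can be done with the standard library alone. A sketch (tarGz is a hypothetical helper, not the project's implementation):

package main

import (
	"archive/tar"
	"compress/gzip"
	"io"
	"os"
	"path/filepath"
)

// tarGz streams sourceDir into a gzipped tarball at outPath, roughly
// what `tar -czf outPath sourceDir` does.
func tarGz(outPath, sourceDir string) error {
	out, err := os.Create(outPath)
	if err != nil {
		return err
	}
	defer out.Close()

	gz := gzip.NewWriter(out)
	defer gz.Close()
	tw := tar.NewWriter(gz)
	defer tw.Close()

	return filepath.Walk(sourceDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		header, err := tar.FileInfoHeader(info, "")
		if err != nil {
			return err
		}
		// Store paths relative to the parent of sourceDir so the
		// archive unpacks into a single top-level directory.
		if header.Name, err = filepath.Rel(filepath.Dir(sourceDir), path); err != nil {
			return err
		}
		if err := tw.WriteHeader(header); err != nil {
			return err
		}
		if !info.Mode().IsRegular() {
			return nil
		}
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(tw, f)
		return err
	})
}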
if [ "$CONTAINERS_TO_STOP_TOTAL" != "0" ]; then
info "Starting containers/services back up"
# The container might be part of a stack when running in swarm mode, so
# its parent service needs to be restarted instead once backup is finished.
SERVICES_REQUIRING_UPDATE=""
for CONTAINER_ID in $CONTAINERS_TO_STOP; do
SWARM_SERVICE_NAME=$(
docker inspect \
--format "{{ index .Config.Labels \"com.docker.swarm.service.name\" }}" \
$CONTAINER_ID
)
if [ -z "$SWARM_SERVICE_NAME" ]; then
echo "Restarting $(docker start $CONTAINER_ID)"
else
echo "Removing $(docker rm $CONTAINER_ID)"
# Multiple containers might belong to the same service, so they will
# be restarted only after all names are known.
SERVICES_REQUIRING_UPDATE="${SERVICES_REQUIRING_UPDATE} ${SWARM_SERVICE_NAME}"
fi
done
if [ -n "$SERVICES_REQUIRING_UPDATE" ]; then
for SERVICE_NAME in $(echo -n "$SERVICES_REQUIRING_UPDATE" | tr ' ' '\n' | sort -u); do
docker service update --force $SERVICE_NAME
done
fi
fi
copy_backup () {
mc cp $MC_GLOBAL_OPTIONS "$BACKUP_FILENAME" "$1"
}
if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
info "Uploading backup to remote storage"
echo "Will upload to bucket \"$AWS_S3_BUCKET_NAME\"."
copy_backup "backup-target/$AWS_S3_BUCKET_NAME"
echo "Upload finished."
fi
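
The upload above goes through the mc CLI against an S3-compatible bucket; in a Go rewrite that step maps naturally onto an S3 client library. A hedged sketch using minio-go v7 (the endpoint variable, object name, and option values are illustrative assumptions, not the tool's real configuration surface):

package main

import (
	"context"
	"log"
	"os"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New(os.Getenv("AWS_ENDPOINT"), &minio.Options{
		Creds: credentials.NewStaticV4(
			os.Getenv("AWS_ACCESS_KEY_ID"),
			os.Getenv("AWS_SECRET_ACCESS_KEY"),
			"",
		),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	// Rough equivalent of `mc cp $BACKUP_FILENAME backup-target/$AWS_S3_BUCKET_NAME`.
	if _, err := client.FPutObject(
		context.Background(),
		os.Getenv("AWS_S3_BUCKET_NAME"),
		"backup.tar.gz",
		"/tmp/backup.tar.gz",
		minio.PutObjectOptions{},
	); err != nil {
		log.Fatal(err)
	}
}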
if [ -d "$BACKUP_ARCHIVE" ]; then
info "Copying backup to local archive"
echo "Will copy to \"$BACKUP_ARCHIVE\"."
copy_backup "$BACKUP_ARCHIVE"
echo "Finished copying."
fi
if [ -f "$BACKUP_FILENAME" ]; then
info "Cleaning up"
rm -vf "$BACKUP_FILENAME"
fi
info "Backup finished"
echo "Will wait for next scheduled backup."
probe_expired () {
local target=$1
local is_local=$2
if [ -z "$is_local" ]; then
if [ ! -z "$BACKUP_PRUNING_PREFIX" ]; then
target="${target}/${BACKUP_PRUNING_PREFIX}"
fi
mc rm $MC_GLOBAL_OPTIONS --fake --recursive --force \
--older-than "${BACKUP_RETENTION_DAYS}d" \
"$target"
else
find $target -name "${BACKUP_PRUNING_PREFIX:-*}" -type f -mtime "+${BACKUP_RETENTION_DAYS}"
fi
}
probe_all () {
local target=$1
local is_local=$2
if [ -z "$is_local" ]; then
if [ ! -z "$BACKUP_PRUNING_PREFIX" ]; then
target="${target}/${BACKUP_PRUNING_PREFIX}"
fi
mc ls $MC_GLOBAL_OPTIONS "$target"
else
find $target -name "${BACKUP_PRUNING_PREFIX:-*}" -type f
fi
}
delete () {
local target=$1
local is_local=$2
if [ -z "$is_local" ]; then
if [ ! -z "$BACKUP_PRUNING_PREFIX" ]; then
target="${target}/${BACKUP_PRUNING_PREFIX}"
fi
mc rm $MC_GLOBAL_OPTIONS --recursive --force \
--older-than "${BACKUP_RETENTION_DAYS}d" \
"$target"
else
find $target -name "${BACKUP_PRUNING_PREFIX:-*}" -type f -mtime "+${BACKUP_RETENTION_DAYS}" -delete
fi
}
prune () {
local target=$1
local is_local=$2
rule_applies_to=$(probe_expired "$target" "$is_local" | wc -l)
if [ "$rule_applies_to" == "0" ]; then
echo "No backups found older than the configured retention period of ${BACKUP_RETENTION_DAYS} days."
echo "Doing nothing."
else
total=$(probe_all "$target" "$is_local" | wc -l)
if [ "$rule_applies_to" == "$total" ]; then
echo "Using a retention of ${BACKUP_RETENTION_DAYS} days would prune all currently existing backups, will not continue."
echo "If this is what you want, please remove files manually instead of using this script."
else
delete "$target" "$is_local"
echo "Successfully pruned ${rule_applies_to} backups older than ${BACKUP_RETENTION_DAYS} days."
fi
fi
}
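
The find -mtime branch above is the local half of this pruning (the mc branch handles remote storage); translated to the Go stdlib it becomes a modification-time cutoff. A sketch under made-up names (pruneLocal is not the project's actual function):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"
)

// pruneLocal removes regular files under dir whose name starts with prefix
// and whose mtime is older than retentionDays, roughly mirroring
// `find $target -name "${prefix}*" -type f -mtime "+$retentionDays" -delete`.
func pruneLocal(dir, prefix string, retentionDays int) error {
	deadline := time.Now().AddDate(0, 0, -retentionDays)
	entries, err := os.ReadDir(dir)
	if err != nil {
		return fmt.Errorf("pruneLocal: %w", err)
	}
	for _, entry := range entries {
		if entry.IsDir() || !strings.HasPrefix(entry.Name(), prefix) {
			continue
		}
		info, err := entry.Info()
		if err != nil {
			return fmt.Errorf("pruneLocal: %w", err)
		}
		if info.ModTime().Before(deadline) {
			if err := os.Remove(filepath.Join(dir, entry.Name())); err != nil {
				return fmt.Errorf("pruneLocal: %w", err)
			}
		}
	}
	return nil
}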
if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
info "Pruning old backups"
echo "Sleeping ${BACKUP_PRUNING_LEEWAY} before checking eligibility."
sleep "$BACKUP_PRUNING_LEEWAY"
if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
info "Pruning old backups from remote storage"
prune "backup-target/$AWS_S3_BUCKET_NAME"
fi
if [ -d "$BACKUP_ARCHIVE" ]; then
info "Pruning old backups from local archive"
prune "$BACKUP_ARCHIVE" "local"
fi
fi