diff --git a/README.md b/README.md
index 2d6d72f..dfe71f7 100644
--- a/README.md
+++ b/README.md
@@ -52,6 +52,15 @@ AWS_S3_BUCKET_NAME=""
 
 # Backups can be encrypted using gpg in case a passphrase is given
 # GPG_PASSPHRASE=""
+
+########### MINIO CLIENT CONFIGURATION
+
+# Pass these additional flags to all MinIO client `mc` invocations.
+# This can be used for example to pass `--insecure` when using self
+# signed certificates, or passing `--debug` to gain insights on
+# unexpected behavior.
+
+# MC_GLOBAL_OPTIONS=""
 ```
 
 ## Example in a docker-compose setup
diff --git a/src/backup.sh b/src/backup.sh
index 610a53f..1e9f584 100644
--- a/src/backup.sh
+++ b/src/backup.sh
@@ -58,7 +58,7 @@ fi
 if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
   info "Uploading backup to remote storage"
   echo "Will upload to bucket \"$AWS_S3_BUCKET_NAME\"."
-  mc cp "$BACKUP_FILENAME" "backup-target/$AWS_S3_BUCKET_NAME"
+  mc cp $MC_GLOBAL_OPTIONS "$BACKUP_FILENAME" "backup-target/$AWS_S3_BUCKET_NAME"
   echo "Upload finished."
 fi
 
@@ -76,14 +76,14 @@ if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
   sleep "$BACKUP_PRUNING_LEEWAY"
   bucket=$AWS_S3_BUCKET_NAME
 
-  rule_applies_to=$(mc rm --fake --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket" | wc -l)
+  rule_applies_to=$(mc rm $MC_GLOBAL_OPTIONS --fake --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket" | wc -l)
   if [ "$rule_applies_to" == "0" ]; then
     echo "No backups found older than the configured retention period of $BACKUP_RETENTION_DAYS days."
     echo "Doing nothing."
     exit 0
   fi
 
-  total=$(mc ls "backup-target/$bucket" | wc -l)
+  total=$(mc ls $MC_GLOBAL_OPTIONS "backup-target/$bucket" | wc -l)
 
   if [ "$rule_applies_to" == "$total" ]; then
     echo "Using a retention of ${BACKUP_RETENTION_DAYS} days would prune all currently existing backups, will not continue."
@@ -91,6 +91,6 @@ if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
     exit 1
   fi
 
-  mc rm --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket"
+  mc rm $MC_GLOBAL_OPTIONS --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket"
   echo "Successfully pruned ${rule_applies_to} backups older than ${BACKUP_RETENTION_DAYS} days."
 fi
diff --git a/src/entrypoint.sh b/src/entrypoint.sh
index dedd7e0..9ab550a 100644
--- a/src/entrypoint.sh
+++ b/src/entrypoint.sh
@@ -21,11 +21,13 @@ AWS_S3_BUCKET_NAME="${AWS_S3_BUCKET_NAME:-}"
 
 AWS_ENDPOINT="${AWS_ENDPOINT:-s3.amazonaws.com}"
 GPG_PASSPHRASE="${GPG_PASSPHRASE:-}"
+
+MC_GLOBAL_OPTIONS="${MC_GLOBAL_OPTIONS:-}"
 EOF
 chmod a+x env.sh
 source env.sh
 
-mc alias set backup-target "https://$AWS_ENDPOINT" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
+mc $MC_GLOBAL_OPTIONS alias set backup-target "https://$AWS_ENDPOINT" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
 
 # Add our cron entry, and direct stdout & stderr to Docker commands stdout
 echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."