// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package s3

import (
	"context"
	"crypto/x509"
	"errors"
	"fmt"
	"os"
	"path"
	"path/filepath"
	"time"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/offen/docker-volume-backup/internal/storage"
)
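
// s3Storage is a storage.Backend that stores backup archives as objects
// in an S3-compatible bucket, using the MinIO client library.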
type s3Storage struct {
	*storage.StorageBackend
	client       *minio.Client
	bucket       string
	storageClass string
	partSize     int64
}

// Config contains values that define the configuration of an S3 backend.
type Config struct {
	Endpoint         string
	AccessKeyID      string
	SecretAccessKey  string
	IamRoleEndpoint  string
	EndpointProto    string
	EndpointInsecure bool
	RemotePath       string
	BucketName       string
	StorageClass     string
	PartSize         int64
	CACert           *x509.Certificate
}

// NewStorageBackend creates and initializes a new S3/Minio storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
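	// Prefer statically configured access credentials and fall back to
	// fetching temporary credentials from an IAM role endpoint; having
	// neither available is a configuration error.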
	var creds *credentials.Credentials
	if opts.AccessKeyID != "" && opts.SecretAccessKey != "" {
		creds = credentials.NewStaticV4(
			opts.AccessKeyID,
			opts.SecretAccessKey,
			"",
		)
	} else if opts.IamRoleEndpoint != "" {
		creds = credentials.NewIAM(opts.IamRoleEndpoint)
	} else {
		return nil, errors.New("NewStorageBackend: AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
	}

	options := minio.Options{
		Creds:  creds,
		Secure: opts.EndpointProto == "https",
	}

	transport, err := minio.DefaultTransport(true)
	if err != nil {
		return nil, fmt.Errorf("NewStorageBackend: failed to create default minio transport: %w", err)
	}
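
	// An explicitly insecure endpoint skips TLS certificate verification
	// altogether, while a custom CA certificate is added to the transport's
	// root CA pool so self-signed endpoints can still be verified.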
	if opts.EndpointInsecure {
		if !options.Secure {
			return nil, errors.New("NewStorageBackend: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
		}
		transport.TLSClientConfig.InsecureSkipVerify = true
	} else if opts.CACert != nil {
		if transport.TLSClientConfig.RootCAs == nil {
			transport.TLSClientConfig.RootCAs = x509.NewCertPool()
		}
		transport.TLSClientConfig.RootCAs.AddCert(opts.CACert)
	}
	options.Transport = transport

	mc, err := minio.New(opts.Endpoint, &options)
	if err != nil {
		return nil, fmt.Errorf("NewStorageBackend: error setting up minio client: %w", err)
	}

	return &s3Storage{
		StorageBackend: &storage.StorageBackend{
			DestinationPath: opts.RemotePath,
			Log:             logFunc,
		},
		client:       mc,
		bucket:       opts.BucketName,
		storageClass: opts.StorageClass,
		partSize:     opts.PartSize,
	}, nil
}

// Name returns the name of the storage backend.
func (b *s3Storage) Name() string {
	return "S3"
}

// Copy copies the given file to the S3/Minio storage backend.
func (b *s3Storage) Copy(file string) error {
	_, name := path.Split(file)
	putObjectOptions := minio.PutObjectOptions{
		ContentType:  "application/tar+gzip",
		StorageClass: b.storageClass,
	}
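
	// A configured part size (given in MiB) is converted to bytes and handed
	// to minio.OptimalPartInfo, which validates it against the file size and
	// S3's multipart upload limits before it is applied to the upload.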
	if b.partSize > 0 {
		srcFileInfo, err := os.Stat(file)
		if err != nil {
			return fmt.Errorf("(*s3Storage).Copy: error reading the local file: %w", err)
		}

		_, partSize, _, err := minio.OptimalPartInfo(srcFileInfo.Size(), uint64(b.partSize*1024*1024))
		if err != nil {
			return fmt.Errorf("(*s3Storage).Copy: error computing the optimal s3 part size: %w", err)
		}

		putObjectOptions.PartSize = uint64(partSize)
	}
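
	// S3 error responses carry additional detail (message, code, HTTP
	// status), so they are unwrapped via minio.ToErrorResponse for a more
	// useful error message where available.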
	if _, err := b.client.FPutObject(context.Background(), b.bucket, filepath.Join(b.DestinationPath, name), file, putObjectOptions); err != nil {
		if errResp := minio.ToErrorResponse(err); errResp.Message != "" {
			return fmt.Errorf(
				"(*s3Storage).Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d",
				errResp.Message,
				errResp.Code,
				errResp.StatusCode,
			)
		}
		return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: %w", err)
	}

	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to bucket `%s`.", file, b.bucket)

	return nil
}

// Prune rotates away backups according to the configuration and provided deadline for the S3/Minio storage backend.
func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
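	// List every object below the destination path that matches the pruning
	// prefix; anything last modified before the deadline is a candidate for
	// removal.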
	candidates := b.client.ListObjects(context.Background(), b.bucket, minio.ListObjectsOptions{
		Prefix:    filepath.Join(b.DestinationPath, pruningPrefix),
		Recursive: true,
	})

	var matches []minio.ObjectInfo
	var lenCandidates int
	for candidate := range candidates {
		lenCandidates++
		if candidate.Err != nil {
			return nil, fmt.Errorf(
				"(*s3Storage).Prune: error looking up candidates from remote storage: %w",
				candidate.Err,
			)
		}
		if candidate.LastModified.Before(deadline) {
			matches = append(matches, candidate)
		}
	}

	stats := &storage.PruneStats{
		Total:  uint(lenCandidates),
		Pruned: uint(len(matches)),
	}
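
	// DoPrune wraps the actual deletion with the shared pruning bookkeeping
	// and logging. The callback feeds all matches into RemoveObjects through
	// a channel filled by a separate goroutine and collects any per-object
	// removal errors into a single joined error.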
	pruneErr := b.DoPrune(b.Name(), len(matches), lenCandidates, deadline, func() error {
		objectsCh := make(chan minio.ObjectInfo)
		go func() {
			for _, match := range matches {
				objectsCh <- match
			}
			close(objectsCh)
		}()
		errChan := b.client.RemoveObjects(context.Background(), b.bucket, objectsCh, minio.RemoveObjectsOptions{})
		var removeErrors []error
		for result := range errChan {
			if result.Err != nil {
				removeErrors = append(removeErrors, result.Err)
			}
		}
		if len(removeErrors) != 0 {
			return errors.Join(removeErrors...)
		}
		return nil
	})

	return stats, pruneErr
}