feat: add better handler for part size (#214)

* feat: add better handler for part size

* fix: use local file

* fix: try with another path

* fix: use bytes

* chore: go back

* go back readme

* go back

* go back

* go back

* chore: better handling

* fix: typo readme

* chore: wrong comparison

* fix: typo
Erwan LE PRADO, 2023-06-02 16:30:02 +02:00, committed by GitHub
parent bcffe0bc25
commit 5ea9a7ce15
4 changed files with 34 additions and 3 deletions


@@ -260,6 +260,15 @@ You can populate below template according to your requirements and use it as you
 # AWS_STORAGE_CLASS="GLACIER"
+# Setting this variable will change the default S3 part size used during the copy step.
+# This is useful when you want to upload large files.
+# NB: When using Scaleway as the S3 provider, be aware that the maximum number of parts is 1,000,
+# while MinIO hard-codes a limit of 10,000 parts. As a workaround, set a higher part size.
+# Defaults to "16" (MB) if unset (MinIO's default); set this value according to your needs.
+# The unit is MB and the value must be an integer.
+# AWS_PART_SIZE=16
 # You can also backup files to any WebDAV server:
 # The URL of the remote WebDAV server
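The interplay between part size and provider part limits determines the largest backup that can be uploaded. A quick back-of-the-envelope sketch in Go, using the caps from the note above (the program and its numbers are illustrative, not part of this change):

package main

import "fmt"

func main() {
	// The largest object a multipart upload can carry is the part size
	// multiplied by the provider's part-count cap (caps as per the note
	// above: 1,000 for Scaleway, 10,000 for MinIO).
	partSizeMB := int64(16) // the AWS_PART_SIZE default

	fmt.Printf("MinIO    (10,000 parts): %d GB max\n", partSizeMB*10000/1000) // 160 GB
	fmt.Printf("Scaleway (1,000 parts):  %d GB max\n", partSizeMB*1000/1000)  // 16 GB
}

This is why a backup larger than roughly 16 GB fails against Scaleway at the default setting, and why raising AWS_PART_SIZE is the suggested workaround.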


@@ -28,6 +28,7 @@ type Config struct {
 	AwsSecretAccessKey     string `split_words:"true"`
 	AwsSecretAccessKeyFile string `split_words:"true"`
 	AwsIamRoleEndpoint     string `split_words:"true"`
+	AwsPartSize            int64  `split_words:"true"`
 	BackupSources          string `split_words:"true" default:"/backup"`
 	BackupFilename         string `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.tar.gz"`
 	BackupFilenameExpand   bool   `split_words:"true"`
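The `split_words:"true"` tag is the convention of the envconfig library; assuming the project parses configuration through that library (which this tag suggests), it derives the variable name AWS_PART_SIZE from the field name AwsPartSize. A minimal, hypothetical sketch of that mapping:

package main

import (
	"fmt"

	"github.com/kelseyhightower/envconfig"
)

// Config mirrors just the new field, for illustration only.
type Config struct {
	AwsPartSize int64 `split_words:"true"`
}

func main() {
	// With split_words set, envconfig maps the CamelCase field name
	// AwsPartSize to the environment variable AWS_PART_SIZE.
	var c Config
	if err := envconfig.Process("", &c); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(c.AwsPartSize) // prints 16 when AWS_PART_SIZE=16 is set
}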


@@ -142,6 +142,7 @@ func newScript() (*script, error) {
 		BucketName:   s.c.AwsS3BucketName,
 		StorageClass: s.c.AwsStorageClass,
 		CACert:       s.c.AwsEndpointCACert.Cert,
+		PartSize:     s.c.AwsPartSize,
 	}
 	if s3Backend, err := s3.NewStorageBackend(s3Config, logFunc); err != nil {
 		return nil, err


@@ -8,6 +8,7 @@ import (
 	"crypto/x509"
 	"errors"
 	"fmt"
+	"os"
 	"path"
 	"path/filepath"
 	"time"
@@ -22,6 +23,7 @@ type s3Storage struct {
 	client       *minio.Client
 	bucket       string
 	storageClass string
+	partSize     int64
 }

 // Config contains values that define the configuration of a S3 backend.
@@ -35,6 +37,7 @@ type Config struct {
 	RemotePath   string
 	BucketName   string
 	StorageClass string
+	PartSize     int64
 	CACert       *x509.Certificate
 }

@@ -89,6 +92,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
 		client:       mc,
 		bucket:       opts.BucketName,
 		storageClass: opts.StorageClass,
+		partSize:     opts.PartSize,
 	}, nil
 }

@@ -100,16 +104,32 @@ func (v *s3Storage) Name() string {
 // Copy copies the given file to the S3/Minio storage backend.
 func (b *s3Storage) Copy(file string) error {
 	_, name := path.Split(file)
-	if _, err := b.client.FPutObject(context.Background(), b.bucket, filepath.Join(b.DestinationPath, name), file, minio.PutObjectOptions{
+	putObjectOptions := minio.PutObjectOptions{
 		ContentType:  "application/tar+gzip",
 		StorageClass: b.storageClass,
-	}); err != nil {
+	}
+	if b.partSize > 0 {
+		srcFileInfo, err := os.Stat(file)
+		if err != nil {
+			return fmt.Errorf("(*s3Storage).Copy: error reading the local file: %w", err)
+		}
+		_, partSize, _, err := minio.OptimalPartInfo(srcFileInfo.Size(), uint64(b.partSize*1024*1024))
+		if err != nil {
+			return fmt.Errorf("(*s3Storage).Copy: error computing the optimal s3 part size: %w", err)
+		}
+		putObjectOptions.PartSize = uint64(partSize)
+	}
+	if _, err := b.client.FPutObject(context.Background(), b.bucket, filepath.Join(b.DestinationPath, name), file, putObjectOptions); err != nil {
 		if errResp := minio.ToErrorResponse(err); errResp.Message != "" {
 			return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode)
 		}
 		return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: %w", err)
 	}
 	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to bucket `%s`.", file, b.bucket)
 	return nil
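Conceptually, minio.OptimalPartInfo takes the configured part size as a floor and grows it when it would otherwise exceed the library's part-count ceiling. A hypothetical re-implementation of that rounding, to make the behaviour concrete (the real library computes and aligns part sizes differently; this only mirrors the idea):

package main

import "fmt"

const maxPartsCount = 10000 // MinIO's hard part-count ceiling

// optimalPartSize is a hypothetical stand-in for the rounding done by
// minio.OptimalPartInfo in the diff above: if the configured part size
// would need more than maxPartsCount parts, grow it until the upload fits.
func optimalPartSize(objectSize, configured int64) int64 {
	partSize := configured
	for (objectSize+partSize-1)/partSize > maxPartsCount {
		partSize *= 2
	}
	return partSize
}

func main() {
	const mb = int64(1024 * 1024)
	// A 200 GiB backup cannot be uploaded in 16 MB parts (12,800 parts),
	// so the part size is doubled to 32 MB (6,400 parts).
	fmt.Println(optimalPartSize(200*1024*mb, 16*mb) / mb) // 32
}

This also explains the `b.partSize > 0` guard: when AWS_PART_SIZE is unset, PartSize stays zero, no os.Stat call is made, and minio-go falls back to its own default part sizing.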