storage/blob/s3: Force a smaller PartSize when blob size is unknown

The blob size is not known ahead of time when message store compression is used (e.g. in imapsql).

Part of #395 fix
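
As a toy illustration of why the size is unknown (not maddy code; gzip simply stands in for whatever compressor the message store is configured with): the blob writer has to be opened before compression runs, so the compressed length only exists once the stream has been fully written.

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

func main() {
	msg := strings.NewReader("Subject: hello\r\n\r\nbody...")

	// The blob has to be created *before* compression runs, so the
	// final (compressed) size cannot be passed to Create() up front.
	var blob bytes.Buffer
	zw := gzip.NewWriter(&blob)
	if _, err := io.Copy(zw, msg); err != nil {
		panic(err)
	}
	zw.Close()

	// Only now, after the stream has been fully written and flushed,
	// is the size known.
	fmt.Println("compressed size:", blob.Len())
}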
fox.cpp 2021-08-28 15:24:35 +03:00
parent 14a441f595
commit fc00133ee3


@@ -116,7 +116,16 @@ func (s *Store) Create(ctx context.Context, key string, blobSize int64) (module.
 	errCh := make(chan error, 1)
 	go func() {
-		_, err := s.cl.PutObject(ctx, s.bucketName, s.objectPrefix+key, pr, blobSize, minio.PutObjectOptions{})
+		partSize := uint64(0)
+		if blobSize == module.UnknownBlobSize {
+			// Without this, minio-go will allocate 500 MiB buffer which
+			// is a little too much.
+			// https://github.com/minio/minio-go/issues/1478
+			partSize = 1 * 1024 * 1024 /* 1 MiB */
+		}
+		_, err := s.cl.PutObject(ctx, s.bucketName, s.objectPrefix+key, pr, blobSize, minio.PutObjectOptions{
+			PartSize: partSize,
+		})
 		if err != nil {
 			pr.CloseWithError(fmt.Errorf("s3 PutObject: %w", err))
 		}