package s3

import (
	"context"
	"fmt"
	"hash"
	"io"
	"net/http"
	"os"
	"path"
	"slices"
	"strings"
	"time"

	"github.com/cenkalti/backoff/v4"
	"github.com/restic/restic/internal/backend"
	"github.com/restic/restic/internal/backend/layout"
	"github.com/restic/restic/internal/backend/location"
	"github.com/restic/restic/internal/backend/util"
	"github.com/restic/restic/internal/debug"
	"github.com/restic/restic/internal/errors"
	"github.com/restic/restic/internal/feature"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

// Backend stores data on an S3 endpoint.
type Backend struct {
	client *minio.Client
	cfg    Config
	layout.Layout
}

// make sure that *Backend implements backend.Backend
var _ backend.Backend = &Backend{}
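
// archiveClasses lists the S3 storage classes that keep objects in cold
// storage; objects stored in one of these classes must be restored (warmed
// up) before their contents can be read.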
var archiveClasses = []string{"GLACIER", "DEEP_ARCHIVE"}
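
// warmupStatus describes the restore state of an object: cold (in an archive
// class with no restore in progress), warming up (restore in progress), warm
// (readable), or lukewarm (restored, but with an expiry window shorter than
// the configured restore-days).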
type warmupStatus int

const (
	warmupStatusCold warmupStatus = iota
	warmupStatusWarmingUp
	warmupStatusWarm
	warmupStatusLukewarm
)
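
// NewFactory returns a location.Factory for "s3" repository locations.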
func NewFactory() location.Factory {
	return location.NewHTTPBackendFactory("s3", ParseConfig, location.NoPassword, Create, Open)
}
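
// open creates the minio client and the *Backend for the given config. It
// does not check whether the bucket exists.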
func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
	debug.Log("open, config %#v", cfg)

	if cfg.EnableRestore && !feature.Flag.Enabled(feature.S3Restore) {
		return nil, fmt.Errorf("feature flag `s3-restore` is required to use `-o s3.enable-restore=true`")
	}
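
	// Sketch of the expected invocation (hypothetical command line): both the
	// alpha feature flag and the backend option have to be set, e.g.
	//   RESTIC_FEATURES=s3-restore restic -o s3.enable-restore=true restore ...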

	if cfg.KeyID == "" && cfg.Secret.String() != "" {
		return nil, errors.Fatalf("unable to open S3 backend: Key ID ($AWS_ACCESS_KEY_ID) is empty")
	} else if cfg.KeyID != "" && cfg.Secret.String() == "" {
		return nil, errors.Fatalf("unable to open S3 backend: Secret ($AWS_SECRET_ACCESS_KEY) is empty")
	}

	if cfg.MaxRetries > 0 {
		minio.MaxRetry = int(cfg.MaxRetries)
	}

	creds, err := getCredentials(cfg, rt)
	if err != nil {
		return nil, errors.Wrap(err, "s3.getCredentials")
	}

	options := &minio.Options{
		Creds:     creds,
		Secure:    !cfg.UseHTTP,
		Region:    cfg.Region,
		Transport: rt,
	}

	switch strings.ToLower(cfg.BucketLookup) {
	case "", "auto":
		options.BucketLookup = minio.BucketLookupAuto
	case "dns":
		options.BucketLookup = minio.BucketLookupDNS
	case "path":
		options.BucketLookup = minio.BucketLookupPath
	default:
		return nil, fmt.Errorf(`bad bucket-lookup style %q, must be "auto", "path" or "dns"`, cfg.BucketLookup)
	}

	client, err := minio.New(cfg.Endpoint, options)
	if err != nil {
		return nil, errors.Wrap(err, "minio.New")
	}

	be := &Backend{
		client: client,
		cfg:    cfg,
		Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join),
	}

	return be, nil
}

// getCredentials runs through the various credential types and returns the
// first one that works. Additionally, if the user has specified a role to
// assume, it will do that as well.
func getCredentials(cfg Config, tr http.RoundTripper) (*credentials.Credentials, error) {
	if cfg.UnsafeAnonymousAuth {
		return credentials.New(&credentials.Static{}), nil
	}

	// Chains all credential types, in the following order:
	// - AWS env vars (i.e. AWS_ACCESS_KEY_ID)
	// - Static credentials provided by user
	// - Minio env vars (i.e. MINIO_ACCESS_KEY)
	// - AWS creds file (i.e. AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials)
	// - Minio creds file (i.e. MINIO_SHARED_CREDENTIALS_FILE or ~/.mc/config.json)
	// - IAM profile based credentials. (performs an HTTP
	//   call to a pre-defined endpoint, only valid inside
	//   configured ec2 instances)
	creds := credentials.NewChainCredentials([]credentials.Provider{
		&credentials.EnvAWS{},
		&credentials.Static{
			Value: credentials.Value{
				AccessKeyID:     cfg.KeyID,
				SecretAccessKey: cfg.Secret.Unwrap(),
			},
		},
		&credentials.EnvMinio{},
		&credentials.FileAWSCredentials{},
		&credentials.FileMinioClient{},
		&credentials.IAM{
			Client: &http.Client{
				Transport: tr,
			},
		},
	})

	c, err := creds.Get()
	if err != nil {
		return nil, errors.Wrap(err, "creds.Get")
	}

	if c.SignerType == credentials.SignatureAnonymous {
		// Fail if no credentials were found to prevent repeated attempts to (unsuccessfully) retrieve new credentials.
		// The first attempt still has to time out, which slows down restic usage considerably. Thus, migrate towards
		// forcing users to explicitly decide between authenticated and anonymous access.
		return nil, fmt.Errorf("no credentials found. Use `-o s3.unsafe-anonymous-auth=true` for anonymous authentication")
	}

	roleArn := os.Getenv("RESTIC_AWS_ASSUME_ROLE_ARN")
	if roleArn != "" {
		// use the region provided by the configuration by default
		awsRegion := cfg.Region
		// allow the region to be overridden if for some reason it is required
		if os.Getenv("RESTIC_AWS_ASSUME_ROLE_REGION") != "" {
			awsRegion = os.Getenv("RESTIC_AWS_ASSUME_ROLE_REGION")
		}

		sessionName := os.Getenv("RESTIC_AWS_ASSUME_ROLE_SESSION_NAME")
		externalID := os.Getenv("RESTIC_AWS_ASSUME_ROLE_EXTERNAL_ID")
		policy := os.Getenv("RESTIC_AWS_ASSUME_ROLE_POLICY")
		stsEndpoint := os.Getenv("RESTIC_AWS_ASSUME_ROLE_STS_ENDPOINT")

		if stsEndpoint == "" {
			if awsRegion != "" {
				if strings.HasPrefix(awsRegion, "cn-") {
					stsEndpoint = "https://sts." + awsRegion + ".amazonaws.com.cn"
				} else {
					stsEndpoint = "https://sts." + awsRegion + ".amazonaws.com"
				}
			} else {
				stsEndpoint = "https://sts.amazonaws.com"
			}
		}

		opts := credentials.STSAssumeRoleOptions{
			RoleARN:         roleArn,
			AccessKey:       c.AccessKeyID,
			SecretKey:       c.SecretAccessKey,
			SessionToken:    c.SessionToken,
			RoleSessionName: sessionName,
			ExternalID:      externalID,
			Policy:          policy,
			Location:        awsRegion,
		}

		creds, err = credentials.NewSTSAssumeRole(stsEndpoint, opts)
		if err != nil {
			return nil, errors.Wrap(err, "creds.AssumeRole")
		}
	}

	return creds, nil
}
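
// A hypothetical sketch of configuring the assume-role path above via the
// environment (the account ID and role name are placeholders):
//
//	export RESTIC_AWS_ASSUME_ROLE_ARN=arn:aws:iam::123456789012:role/restic-backup
//	export RESTIC_AWS_ASSUME_ROLE_SESSION_NAME=restic-backup-session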

// Open opens the S3 backend at the bucket and region from the config. The
// bucket is not created if it does not exist yet; use Create for that.
func Open(_ context.Context, cfg Config, rt http.RoundTripper) (backend.Backend, error) {
	return open(cfg, rt)
}

// Create opens the S3 backend at bucket and region and creates the bucket if
// it does not exist yet.
func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Backend, error) {
	be, err := open(cfg, rt)
	if err != nil {
		return nil, errors.Wrap(err, "open")
	}
	found, err := be.client.BucketExists(ctx, cfg.Bucket)
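
	// If we are not allowed to check whether the bucket exists, assume it
	// does; creating it would likely fail with such restricted credentials
	// anyway.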
	if err != nil && isAccessDenied(err) {
		err = nil
		found = true
	}

	if err != nil {
		debug.Log("BucketExists(%v) returned err %v", cfg.Bucket, err)
		return nil, errors.Wrap(err, "client.BucketExists")
	}

	if !found {
		// create new bucket with default ACL in default region
		err = be.client.MakeBucket(ctx, cfg.Bucket, minio.MakeBucketOptions{})
		if err != nil {
			return nil, errors.Wrap(err, "client.MakeBucket")
		}
	}

	return be, nil
}

// isAccessDenied returns true if the error is caused by Access Denied.
func isAccessDenied(err error) bool {
	debug.Log("isAccessDenied(%T, %#v)", err, err)

	var e minio.ErrorResponse
	return errors.As(err, &e) && e.Code == "AccessDenied"
}

// IsNotExist returns true if the error is caused by a file that does not exist.
func (be *Backend) IsNotExist(err error) bool {
	var e minio.ErrorResponse
	return errors.As(err, &e) && e.Code == "NoSuchKey"
}
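
// IsPermanentError returns true if the error cannot be fixed by retrying,
// e.g. when the file is missing or the requested range is invalid.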
func (be *Backend) IsPermanentError(err error) bool {
	if be.IsNotExist(err) {
		return true
	}

	var merr minio.ErrorResponse
	if errors.As(err, &merr) {
		if merr.Code == "InvalidRange" || merr.Code == "AccessDenied" {
			return true
		}
	}

	return false
}
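
// Connections returns the number of concurrent backend connections allowed
// by the configuration.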
func (be *Backend) Connections() uint {
	return be.cfg.Connections
}

// Hasher may return a hash function for calculating a content hash for the backend
func (be *Backend) Hasher() hash.Hash {
	return nil
}

// HasAtomicReplace returns whether Save() can atomically replace files
func (be *Backend) HasAtomicReplace() bool {
	return true
}

// Path returns the path in the bucket that is used for this backend.
func (be *Backend) Path() string {
	return be.cfg.Prefix
}

// useStorageClass returns whether the file should be saved in the provided
// Storage Class. For archive storage classes, only data files are stored
// using that class; metadata must remain instantly accessible.
func (be *Backend) useStorageClass(h backend.Handle) bool {
	isDataFile := h.Type == backend.PackFile && !h.IsMetadata
	isArchiveClass := slices.Contains(archiveClasses, be.cfg.StorageClass)
	return !isArchiveClass || isDataFile
}

// Save stores data in the backend at the handle.
func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
	objName := be.Filename(h)

	opts := minio.PutObjectOptions{
		ContentType: "application/octet-stream",
		// the only option with the high-level api is to let the library handle the checksum computation
		SendContentMd5: true,
		// only use multipart uploads for very large files
		PartSize: 200 * 1024 * 1024,
	}
	if be.useStorageClass(h) {
		opts.StorageClass = be.cfg.StorageClass
	}

	info, err := be.client.PutObject(ctx, be.cfg.Bucket, objName, io.NopCloser(rd), int64(rd.Length()), opts)

	// sanity check
	if err == nil && info.Size != rd.Length() {
		return errors.Errorf("wrote %d bytes instead of the expected %d bytes", info.Size, rd.Length())
	}

	return errors.Wrap(err, "client.PutObject")
}

// Load runs fn with a reader that yields the contents of the file at h at the
// given offset.
func (be *Backend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	return util.DefaultLoad(ctx, h, length, offset, be.openReader, fn)
}
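
// openReader fetches the object from the bucket, restricted to the requested
// offset/length range when one is given.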
func (be *Backend) openReader(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
	objName := be.Filename(h)
	opts := minio.GetObjectOptions{}

	var err error
	if length > 0 {
		err = opts.SetRange(offset, offset+int64(length)-1)
	} else if offset > 0 {
		err = opts.SetRange(offset, 0)
	}

	if err != nil {
		return nil, errors.Wrap(err, "SetRange")
	}

	coreClient := minio.Core{Client: be.client}
	rd, info, _, err := coreClient.GetObject(ctx, be.cfg.Bucket, objName, opts)
	if err != nil {
		return nil, err
	}

	if feature.Flag.Enabled(feature.BackendErrorRedesign) && length > 0 {
		if info.Size > 0 && info.Size != int64(length) {
			_ = rd.Close()
			return nil, minio.ErrorResponse{Code: "InvalidRange", Message: "restic-file-too-short"}
		}
	}

	return rd, err
}

// Stat returns information about a blob.
func (be *Backend) Stat(ctx context.Context, h backend.Handle) (bi backend.FileInfo, err error) {
	objName := be.Filename(h)
	var obj *minio.Object

	opts := minio.GetObjectOptions{}

	obj, err = be.client.GetObject(ctx, be.cfg.Bucket, objName, opts)
	if err != nil {
		return backend.FileInfo{}, errors.Wrap(err, "client.GetObject")
	}

	// make sure that the object is closed properly.
	defer func() {
		e := obj.Close()
		if err == nil {
			err = errors.Wrap(e, "Close")
		}
	}()

	fi, err := obj.Stat()
	if err != nil {
		return backend.FileInfo{}, errors.Wrap(err, "Stat")
	}

	return backend.FileInfo{Size: fi.Size, Name: h.Name}, nil
}

// Remove removes the blob with the given name and type.
func (be *Backend) Remove(ctx context.Context, h backend.Handle) error {
	objName := be.Filename(h)

	err := be.client.RemoveObject(ctx, be.cfg.Bucket, objName, minio.RemoveObjectOptions{})

	if be.IsNotExist(err) {
		err = nil
	}

	return errors.Wrap(err, "client.RemoveObject")
}

// List runs fn for each file in the backend which has the type t. When an
// error occurs (or fn returns an error), List stops and returns it.
func (be *Backend) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error {
	prefix, recursive := be.Basedir(t)

	// make sure prefix ends with a slash
	if !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	debug.Log("using ListObjectsV1(%v)", be.cfg.ListObjectsV1)

	// NB: unfortunately we can't protect this with be.sem.GetToken() here.
	// Doing so would enable a deadlock situation (gh-1399), as ListObjects()
	// starts its own goroutine and returns results via a channel.
	listresp := be.client.ListObjects(ctx, be.cfg.Bucket, minio.ListObjectsOptions{
		Prefix:    prefix,
		Recursive: recursive,
		UseV1:     be.cfg.ListObjectsV1,
	})

	for obj := range listresp {
		if obj.Err != nil {
			return obj.Err
		}

		m := strings.TrimPrefix(obj.Key, prefix)
		if m == "" {
			continue
		}

		fi := backend.FileInfo{
			Name: path.Base(m),
			Size: obj.Size,
		}

		if ctx.Err() != nil {
			return ctx.Err()
		}

		err := fn(fi)
		if err != nil {
			return err
		}

		if ctx.Err() != nil {
			return ctx.Err()
		}
	}

	return ctx.Err()
}

// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *Backend) Delete(ctx context.Context) error {
	return util.DefaultDelete(ctx, be)
}

// Close does nothing
func (be *Backend) Close() error { return nil }
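
// A typical caller (sketch, hypothetical code) first requests the warmup and
// then waits for the returned handles before reading the packs:
//
//	warmingUp, err := be.Warmup(ctx, handles)
//	if err != nil {
//		return err
//	}
//	if err := be.WarmupWait(ctx, warmingUp); err != nil {
//		return err
//	}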

// Warmup transitions handles from cold to hot storage if needed.
func (be *Backend) Warmup(ctx context.Context, handles []backend.Handle) ([]backend.Handle, error) {
	handlesWarmingUp := []backend.Handle{}

	if be.cfg.EnableRestore {
		for _, h := range handles {
			filename := be.Filename(h)
			isWarmingUp, err := be.requestRestore(ctx, filename)
			if err != nil {
				return handlesWarmingUp, err
			}
			if isWarmingUp {
				debug.Log("s3 file is being restored: %s", filename)
				handlesWarmingUp = append(handlesWarmingUp, h)
			}
		}
	}

	return handlesWarmingUp, nil
}

// requestRestore sends a glacier restore request on a given file.
func (be *Backend) requestRestore(ctx context.Context, filename string) (bool, error) {
	objectInfo, err := be.client.StatObject(ctx, be.cfg.Bucket, filename, minio.StatObjectOptions{})
	if err != nil {
		return false, err
	}

	ws := be.getWarmupStatus(objectInfo)
	switch ws {
	case warmupStatusWarm:
		return false, nil
	case warmupStatusWarmingUp:
		return true, nil
	}

	opts := minio.RestoreRequest{}
	opts.SetDays(be.cfg.RestoreDays)
	opts.SetGlacierJobParameters(minio.GlacierJobParameters{Tier: minio.TierType(be.cfg.RestoreTier)})
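
	// "InvalidObjectState" is treated as "the object is not in an archive
	// class" (nothing to restore), "RestoreAlreadyInProgress" as "another
	// request already started the warmup".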
	if err := be.client.RestoreObject(ctx, be.cfg.Bucket, filename, "", opts); err != nil {
		var e minio.ErrorResponse
		if errors.As(err, &e) {
			switch e.Code {
			case "InvalidObjectState":
				return false, nil
			case "RestoreAlreadyInProgress":
				return true, nil
			}
		}
		return false, err
	}

	isWarmingUp := ws != warmupStatusLukewarm
	return isWarmingUp, nil
}

// getWarmupStatus returns the warmup status of the provided object.
func (be *Backend) getWarmupStatus(objectInfo minio.ObjectInfo) warmupStatus {
	// We can't use objectInfo.StorageClass to get the storage class of the
	// object because this field is only set during ListObjects operations.
	// The response header is the documented way to get the storage class
	// for GetObject/StatObject operations.
	storageClass := objectInfo.Metadata.Get("X-Amz-Storage-Class")
	isArchiveClass := slices.Contains(archiveClasses, storageClass)
	if !isArchiveClass {
		return warmupStatusWarm
	}

	restore := objectInfo.Restore
	if restore != nil {
		if restore.OngoingRestore {
			return warmupStatusWarmingUp
		}

		minExpiryTime := time.Now().Add(time.Duration(be.cfg.RestoreDays) * 24 * time.Hour)
		expiryTime := restore.ExpiryTime
		if !expiryTime.IsZero() {
			if minExpiryTime.Before(expiryTime) {
				return warmupStatusWarm
			}
			return warmupStatusLukewarm
		}
	}

	return warmupStatusCold
}

// WarmupWait waits until all handles are in hot storage.
func (be *Backend) WarmupWait(ctx context.Context, handles []backend.Handle) error {
	timeoutCtx, timeoutCtxCancel := context.WithTimeout(ctx, be.cfg.RestoreTimeout)
	defer timeoutCtxCancel()

	if be.cfg.EnableRestore {
		for _, h := range handles {
			filename := be.Filename(h)
			err := be.waitForRestore(timeoutCtx, filename)
			if err != nil {
				return err
			}
			debug.Log("s3 file is restored: %s", filename)
		}
	}

	return nil
}

// waitForRestore waits for a given file to be restored.
func (be *Backend) waitForRestore(ctx context.Context, filename string) error {
	for {
		var objectInfo minio.ObjectInfo

		// Restore requests can last many hours, so the network may fail
		// temporarily. We don't need to die in such an event.
		b := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 10)
		b = backoff.WithContext(b, ctx)
		err := backoff.Retry(
			func() (err error) {
				objectInfo, err = be.client.StatObject(ctx, be.cfg.Bucket, filename, minio.StatObjectOptions{})
				return
			},
			b,
		)
		if err != nil {
			return err
		}

		ws := be.getWarmupStatus(objectInfo)
		switch ws {
		case warmupStatusLukewarm:
			fallthrough
		case warmupStatusWarm:
			return nil
		case warmupStatusCold:
			return errors.New("waiting on S3 handle that is not warming up")
		}

		select {
		case <-time.After(1 * time.Minute):
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}