mirror of https://github.com/restic/restic.git
feat(backends/s3): add warmup support before repacks and restores (#5173)
* feat(backends/s3): add warmup support before repacks and restores

This commit introduces basic support for transitioning pack files stored in
cold storage to hot storage on S3 and S3-compatible providers. To prevent
unexpected behavior for existing users, the feature is gated behind new flags:

- `s3.enable-restore`: opt-in flag (defaults to false)
- `s3.restore-days`: number of days for the restored objects to remain in hot storage (defaults to `7`)
- `s3.restore-timeout`: maximum time to wait for a single restoration (defaults to `1 day`)
- `s3.restore-tier`: retrieval tier at which the restore will be processed (defaults to `Standard`)

As restoration times can be lengthy, this implementation preemptively restores
selected packs to prevent repeated restore delays during downloads. This is
slightly sub-optimal, as we could process packs out of order (as soon as they
are transitioned), but that would add too much complexity for a marginal gain
in speed.

To maintain simplicity and prevent resource exhaustion with lots of packs, no
new concurrency mechanisms or goroutines were added. This just hooks
gracefully into the existing routines.

**Limitations:**

- Tests against the backend were not written due to the lack of cold storage class support in MinIO. Testing was done manually on Scaleway's S3-compatible object storage. If necessary, we could explore testing with LocalStack or mocks, though this requires further discussion.
- Currently, this feature only warms up before restores and repacks (prune/copy), as those are the two main use cases I came across. Support for other commands may be added in future iterations, as long as affected packs can be calculated in advance.
- The feature is gated behind a new alpha `s3-restore` feature flag to make it explicit that the feature is still wet behind the ears.
- There is no explicit user notification for ongoing pack restorations. While I think it is not necessary because of the opt-in flag, showing some notice may improve usability (but would probably require major refactoring in the progress bar, which I didn't want to start). Another possibility would be to add a flag to send restore requests and fail early.

See https://github.com/restic/restic/issues/3202

* ui: warn user when files are warming up from cold storage

* refactor: remove the PacksWarmer struct

It's easier to handle multiple handles in the backend directly, and it may
open the door to reducing the number of requests made to the backend in the
future.
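For reference, a minimal invocation sketch of the new options (the option and
feature-flag names are the ones introduced by this change; the concrete values
are illustrative):

    $ restic backup -o s3.storage-class=GLACIER somedir/
    $ RESTIC_FEATURES=s3-restore restic prune -o s3.enable-restore=true -o s3.restore-days=7 -o s3.restore-tier=Standard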
This commit is contained in:
parent 9566e2db4a
commit 536ebefff4
@@ -0,0 +1,10 @@

Enhancement: Add warmup support on S3 backend before repacks and restores

Introduce S3 backend options for transitioning pack files from cold to hot
storage on S3 and S3-compatible providers. Note: only works before repacks
(prune/copy) and restore for now, and gated behind a new "s3-restore" feature
flag.

https://github.com/restic/restic/pull/5173
https://github.com/restic/restic/issues/3202
https://github.com/restic/restic/issues/2504
@@ -237,7 +237,15 @@ func copyTree(ctx context.Context, srcRepo restic.Repository, dstRepo restic.Rep

    }

    bar := newProgressMax(!quiet, uint64(len(packList)), "packs copied")
    _, err = repository.Repack(
        ctx,
        srcRepo,
        dstRepo,
        packList,
        copyBlobs,
        bar,
        func(msg string, args ...interface{}) { fmt.Printf(msg+"\n", args...) },
    )
    bar.Done()
    if err != nil {
        return errors.Fatal(err.Error())
@@ -179,6 +179,12 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions,

    res.Warn = func(message string) {
        msg.E("Warning: %s\n", message)
    }
    res.Info = func(message string) {
        if gopts.JSON {
            return
        }
        msg.P("Info: %s\n", message)
    }

    selectExcludeFilter := func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool) {
        matched := false
doc/faq.rst
@@ -242,3 +242,33 @@ collect a list of all files, causing the following error:

    List(data) returned error, retrying after 1s: [...]: request timeout

In this case you can increase the timeout using the ``--stuck-request-timeout`` option.

Are "cold storages" supported?
------------------------------

Generally, restic does not natively support "cold storage" solutions. However,
experimental support for restoring from **S3 Glacier** and **S3 Glacier Deep
Archive** storage classes is available:

.. code-block:: console

    $ restic backup -o s3.storage-class=GLACIER somedir/
    $ RESTIC_FEATURES=s3-restore restic restore -o s3.enable-restore=1 -o s3.restore-days=7 -o s3.restore-timeout=1d latest

**Notes:**

- This feature is still in an early alpha stage. Expect arbitrary breaking
  changes in the future (although we'll do our best effort to avoid them).
- Expect restores to take from 1 up to 42 hours depending on your storage
  class, provider and luck. Restores from cold storage are known to be
  time-consuming. You may need to adjust ``s3.restore-timeout`` if a restore
  operation takes more than 24 hours.
- Restic will avoid sending metadata files (such as config files, lock files
  or tree blobs) to Glacier or Deep Archive. The Standard class is used
  instead to ensure normal and fast operation for most tasks.
- Currently, only the following commands are known to work:

  - ``backup``
  - ``copy``
  - ``prune``
  - ``restore``
@@ -475,3 +475,9 @@ func (be *Backend) Delete(ctx context.Context) error {

// Close does nothing
func (be *Backend) Close() error { return nil }

// Warmup not implemented
func (be *Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
    return []backend.Handle{}, nil
}
func (be *Backend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
@@ -335,3 +335,9 @@ func (be *b2Backend) Delete(ctx context.Context) error {

// Close does nothing
func (be *b2Backend) Close() error { return nil }

// Warmup not implemented
func (be *b2Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
    return []backend.Handle{}, nil
}
func (be *b2Backend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
@@ -75,6 +75,21 @@ type Backend interface {

    // Delete removes all data in the backend.
    Delete(ctx context.Context) error

    // Warmup ensures that the specified handles are ready for upcoming reads.
    // This is particularly useful for transitioning files from cold to hot
    // storage.
    //
    // The method is non-blocking. WarmupWait can be used to wait for
    // completion.
    //
    // Returns:
    // - Handles currently warming up.
    // - An error if warmup fails.
    Warmup(ctx context.Context, h []Handle) ([]Handle, error)

    // WarmupWait waits until all given handles are warm.
    WarmupWait(ctx context.Context, h []Handle) error
}

type Unwrapper interface {
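Regarding the Warmup/WarmupWait contract above, here is a minimal caller-side
sketch. The helper function is illustrative and not part of this change; the
real call sites live in the repository and restorer packages shown further
down.

    package main // illustrative sketch only

    import (
        "context"

        "github.com/restic/restic/internal/backend"
    )

    // warmupThenRead requests warmup for a set of handles and blocks until
    // the backend reports them as warm.
    func warmupThenRead(ctx context.Context, be backend.Backend, handles []backend.Handle) error {
        warmingUp, err := be.Warmup(ctx, handles) // non-blocking restore requests
        if err != nil {
            return err
        }
        if len(warmingUp) > 0 {
            // Only the handles that were actually cold need to be waited on.
            return be.WarmupWait(ctx, warmingUp)
        }
        return nil // everything is already readable
    }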
@@ -258,3 +258,13 @@ func (b *Backend) List(ctx context.Context, t backend.FileType, fn func(f backen

    return nil
}

// Warmup delegates to wrapped backend.
func (b *Backend) Warmup(ctx context.Context, h []backend.Handle) ([]backend.Handle, error) {
    return b.Backend.Warmup(ctx, h)
}

// WarmupWait delegates to wrapped backend.
func (b *Backend) WarmupWait(ctx context.Context, h []backend.Handle) error {
    return b.Backend.WarmupWait(ctx, h)
}
@@ -82,3 +82,9 @@ func (be *Backend) Load(ctx context.Context, h backend.Handle, length int, offse

func (be *Backend) Stat(ctx context.Context, h backend.Handle) (backend.FileInfo, error) {
    return be.b.Stat(ctx, h)
}

// Warmup should not occur during dry-runs.
func (be *Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
    return []backend.Handle{}, nil
}
func (be *Backend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
@@ -363,3 +363,9 @@ func (be *Backend) Delete(ctx context.Context) error {

// Close does nothing.
func (be *Backend) Close() error { return nil }

// Warmup not implemented
func (be *Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
    return []backend.Handle{}, nil
}
func (be *Backend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
@@ -371,3 +371,9 @@ func (b *Local) Close() error {

    // same function.
    return nil
}

// Warmup not implemented
func (b *Local) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
    return []backend.Handle{}, nil
}
func (b *Local) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
@@ -249,3 +249,9 @@ func (be *MemoryBackend) Delete(ctx context.Context) error {

func (be *MemoryBackend) Close() error {
    return nil
}

// Warmup not implemented
func (be *MemoryBackend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
    return []backend.Handle{}, nil
}
func (be *MemoryBackend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
@@ -20,6 +20,8 @@ type Backend struct {

    ListFn             func(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error
    RemoveFn           func(ctx context.Context, h backend.Handle) error
    DeleteFn           func(ctx context.Context) error
    WarmupFn           func(ctx context.Context, h []backend.Handle) ([]backend.Handle, error)
    WarmupWaitFn       func(ctx context.Context, h []backend.Handle) error
    ConnectionsFn      func() uint
    HasherFn           func() hash.Hash
    HasAtomicReplaceFn func() bool

@@ -150,5 +152,21 @@ func (m *Backend) Delete(ctx context.Context) error {

    return m.DeleteFn(ctx)
}

func (m *Backend) Warmup(ctx context.Context, h []backend.Handle) ([]backend.Handle, error) {
    if m.WarmupFn == nil {
        return []backend.Handle{}, errors.New("not implemented")
    }

    return m.WarmupFn(ctx, h)
}

func (m *Backend) WarmupWait(ctx context.Context, h []backend.Handle) error {
    if m.WarmupWaitFn == nil {
        return errors.New("not implemented")
    }

    return m.WarmupWaitFn(ctx, h)
}

// Make sure that Backend implements the backend interface.
var _ backend.Backend = &Backend{}
@@ -340,3 +340,9 @@ func (be *Backend) Close() error {

    debug.Log("wait for rclone returned: %v", be.waitResult)
    return be.waitResult
}

// Warmup not implemented
func (be *Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
    return []backend.Handle{}, nil
}
func (be *Backend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
@@ -439,3 +439,9 @@ func (b *Backend) Close() error {

func (b *Backend) Delete(ctx context.Context) error {
    return util.DefaultDelete(ctx, b)
}

// Warmup not implemented
func (b *Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
    return []backend.Handle{}, nil
}
func (b *Backend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
@@ -289,3 +289,11 @@ func (be *Backend) List(ctx context.Context, t backend.FileType, fn func(backend

func (be *Backend) Unwrap() backend.Backend {
    return be.Backend
}

// Warmup delegates to wrapped backend
func (be *Backend) Warmup(ctx context.Context, h []backend.Handle) ([]backend.Handle, error) {
    return be.Backend.Warmup(ctx, h)
}
func (be *Backend) WarmupWait(ctx context.Context, h []backend.Handle) error {
    return be.Backend.WarmupWait(ctx, h)
}
@@ -5,6 +5,7 @@ import (

    "os"
    "path"
    "strings"
    "time"

    "github.com/restic/restic/internal/backend"
    "github.com/restic/restic/internal/errors"
@@ -23,6 +24,11 @@ type Config struct {

    Layout       string `option:"layout" help:"use this backend layout (default: auto-detect) (deprecated)"`
    StorageClass string `option:"storage-class" help:"set S3 storage class (STANDARD, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING or REDUCED_REDUNDANCY)"`

    EnableRestore  bool          `option:"enable-restore" help:"restore objects from GLACIER or DEEP_ARCHIVE storage classes (default: false, requires \"s3-restore\" feature flag)"`
    RestoreDays    int           `option:"restore-days" help:"lifetime in days of restored object (default: 7)"`
    RestoreTimeout time.Duration `option:"restore-timeout" help:"maximum time to wait for objects transition (default: 1d)"`
    RestoreTier    string        `option:"restore-tier" help:"Retrieval tier at which the restore will be processed. (Standard, Bulk or Expedited) (default: Standard)"`

    Connections uint   `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
    MaxRetries  uint   `option:"retries" help:"set the number of retries attempted"`
    Region      string `option:"region" help:"set region"`
@@ -36,6 +42,10 @@ func NewConfig() Config {

    return Config{
        Connections:    5,
        ListObjectsV1:  false,
        EnableRestore:  false,
        RestoreDays:    7,
        RestoreTimeout: 24 * time.Hour,
        RestoreTier:    "Standard",
    }
}
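These defaults can be overridden per run through restic's generic `-o`
backend options. An illustrative example (the concrete values here are
arbitrary, not recommendations):

    $ RESTIC_FEATURES=s3-restore restic restore latest \
        -o s3.enable-restore=true \
        -o s3.restore-days=3 \
        -o s3.restore-tier=Expedited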
@@ -3,117 +3,117 @@ package s3

import (
    "strings"
    "testing"
    "time"

    "github.com/restic/restic/internal/backend/test"
)

func newTestConfig(cfg Config) Config {
    if cfg.Connections == 0 {
        cfg.Connections = 5
    }
    if cfg.RestoreDays == 0 {
        cfg.RestoreDays = 7
    }
    if cfg.RestoreTimeout == 0 {
        cfg.RestoreTimeout = 24 * time.Hour
    }
    if cfg.RestoreTier == "" {
        cfg.RestoreTier = "Standard"
    }
    return cfg
}

var configTests = []test.ConfigTestData[Config]{
    {S: "s3://eu-central-1/bucketname", Cfg: newTestConfig(Config{
        Endpoint: "eu-central-1",
        Bucket:   "bucketname",
        Prefix:   "",
    })},
    {S: "s3://eu-central-1/bucketname/", Cfg: newTestConfig(Config{
        Endpoint: "eu-central-1",
        Bucket:   "bucketname",
        Prefix:   "",
    })},
    {S: "s3://eu-central-1/bucketname/prefix/directory", Cfg: newTestConfig(Config{
        Endpoint: "eu-central-1",
        Bucket:   "bucketname",
        Prefix:   "prefix/directory",
    })},
    {S: "s3://eu-central-1/bucketname/prefix/directory/", Cfg: newTestConfig(Config{
        Endpoint: "eu-central-1",
        Bucket:   "bucketname",
        Prefix:   "prefix/directory",
    })},
    {S: "s3:eu-central-1/foobar", Cfg: newTestConfig(Config{
        Endpoint: "eu-central-1",
        Bucket:   "foobar",
        Prefix:   "",
    })},
    {S: "s3:eu-central-1/foobar/", Cfg: newTestConfig(Config{
        Endpoint: "eu-central-1",
        Bucket:   "foobar",
        Prefix:   "",
    })},
    {S: "s3:eu-central-1/foobar/prefix/directory", Cfg: newTestConfig(Config{
        Endpoint: "eu-central-1",
        Bucket:   "foobar",
        Prefix:   "prefix/directory",
    })},
    {S: "s3:eu-central-1/foobar/prefix/directory/", Cfg: newTestConfig(Config{
        Endpoint: "eu-central-1",
        Bucket:   "foobar",
        Prefix:   "prefix/directory",
    })},
    {S: "s3:hostname.foo/foobar", Cfg: newTestConfig(Config{
        Endpoint: "hostname.foo",
        Bucket:   "foobar",
        Prefix:   "",
    })},
    {S: "s3:hostname.foo/foobar/prefix/directory", Cfg: newTestConfig(Config{
        Endpoint: "hostname.foo",
        Bucket:   "foobar",
        Prefix:   "prefix/directory",
    })},
    {S: "s3:https://hostname/foobar", Cfg: newTestConfig(Config{
        Endpoint: "hostname",
        Bucket:   "foobar",
        Prefix:   "",
    })},
    {S: "s3:https://hostname:9999/foobar", Cfg: newTestConfig(Config{
        Endpoint: "hostname:9999",
        Bucket:   "foobar",
        Prefix:   "",
    })},
    {S: "s3:https://hostname:9999/foobar/", Cfg: newTestConfig(Config{
        Endpoint: "hostname:9999",
        Bucket:   "foobar",
        Prefix:   "",
    })},
    {S: "s3:http://hostname:9999/foobar", Cfg: newTestConfig(Config{
        Endpoint: "hostname:9999",
        Bucket:   "foobar",
        Prefix:   "",
        UseHTTP:  true,
    })},
    {S: "s3:http://hostname:9999/foobar/", Cfg: newTestConfig(Config{
        Endpoint: "hostname:9999",
        Bucket:   "foobar",
        Prefix:   "",
        UseHTTP:  true,
    })},
    {S: "s3:http://hostname:9999/bucket/prefix/directory", Cfg: newTestConfig(Config{
        Endpoint: "hostname:9999",
        Bucket:   "bucket",
        Prefix:   "prefix/directory",
        UseHTTP:  true,
    })},
    {S: "s3:http://hostname:9999/bucket/prefix/directory/", Cfg: newTestConfig(Config{
        Endpoint: "hostname:9999",
        Bucket:   "bucket",
        Prefix:   "prefix/directory",
        UseHTTP:  true,
    })},
}

func TestParseConfig(t *testing.T) {
@@ -8,8 +8,11 @@ import (

    "net/http"
    "os"
    "path"
    "slices"
    "strings"
    "time"

    "github.com/cenkalti/backoff/v4"
    "github.com/restic/restic/internal/backend"
    "github.com/restic/restic/internal/backend/layout"
    "github.com/restic/restic/internal/backend/location"
@@ -32,6 +35,17 @@ type Backend struct {

// make sure that *Backend implements backend.Backend
var _ backend.Backend = &Backend{}

var archiveClasses = []string{"GLACIER", "DEEP_ARCHIVE"}

type warmupStatus int

const (
    warmupStatusCold warmupStatus = iota
    warmupStatusWarmingUp
    warmupStatusWarm
    warmupStatusLukewarm
)

func NewFactory() location.Factory {
    return location.NewHTTPBackendFactory("s3", ParseConfig, location.NoPassword, Create, Open)
}
@@ -39,6 +53,10 @@ func NewFactory() location.Factory {

func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
    debug.Log("open, config %#v", cfg)

    if cfg.EnableRestore && !feature.Flag.Enabled(feature.S3Restore) {
        return nil, fmt.Errorf("feature flag `s3-restore` is required to use `-o s3.enable-restore=true`")
    }

    if cfg.KeyID == "" && cfg.Secret.String() != "" {
        return nil, errors.Fatalf("unable to open S3 backend: Key ID ($AWS_ACCESS_KEY_ID) is empty")
    } else if cfg.KeyID != "" && cfg.Secret.String() == "" {
@@ -266,9 +284,9 @@ func (be *Backend) Path() string {

// For archive storage classes, only data files are stored using that class; metadata
// must remain instantly accessible.
func (be *Backend) useStorageClass(h backend.Handle) bool {
    isDataFile := h.Type == backend.PackFile && !h.IsMetadata
    isArchiveClass := slices.Contains(archiveClasses, be.cfg.StorageClass)
    return !isArchiveClass || isDataFile
}

// Save stores data in the backend at the handle.
@@ -440,3 +458,148 @@ func (be *Backend) Delete(ctx context.Context) error {

// Close does nothing
func (be *Backend) Close() error { return nil }

// Warmup transitions handles from cold to hot storage if needed.
func (be *Backend) Warmup(ctx context.Context, handles []backend.Handle) ([]backend.Handle, error) {
    handlesWarmingUp := []backend.Handle{}

    if be.cfg.EnableRestore {
        for _, h := range handles {
            filename := be.Filename(h)
            isWarmingUp, err := be.requestRestore(ctx, filename)
            if err != nil {
                return handlesWarmingUp, err
            }
            if isWarmingUp {
                debug.Log("s3 file is being restored: %s", filename)
                handlesWarmingUp = append(handlesWarmingUp, h)
            }
        }
    }

    return handlesWarmingUp, nil
}

// requestRestore sends a glacier restore request on a given file.
func (be *Backend) requestRestore(ctx context.Context, filename string) (bool, error) {
    objectInfo, err := be.client.StatObject(ctx, be.cfg.Bucket, filename, minio.StatObjectOptions{})
    if err != nil {
        return false, err
    }

    ws := be.getWarmupStatus(objectInfo)
    switch ws {
    case warmupStatusWarm:
        return false, nil
    case warmupStatusWarmingUp:
        return true, nil
    }

    opts := minio.RestoreRequest{}
    opts.SetDays(be.cfg.RestoreDays)
    opts.SetGlacierJobParameters(minio.GlacierJobParameters{Tier: minio.TierType(be.cfg.RestoreTier)})

    if err := be.client.RestoreObject(ctx, be.cfg.Bucket, filename, "", opts); err != nil {
        var e minio.ErrorResponse
        if errors.As(err, &e) {
            switch e.Code {
            case "InvalidObjectState":
                return false, nil
            case "RestoreAlreadyInProgress":
                return true, nil
            }
        }
        return false, err
    }

    isWarmingUp := ws != warmupStatusLukewarm
    return isWarmingUp, nil
}

// getWarmupStatus returns the warmup status of the provided object.
func (be *Backend) getWarmupStatus(objectInfo minio.ObjectInfo) warmupStatus {
    // We can't use objectInfo.StorageClass to get the storage class of the
    // object because this field is only set during ListObjects operations.
    // The response header is the documented way to get the storage class
    // for GetObject/StatObject operations.
    storageClass := objectInfo.Metadata.Get("X-Amz-Storage-Class")
    isArchiveClass := slices.Contains(archiveClasses, storageClass)
    if !isArchiveClass {
        return warmupStatusWarm
    }

    restore := objectInfo.Restore
    if restore != nil {
        if restore.OngoingRestore {
            return warmupStatusWarmingUp
        }

        minExpiryTime := time.Now().Add(time.Duration(be.cfg.RestoreDays) * 24 * time.Hour)
        expiryTime := restore.ExpiryTime
        if !expiryTime.IsZero() {
            if minExpiryTime.Before(expiryTime) {
                return warmupStatusWarm
            }
            return warmupStatusLukewarm
        }
    }

    return warmupStatusCold
}

// WarmupWait waits until all handles are in hot storage.
func (be *Backend) WarmupWait(ctx context.Context, handles []backend.Handle) error {
    timeoutCtx, timeoutCtxCancel := context.WithTimeout(ctx, be.cfg.RestoreTimeout)
    defer timeoutCtxCancel()

    if be.cfg.EnableRestore {
        for _, h := range handles {
            filename := be.Filename(h)
            err := be.waitForRestore(timeoutCtx, filename)
            if err != nil {
                return err
            }
            debug.Log("s3 file is restored: %s", filename)
        }
    }

    return nil
}

// waitForRestore waits for a given file to be restored.
func (be *Backend) waitForRestore(ctx context.Context, filename string) error {
    for {
        var objectInfo minio.ObjectInfo

        // Restore requests can last many hours, therefore the network may
        // fail temporarily. We don't need to die in such an event.
        b := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 10)
        b = backoff.WithContext(b, ctx)
        err := backoff.Retry(
            func() (err error) {
                objectInfo, err = be.client.StatObject(ctx, be.cfg.Bucket, filename, minio.StatObjectOptions{})
                return
            },
            b,
        )
        if err != nil {
            return err
        }

        ws := be.getWarmupStatus(objectInfo)
        switch ws {
        case warmupStatusLukewarm:
            fallthrough
        case warmupStatusWarm:
            return nil
        case warmupStatusCold:
            return errors.New("waiting on S3 handle that is not warming up")
        }

        select {
        case <-time.After(1 * time.Minute):
        case <-ctx.Done():
            return ctx.Err()
        }
    }
}
@@ -588,3 +588,9 @@ func (r *SFTP) deleteRecursive(ctx context.Context, name string) error {

func (r *SFTP) Delete(ctx context.Context) error {
    return r.deleteRecursive(ctx, r.p)
}

// Warmup not implemented
func (r *SFTP) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
    return []backend.Handle{}, nil
}
func (r *SFTP) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
@@ -269,3 +269,9 @@ func (be *beSwift) Delete(ctx context.Context) error {

// Close does nothing
func (be *beSwift) Close() error { return nil }

// Warmup not implemented
func (be *beSwift) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
    return []backend.Handle{}, nil
}
func (be *beSwift) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
@@ -9,6 +9,7 @@ const (

    DeviceIDForHardlinks    FlagName = "device-id-for-hardlinks"
    ExplicitS3AnonymousAuth FlagName = "explicit-s3-anonymous-auth"
    SafeForgetKeepTags      FlagName = "safe-forget-keep-tags"
    S3Restore               FlagName = "s3-restore"
)

func init() {

@@ -17,5 +18,6 @@ func init() {

    DeviceIDForHardlinks:    {Type: Alpha, Description: "store deviceID only for hardlinks to reduce metadata changes for example when using btrfs subvolumes. Will be removed in a future restic version after repository format 3 is available"},
    ExplicitS3AnonymousAuth: {Type: Stable, Description: "forbid anonymous S3 authentication unless `-o s3.unsafe-anonymous-auth=true` is set"},
    SafeForgetKeepTags:      {Type: Stable, Description: "prevent deleting all snapshots if the tag passed to `forget --keep-tags tagname` does not exist"},
    S3Restore:               {Type: Alpha, Description: "restore S3 objects from cold storage classes when `-o s3.enable-restore=true` is set"},
    })
}
@@ -557,7 +557,7 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) er

    printer.P("repacking packs\n")
    bar := printer.NewCounter("packs repacked")
    bar.SetMax(uint64(len(plan.repackPacks)))
    _, err := Repack(ctx, repo, repo, plan.repackPacks, plan.keepBlobs, bar, printer.P)
    bar.Done()
    if err != nil {
        return errors.Fatal(err.Error())
@@ -6,6 +6,7 @@ import (

    "github.com/restic/restic/internal/debug"
    "github.com/restic/restic/internal/errors"
    "github.com/restic/restic/internal/feature"
    "github.com/restic/restic/internal/restic"
    "github.com/restic/restic/internal/ui/progress"
@@ -18,6 +19,8 @@ type repackBlobSet interface {

    Len() int
}

type LogFunc func(msg string, args ...interface{})

// Repack takes a list of packs together with a list of blobs contained in
// these packs. Each pack is loaded and the blobs listed in keepBlobs is saved
// into a new pack. Returned is the list of obsolete packs which can then
@@ -25,9 +28,21 @@ type repackBlobSet interface {

//
// The map keepBlobs is modified by Repack, it is used to keep track of which
// blobs have been processed.
func Repack(
    ctx context.Context,
    repo restic.Repository,
    dstRepo restic.Repository,
    packs restic.IDSet,
    keepBlobs repackBlobSet,
    p *progress.Counter,
    logf LogFunc,
) (obsoletePacks restic.IDSet, err error) {
    debug.Log("repacking %d packs while keeping %d blobs", len(packs), keepBlobs.Len())

    if logf == nil {
        logf = func(_ string, _ ...interface{}) {}
    }

    if repo == dstRepo && dstRepo.Connections() < 2 {
        return nil, errors.New("repack step requires a backend connection limit of at least two")
    }
|
||||||
dstRepo.StartPackUploader(wgCtx, wg)
|
dstRepo.StartPackUploader(wgCtx, wg)
|
||||||
wg.Go(func() error {
|
wg.Go(func() error {
|
||||||
var err error
|
var err error
|
||||||
obsoletePacks, err = repack(wgCtx, repo, dstRepo, packs, keepBlobs, p)
|
obsoletePacks, err = repack(wgCtx, repo, dstRepo, packs, keepBlobs, p, logf)
|
||||||
return err
|
return err
|
||||||
})
|
})
|
||||||
|
|
||||||
|
@@ -47,9 +62,30 @@ func Repack(ctx context.Context, repo restic.Repository, dstRepo restic.Reposito

    return obsoletePacks, nil
}

func repack(
    ctx context.Context,
    repo restic.Repository,
    dstRepo restic.Repository,
    packs restic.IDSet,
    keepBlobs repackBlobSet,
    p *progress.Counter,
    logf LogFunc,
) (obsoletePacks restic.IDSet, err error) {
    wg, wgCtx := errgroup.WithContext(ctx)

    if feature.Flag.Enabled(feature.S3Restore) {
        job, err := repo.StartWarmup(ctx, packs)
        if err != nil {
            return nil, err
        }
        if job.HandleCount() != 0 {
            logf("warming up %d packs from cold storage, this may take a while...", job.HandleCount())
            if err := job.Wait(ctx); err != nil {
                return nil, err
            }
        }
    }

    var keepMutex sync.Mutex
    downloadQueue := make(chan restic.PackBlobs)
    wg.Go(func() error {
@@ -160,7 +160,7 @@ func findPacksForBlobs(t *testing.T, repo restic.Repository, blobs restic.BlobSe

}

func repack(t *testing.T, repo restic.Repository, be backend.Backend, packs restic.IDSet, blobs restic.BlobSet) {
    repackedBlobs, err := repository.Repack(context.TODO(), repo, repo, packs, blobs, nil, nil)
    if err != nil {
        t.Fatal(err)
    }

@@ -279,7 +279,7 @@ func testRepackCopy(t *testing.T, version uint) {

    _, keepBlobs := selectBlobs(t, random, repo, 0.2)
    copyPacks := findPacksForBlobs(t, repo, keepBlobs)

    _, err := repository.Repack(context.TODO(), repoWrapped, dstRepoWrapped, copyPacks, keepBlobs, nil, nil)
    if err != nil {
        t.Fatal(err)
    }

@@ -318,7 +318,7 @@ func testRepackWrongBlob(t *testing.T, version uint) {

    _, keepBlobs := selectBlobs(t, random, repo, 0)
    rewritePacks := findPacksForBlobs(t, repo, keepBlobs)

    _, err := repository.Repack(context.TODO(), repo, repo, rewritePacks, keepBlobs, nil, nil)
    if err == nil {
        t.Fatal("expected repack to fail but got no error")
    }

@@ -366,7 +366,7 @@ func testRepackBlobFallback(t *testing.T, version uint) {

    rtest.OK(t, repo.Flush(context.Background()))

    // repack must fallback to valid copy
    _, err = repository.Repack(context.TODO(), repo, repo, rewritePacks, keepBlobs, nil, nil)
    rtest.OK(t, err)

    keepBlobs = restic.NewBlobSet(restic.BlobHandle{Type: restic.DataBlob, ID: id})
@@ -0,0 +1,39 @@

package repository

import (
    "context"

    "github.com/restic/restic/internal/backend"
    "github.com/restic/restic/internal/restic"
)

type WarmupJob struct {
    repo             *Repository
    handlesWarmingUp []backend.Handle
}

// HandleCount returns the number of handles that are currently warming up.
func (job *WarmupJob) HandleCount() int {
    return len(job.handlesWarmingUp)
}

// Wait waits for all handles to be warm.
func (job *WarmupJob) Wait(ctx context.Context) error {
    return job.repo.be.WarmupWait(ctx, job.handlesWarmingUp)
}

// StartWarmup creates a new warmup job, requesting the backend to warmup the specified packs.
func (repo *Repository) StartWarmup(ctx context.Context, packs restic.IDSet) (restic.WarmupJob, error) {
    handles := make([]backend.Handle, 0, len(packs))
    for pack := range packs {
        handles = append(
            handles,
            backend.Handle{Type: restic.PackFile, Name: pack.String()},
        )
    }
    handlesWarmingUp, err := repo.be.Warmup(ctx, handles)
    return &WarmupJob{
        repo:             repo,
        handlesWarmingUp: handlesWarmingUp,
    }, err
}
@@ -0,0 +1,73 @@

package repository

import (
    "context"
    "testing"

    "github.com/restic/restic/internal/backend"
    "github.com/restic/restic/internal/backend/mock"
    "github.com/restic/restic/internal/restic"
)

func TestWarmupRepository(t *testing.T) {
    warmupCalls := [][]backend.Handle{}
    warmupWaitCalls := [][]backend.Handle{}
    simulateWarmingUp := false

    be := mock.NewBackend()
    be.WarmupFn = func(ctx context.Context, handles []backend.Handle) ([]backend.Handle, error) {
        warmupCalls = append(warmupCalls, handles)
        if simulateWarmingUp {
            return handles, nil
        }
        return []backend.Handle{}, nil
    }
    be.WarmupWaitFn = func(ctx context.Context, handles []backend.Handle) error {
        warmupWaitCalls = append(warmupWaitCalls, handles)
        return nil
    }

    repo, _ := New(be, Options{})

    id1, _ := restic.ParseID("1111111111111111111111111111111111111111111111111111111111111111")
    id2, _ := restic.ParseID("2222222222222222222222222222222222222222222222222222222222222222")
    id3, _ := restic.ParseID("3333333333333333333333333333333333333333333333333333333333333333")

    job, err := repo.StartWarmup(context.TODO(), restic.NewIDSet(id1, id2))
    if err != nil {
        t.Fatalf("error when starting warmup: %v", err)
    }
    if len(warmupCalls) != 1 {
        t.Fatalf("expected %d calls to warmup, got %d", 1, len(warmupCalls))
    }
    if len(warmupCalls[0]) != 2 {
        t.Fatalf("expected warmup on %d handles, got %d", 2, len(warmupCalls[0]))
    }
    if job.HandleCount() != 0 {
        t.Fatalf("expected all files to be warm, got %d cold", job.HandleCount())
    }

    simulateWarmingUp = true
    job, err = repo.StartWarmup(context.TODO(), restic.NewIDSet(id3))
    if err != nil {
        t.Fatalf("error when starting warmup: %v", err)
    }
    if len(warmupCalls) != 2 {
        t.Fatalf("expected %d calls to warmup, got %d", 2, len(warmupCalls))
    }
    if len(warmupCalls[1]) != 1 {
        t.Fatalf("expected warmup on %d handles, got %d", 1, len(warmupCalls[1]))
    }
    if job.HandleCount() != 1 {
        t.Fatalf("expected %d file to be warming up, got %d", 1, job.HandleCount())
    }

    if err := job.Wait(context.TODO()); err != nil {
        t.Fatalf("error when waiting warmup: %v", err)
    }
    if len(warmupWaitCalls) != 1 {
        t.Fatalf("expected %d calls to warmupWait, got %d", 1, len(warmupCalls))
    }
    if len(warmupWaitCalls[0]) != 1 {
        t.Fatalf("expected warmupWait to be called with %d handles, got %d", 1, len(warmupWaitCalls[0]))
    }
}
@@ -60,6 +60,9 @@ type Repository interface {

    SaveUnpacked(ctx context.Context, t WriteableFileType, buf []byte) (ID, error)
    // RemoveUnpacked removes a file from the repository. This will eventually be restricted to deleting only snapshots.
    RemoveUnpacked(ctx context.Context, t WriteableFileType, id ID) error

    // StartWarmup creates a new warmup job, requesting the backend to warmup the specified packs.
    StartWarmup(ctx context.Context, packs IDSet) (WarmupJob, error)
}

type FileType = backend.FileType
@@ -157,3 +160,10 @@ type Unpacked[FT FileTypes] interface {

type ListBlobser interface {
    ListBlobs(ctx context.Context, fn func(PackedBlob)) error
}

type WarmupJob interface {
    // HandleCount returns the number of handles that are currently warming up.
    HandleCount() int
    // Wait waits for all handles to be warm.
    Wait(ctx context.Context) error
}
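The intended call pattern for this interface, mirroring the repack and
restorer call sites in this change (the helper function and its logf
parameter are illustrative, not part of the change):

    package main // illustrative sketch only

    import (
        "context"

        "github.com/restic/restic/internal/restic"
    )

    // warmPacks starts a warmup job for the given packs, reports progress
    // only when some handles are actually cold, and blocks until warm.
    func warmPacks(ctx context.Context, repo restic.Repository, packs restic.IDSet, logf func(string, ...interface{})) error {
        job, err := repo.StartWarmup(ctx, packs)
        if err != nil {
            return err
        }
        if job.HandleCount() != 0 {
            logf("warming up %d packs from cold storage, this may take a while...", job.HandleCount())
            return job.Wait(ctx)
        }
        return nil
    }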
@@ -2,6 +2,7 @@ package restorer

import (
    "context"
    "fmt"
    "path/filepath"
    "sync"

@@ -9,6 +10,7 @@ import (

    "github.com/restic/restic/internal/debug"
    "github.com/restic/restic/internal/errors"
    "github.com/restic/restic/internal/feature"
    "github.com/restic/restic/internal/repository"
    "github.com/restic/restic/internal/restic"
    "github.com/restic/restic/internal/ui/restore"

@@ -41,12 +43,15 @@ type packInfo struct {

}

type blobsLoaderFn func(ctx context.Context, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error
type startWarmupFn func(context.Context, restic.IDSet) (restic.WarmupJob, error)

// fileRestorer restores set of files
type fileRestorer struct {
    idx         func(restic.BlobType, restic.ID) []restic.PackedBlob
    blobsLoader blobsLoaderFn

    startWarmup startWarmupFn

    workerCount int
    filesWriter *filesWriter
    zeroChunk   restic.ID

@@ -58,6 +63,7 @@ type fileRestorer struct {

    dst   string
    files []*fileInfo
    Error func(string, error) error
    Info  func(string)
}

func newFileRestorer(dst string,

@@ -66,6 +72,7 @@ func newFileRestorer(dst string,

    connections uint,
    sparse bool,
    allowRecursiveDelete bool,
    startWarmup startWarmupFn,
    progress *restore.Progress) *fileRestorer {

    // as packs are streamed the concurrency is limited by IO

@@ -74,6 +81,7 @@ func newFileRestorer(dst string,

    return &fileRestorer{
        idx:         idx,
        blobsLoader: blobsLoader,
        startWarmup: startWarmup,
        filesWriter: newFilesWriter(workerCount, allowRecursiveDelete),
        zeroChunk:   repository.ZeroChunk(),
        sparse:      sparse,

@@ -82,6 +90,7 @@ func newFileRestorer(dst string,

        workerCount: workerCount,
        dst:         dst,
        Error:       restorerAbortOnAllErrors,
        Info:        func(_ string) {},
    }
}

@@ -192,6 +201,19 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error {

    // drop no longer necessary file list
    r.files = nil

    if feature.Flag.Enabled(feature.S3Restore) {
        warmupJob, err := r.startWarmup(ctx, restic.NewIDSet(packOrder...))
        if err != nil {
            return err
        }
        if warmupJob.HandleCount() != 0 {
            r.Info(fmt.Sprintf("warming up %d packs from cold storage, this may take a while...", warmupJob.HandleCount()))
            if err := warmupJob.Wait(ctx); err != nil {
                return err
            }
        }
    }

    wg, ctx := errgroup.WithContext(ctx)
    downloadCh := make(chan *packInfo)
@@ -9,6 +9,7 @@ import (

    "testing"

    "github.com/restic/restic/internal/errors"
    "github.com/restic/restic/internal/feature"
    "github.com/restic/restic/internal/restic"
    rtest "github.com/restic/restic/internal/test"
)

@@ -23,6 +24,11 @@ type TestFile struct {

    blobs []TestBlob
}

type TestWarmupJob struct {
    handlesCount int
    waitCalled   bool
}

type TestRepo struct {
    packsIDToData map[restic.ID][]byte

@@ -31,6 +37,8 @@ type TestRepo struct {

    files              []*fileInfo
    filesPathToContent map[string]string

    warmupJobs []*TestWarmupJob

    //
    loader blobsLoaderFn
}

@@ -44,6 +52,21 @@ func (i *TestRepo) fileContent(file *fileInfo) string {

    return i.filesPathToContent[file.location]
}

func (i *TestRepo) StartWarmup(ctx context.Context, packs restic.IDSet) (restic.WarmupJob, error) {
    job := TestWarmupJob{handlesCount: len(packs)}
    i.warmupJobs = append(i.warmupJobs, &job)
    return &job, nil
}

func (job *TestWarmupJob) HandleCount() int {
    return job.handlesCount
}

func (job *TestWarmupJob) Wait(_ context.Context) error {
    job.waitCalled = true
    return nil
}

func newTestRepo(content []TestFile) *TestRepo {
    type Pack struct {
        name string

@@ -111,6 +134,7 @@ func newTestRepo(content []TestFile) *TestRepo {

        blobs:              blobs,
        files:              files,
        filesPathToContent: filesPathToContent,
        warmupJobs:         []*TestWarmupJob{},
    }
    repo.loader = func(ctx context.Context, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error {
        blobs = append([]restic.Blob{}, blobs...)

@@ -141,10 +165,12 @@ func newTestRepo(content []TestFile) *TestRepo {

}

func restoreAndVerify(t *testing.T, tempdir string, content []TestFile, files map[string]bool, sparse bool) {
    defer feature.TestSetFlag(t, feature.Flag, feature.S3Restore, true)()

    t.Helper()
    repo := newTestRepo(content)

    r := newFileRestorer(tempdir, repo.loader, repo.Lookup, 2, sparse, false, repo.StartWarmup, nil)

    if files == nil {
        r.files = repo.files

@@ -177,6 +203,15 @@ func verifyRestore(t *testing.T, r *fileRestorer, repo *TestRepo) {

            t.Errorf("file %v has wrong content: want %q, got %q", file.location, content, data)
        }
    }

    if len(repo.warmupJobs) == 0 {
        t.Errorf("warmup did not occur")
    }
    for i, warmupJob := range repo.warmupJobs {
        if !warmupJob.waitCalled {
            t.Errorf("warmup job %d was not waited", i)
        }
    }
}

func TestFileRestorerBasic(t *testing.T) {

@@ -285,7 +320,7 @@ func TestErrorRestoreFiles(t *testing.T) {

        return loadError
    }

    r := newFileRestorer(tempdir, repo.loader, repo.Lookup, 2, false, false, repo.StartWarmup, nil)
    r.files = repo.files

    err := r.restoreFiles(context.TODO())

@@ -326,7 +361,7 @@ func TestFatalDownloadError(t *testing.T) {

    })
}

    r := newFileRestorer(tempdir, repo.loader, repo.Lookup, 2, false, false, repo.StartWarmup, nil)
    r.files = repo.files

    var errors []string
@@ -28,6 +28,7 @@ type Restorer struct {

    Error func(location string, err error) error
    Warn  func(message string)
    Info  func(message string)
    // SelectFilter determines whether the item is selectedForRestore or whether a childMayBeSelected.
    // selectedForRestore must not depend on isDir as `removeUnexpectedFiles` always passes false to isDir.
    SelectFilter func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool)

@@ -357,8 +358,9 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) (uint64, error)

    idx := NewHardlinkIndex[string]()
    filerestorer := newFileRestorer(dst, res.repo.LoadBlobsFromPack, res.repo.LookupBlob,
        res.repo.Connections(), res.opts.Sparse, res.opts.Delete, res.repo.StartWarmup, res.opts.Progress)
    filerestorer.Error = res.Error
    filerestorer.Info = res.Info

    debug.Log("first pass for %q", dst)