feat(backends/s3): add warmup support for check command

Follow-up from https://github.com/restic/restic/pull/5173
See also https://github.com/restic/restic/issues/3202
This commit is contained in:
Gilbert Gilb's 2025-02-13 19:33:32 +01:00
parent 5ddda7f5e9
commit 3a7af1e701
4 changed files with 32 additions and 14 deletions

View File

@ -1,10 +1,10 @@
Enhancement: Add warmup support on S3 backend before repacks and restores
Introduce S3 backend options for transitioning pack files from cold to hot
storage on S3 and S3-compatible providers. Note: only works before repacks
(prune/copy) and restore for now, and gated behind a new "s3-restore" feature
flag.
storage on S3 and S3-compatible providers. Note: this only works with the prune,
copy, check and restore commands, and is gated behind a new "s3-restore" feature flag.
https://github.com/restic/restic/pull/5173
https://github.com/restic/restic/pull/5248
https://github.com/restic/restic/issues/3202
https://github.com/restic/restic/issues/2504

View File

@ -375,11 +375,9 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
}
doReadData := func(packs map[restic.ID]int64) {
p := printer.NewCounter("packs")
p.SetMax(uint64(len(packs)))
errChan := make(chan error)
go chkr.ReadPacks(ctx, packs, p, errChan)
go chkr.ReadPacks(ctx, packs, printer, errChan)
for err := range errChan {
errorsFound = true
@ -389,7 +387,6 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
salvagePacks.Insert(err.PackID)
}
}
p.Done()
}
switch {

View File

@ -269,6 +269,7 @@ Archive** storage classes is available:
- Currently, only the following commands are known to work:
- `backup`
- `check`
- `copy`
- `prune`
- `restore`

View File

@ -10,6 +10,7 @@ import (
"github.com/klauspost/compress/zstd"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/feature"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/repository/index"
"github.com/restic/restic/internal/repository/pack"
@ -441,15 +442,39 @@ func (c *Checker) GetPacks() map[restic.ID]int64 {
// ReadData loads all data from the repository and checks the integrity.
func (c *Checker) ReadData(ctx context.Context, errChan chan<- error) {
c.ReadPacks(ctx, c.packs, nil, errChan)
c.ReadPacks(ctx, c.packs, &progress.NoopPrinter{}, errChan)
}
const maxStreamBufferSize = 4 * 1024 * 1024
// ReadPacks loads data from specified packs and checks the integrity.
func (c *Checker) ReadPacks(ctx context.Context, packs map[restic.ID]int64, p *progress.Counter, errChan chan<- error) {
func (c *Checker) ReadPacks(ctx context.Context, packs map[restic.ID]int64, printer progress.Printer, errChan chan<- error) {
defer close(errChan)
p := printer.NewCounter("packs")
p.SetMax(uint64(len(packs)))
defer p.Done()
packSet := restic.NewIDSet()
for pack := range packs {
packSet.Insert(pack)
}
if feature.Flag.Enabled(feature.S3Restore) {
job, err := c.repo.StartWarmup(ctx, packSet)
if err != nil {
errChan <- err
return
}
if job.HandleCount() != 0 {
printer.P("warming up %d packs from cold storage, this may take a while...", job.HandleCount())
if err := job.Wait(ctx); err != nil {
errChan <- err
return
}
}
}
g, ctx := errgroup.WithContext(ctx)
type checkTask struct {
id restic.ID
@ -497,11 +522,6 @@ func (c *Checker) ReadPacks(ctx context.Context, packs map[restic.ID]int64, p *p
})
}
packSet := restic.NewIDSet()
for pack := range packs {
packSet.Insert(pack)
}
// push packs to ch
for pbs := range c.repo.ListPacksFromIndex(ctx, packSet) {
size := packs[pbs.PackID]