2016-01-23 19:19:26 +01:00
|
|
|
package mem
|
2015-11-22 16:12:00 +01:00
|
|
|
|
|
|
|
import (
|
2017-01-22 22:01:12 +01:00
|
|
|
"bytes"
|
2017-06-03 17:39:57 +02:00
|
|
|
"context"
|
2020-12-19 12:50:22 +01:00
|
|
|
"encoding/base64"
|
2024-04-14 11:43:18 +02:00
|
|
|
"fmt"
|
2020-12-19 12:39:48 +01:00
|
|
|
"hash"
|
2015-11-22 16:12:00 +01:00
|
|
|
"io"
|
2023-06-08 16:53:55 +02:00
|
|
|
"net/http"
|
2015-11-22 16:12:00 +01:00
|
|
|
"sync"
|
2015-11-22 16:30:13 +01:00
|
|
|
|
2022-10-15 23:14:33 +02:00
|
|
|
"github.com/cespare/xxhash/v2"
|
2023-10-01 11:40:12 +02:00
|
|
|
"github.com/restic/restic/internal/backend"
|
2023-06-08 16:53:55 +02:00
|
|
|
"github.com/restic/restic/internal/backend/location"
|
2023-10-01 10:24:33 +02:00
|
|
|
"github.com/restic/restic/internal/backend/util"
|
2020-12-17 12:47:53 +01:00
|
|
|
"github.com/restic/restic/internal/debug"
|
2017-07-23 14:21:03 +02:00
|
|
|
"github.com/restic/restic/internal/errors"
|
2015-11-22 16:12:00 +01:00
|
|
|
)
|
|
|
|
|
2023-10-01 11:40:12 +02:00
|
|
|
// memMap is the in-memory store: each file handle maps to that file's raw contents.
type memMap map[backend.Handle][]byte
|
2015-11-22 16:12:00 +01:00
|
|
|
|
2016-08-31 19:10:10 +02:00
|
|
|
// make sure that MemoryBackend implements backend.Backend
// (compile-time interface assertion; has no runtime cost).
var _ backend.Backend = &MemoryBackend{}
|
2016-08-31 19:10:10 +02:00
|
|
|
|
2023-06-08 16:53:55 +02:00
|
|
|
// NewFactory creates a persistent mem backend
|
|
|
|
func NewFactory() location.Factory {
|
|
|
|
be := New()
|
|
|
|
|
|
|
|
return location.NewHTTPBackendFactory[struct{}, *MemoryBackend](
|
2023-06-08 17:32:43 +02:00
|
|
|
"mem",
|
2024-02-10 22:58:10 +01:00
|
|
|
func(_ string) (*struct{}, error) {
|
2023-06-08 16:53:55 +02:00
|
|
|
return &struct{}{}, nil
|
|
|
|
},
|
|
|
|
location.NoPassword,
|
|
|
|
func(_ context.Context, _ struct{}, _ http.RoundTripper) (*MemoryBackend, error) {
|
|
|
|
return be, nil
|
|
|
|
},
|
|
|
|
func(_ context.Context, _ struct{}, _ http.RoundTripper) (*MemoryBackend, error) {
|
|
|
|
return be, nil
|
|
|
|
},
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2024-04-14 11:43:18 +02:00
|
|
|
// errNotFound is the sentinel returned when a requested file is not in the map.
var errNotFound = fmt.Errorf("not found")

// errTooSmall is the sentinel returned when a read extends past the end of a file.
var errTooSmall = errors.New("access beyond end of file")
|
2017-06-15 13:40:27 +02:00
|
|
|
|
2022-04-23 11:22:00 +02:00
|
|
|
// connectionCount is the number of concurrent operations reported by Connections.
const connectionCount = 2
|
|
|
|
|
2015-11-22 16:12:00 +01:00
|
|
|
// MemoryBackend is a mock backend that uses a map for storing all data in
// memory. This should only be used for tests.
type MemoryBackend struct {
	// data holds the contents of every stored file, keyed by handle.
	data memMap
	// m serializes all access to data.
	m sync.Mutex
}
|
|
|
|
|
2016-01-23 19:19:26 +01:00
|
|
|
// New returns a new backend that saves all data in a map in memory.
|
|
|
|
func New() *MemoryBackend {
|
2015-11-22 16:12:00 +01:00
|
|
|
be := &MemoryBackend{
|
|
|
|
data: make(memMap),
|
|
|
|
}
|
|
|
|
|
2016-09-27 22:35:08 +02:00
|
|
|
debug.Log("created new memory backend")
|
2015-11-22 16:30:13 +01:00
|
|
|
|
2015-11-22 16:12:00 +01:00
|
|
|
return be
|
|
|
|
}
|
|
|
|
|
2017-06-15 13:40:27 +02:00
|
|
|
// IsNotExist returns true if the file does not exist.
func (be *MemoryBackend) IsNotExist(err error) bool {
	// errNotFound is the sentinel produced by openReader, Stat and Remove.
	return errors.Is(err, errNotFound)
}
|
|
|
|
|
2024-05-11 00:12:13 +02:00
|
|
|
// IsPermanentError returns true if retrying the operation cannot succeed:
// the file does not exist, or the requested range lies beyond its end.
func (be *MemoryBackend) IsPermanentError(err error) bool {
	return be.IsNotExist(err) || errors.Is(err, errTooSmall)
}
|
|
|
|
|
2016-08-31 19:10:10 +02:00
|
|
|
// Save adds new Data to the backend.
|
2023-10-01 11:40:12 +02:00
|
|
|
func (be *MemoryBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
|
2016-01-24 01:15:35 +01:00
|
|
|
be.m.Lock()
|
|
|
|
defer be.m.Unlock()
|
|
|
|
|
2023-10-01 10:52:57 +02:00
|
|
|
h.IsMetadata = false
|
2023-10-01 11:40:12 +02:00
|
|
|
if h.Type == backend.ConfigFile {
|
2016-01-24 01:15:35 +01:00
|
|
|
h.Name = ""
|
|
|
|
}
|
|
|
|
|
2017-01-25 17:48:35 +01:00
|
|
|
if _, ok := be.data[h]; ok {
|
2016-01-24 20:23:50 +01:00
|
|
|
return errors.New("file already exists")
|
|
|
|
}
|
|
|
|
|
2022-12-02 19:36:43 +01:00
|
|
|
buf, err := io.ReadAll(rd)
|
2017-01-22 12:32:20 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-12-18 23:41:29 +01:00
|
|
|
// sanity check
|
|
|
|
if int64(len(buf)) != rd.Length() {
|
|
|
|
return errors.Errorf("wrote %d bytes instead of the expected %d bytes", len(buf), rd.Length())
|
|
|
|
}
|
|
|
|
|
2020-12-19 12:50:22 +01:00
|
|
|
beHash := be.Hasher()
|
|
|
|
// must never fail according to interface
|
2021-01-29 22:12:51 +01:00
|
|
|
_, err = beHash.Write(buf)
|
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
2020-12-19 12:50:22 +01:00
|
|
|
if !bytes.Equal(beHash.Sum(nil), rd.Hash()) {
|
|
|
|
return errors.Errorf("invalid file hash or content, got %s expected %s",
|
|
|
|
base64.RawStdEncoding.EncodeToString(beHash.Sum(nil)),
|
|
|
|
base64.RawStdEncoding.EncodeToString(rd.Hash()),
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2021-01-30 00:15:20 +01:00
|
|
|
be.data[h] = buf
|
|
|
|
|
2020-11-08 00:05:53 +01:00
|
|
|
return ctx.Err()
|
2016-01-24 01:15:35 +01:00
|
|
|
}
|
|
|
|
|
2018-01-17 05:59:16 +01:00
|
|
|
// Load runs fn with a reader that yields the contents of the file at h at the
// given offset. The actual read is delegated to openReader via
// util.DefaultLoad.
func (be *MemoryBackend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
	return util.DefaultLoad(ctx, h, length, offset, be.openReader, fn)
}
|
|
|
|
|
2023-10-01 11:40:12 +02:00
|
|
|
func (be *MemoryBackend) openReader(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
|
2017-01-22 22:01:12 +01:00
|
|
|
be.m.Lock()
|
|
|
|
defer be.m.Unlock()
|
|
|
|
|
2023-10-01 10:52:57 +02:00
|
|
|
h.IsMetadata = false
|
2023-10-01 11:40:12 +02:00
|
|
|
if h.Type == backend.ConfigFile {
|
2017-01-22 22:01:12 +01:00
|
|
|
h.Name = ""
|
|
|
|
}
|
|
|
|
|
2017-01-25 17:48:35 +01:00
|
|
|
if _, ok := be.data[h]; !ok {
|
2017-06-15 13:40:27 +02:00
|
|
|
return nil, errNotFound
|
2017-01-22 22:01:12 +01:00
|
|
|
}
|
|
|
|
|
2017-01-25 17:48:35 +01:00
|
|
|
buf := be.data[h]
|
2024-05-11 00:12:13 +02:00
|
|
|
if offset+int64(length) > int64(len(buf)) {
|
|
|
|
return nil, errTooSmall
|
2017-01-22 22:01:12 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
buf = buf[offset:]
|
2024-05-11 00:12:13 +02:00
|
|
|
if length > 0 {
|
2017-01-22 22:01:12 +01:00
|
|
|
buf = buf[:length]
|
|
|
|
}
|
|
|
|
|
2023-04-07 23:02:35 +02:00
|
|
|
return io.NopCloser(bytes.NewReader(buf)), ctx.Err()
|
2017-01-22 22:01:12 +01:00
|
|
|
}
|
|
|
|
|
2016-08-31 19:10:10 +02:00
|
|
|
// Stat returns information about a file in the backend.
|
2023-10-01 11:40:12 +02:00
|
|
|
func (be *MemoryBackend) Stat(ctx context.Context, h backend.Handle) (backend.FileInfo, error) {
|
2022-04-23 11:22:00 +02:00
|
|
|
be.m.Lock()
|
|
|
|
defer be.m.Unlock()
|
|
|
|
|
2023-10-01 10:52:57 +02:00
|
|
|
h.IsMetadata = false
|
2023-10-01 11:40:12 +02:00
|
|
|
if h.Type == backend.ConfigFile {
|
2016-01-23 23:27:58 +01:00
|
|
|
h.Name = ""
|
|
|
|
}
|
|
|
|
|
2017-01-25 17:48:35 +01:00
|
|
|
e, ok := be.data[h]
|
2016-01-23 23:27:58 +01:00
|
|
|
if !ok {
|
2023-10-01 11:40:12 +02:00
|
|
|
return backend.FileInfo{}, errNotFound
|
2016-01-23 23:27:58 +01:00
|
|
|
}
|
|
|
|
|
2023-10-01 11:40:12 +02:00
|
|
|
return backend.FileInfo{Size: int64(len(e)), Name: h.Name}, ctx.Err()
|
2016-01-23 23:27:58 +01:00
|
|
|
}
|
|
|
|
|
2016-08-31 19:10:10 +02:00
|
|
|
// Remove deletes a file from the backend.
|
2023-10-01 11:40:12 +02:00
|
|
|
func (be *MemoryBackend) Remove(ctx context.Context, h backend.Handle) error {
|
2015-11-22 16:12:00 +01:00
|
|
|
be.m.Lock()
|
|
|
|
defer be.m.Unlock()
|
|
|
|
|
2023-10-01 10:52:57 +02:00
|
|
|
h.IsMetadata = false
|
2017-01-25 17:48:35 +01:00
|
|
|
if _, ok := be.data[h]; !ok {
|
2017-06-15 13:40:27 +02:00
|
|
|
return errNotFound
|
2015-11-22 16:12:00 +01:00
|
|
|
}
|
|
|
|
|
2017-01-25 17:48:35 +01:00
|
|
|
delete(be.data, h)
|
2015-11-22 16:12:00 +01:00
|
|
|
|
2020-11-08 00:05:53 +01:00
|
|
|
return ctx.Err()
|
2015-11-22 16:12:00 +01:00
|
|
|
}
|
|
|
|
|
2016-08-31 19:10:10 +02:00
|
|
|
// List returns a channel which yields entries from the backend.
|
2023-10-01 11:40:12 +02:00
|
|
|
func (be *MemoryBackend) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error {
|
2018-01-21 17:25:36 +01:00
|
|
|
entries := make(map[string]int64)
|
2015-11-22 16:12:00 +01:00
|
|
|
|
2018-01-21 17:25:36 +01:00
|
|
|
be.m.Lock()
|
2018-01-20 13:43:07 +01:00
|
|
|
for entry, buf := range be.data {
|
2015-11-22 16:12:00 +01:00
|
|
|
if entry.Type != t {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2018-01-21 17:25:36 +01:00
|
|
|
entries[entry.Name] = int64(len(buf))
|
|
|
|
}
|
|
|
|
be.m.Unlock()
|
|
|
|
|
|
|
|
for name, size := range entries {
|
2023-10-01 11:40:12 +02:00
|
|
|
fi := backend.FileInfo{
|
2018-01-21 17:25:36 +01:00
|
|
|
Name: name,
|
|
|
|
Size: size,
|
2018-01-20 13:43:07 +01:00
|
|
|
}
|
|
|
|
|
2018-01-20 19:34:38 +01:00
|
|
|
if ctx.Err() != nil {
|
|
|
|
return ctx.Err()
|
|
|
|
}
|
|
|
|
|
2018-01-20 13:43:07 +01:00
|
|
|
err := fn(fi)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2015-11-22 16:30:13 +01:00
|
|
|
|
2018-01-20 13:43:07 +01:00
|
|
|
if ctx.Err() != nil {
|
|
|
|
return ctx.Err()
|
2015-11-22 16:12:00 +01:00
|
|
|
}
|
2018-01-20 13:43:07 +01:00
|
|
|
}
|
2015-11-22 16:12:00 +01:00
|
|
|
|
2018-01-20 13:43:07 +01:00
|
|
|
return ctx.Err()
|
2015-11-22 16:12:00 +01:00
|
|
|
}
|
2016-08-31 19:10:10 +02:00
|
|
|
|
2021-08-07 22:20:49 +02:00
|
|
|
// Connections returns the number of concurrent operations this backend supports.
func (be *MemoryBackend) Connections() uint {
	return connectionCount
}
|
|
|
|
|
2020-12-19 12:39:48 +01:00
|
|
|
// Hasher may return a hash function for calculating a content hash for the backend
func (be *MemoryBackend) Hasher() hash.Hash {
	// xxhash is a fast non-cryptographic hash; Save only uses it to verify
	// upload integrity, not for security.
	return xxhash.New()
}
|
|
|
|
|
2022-05-01 20:07:29 +02:00
|
|
|
// HasAtomicReplace returns whether Save() can atomically replace files.
// It cannot: Save rejects handles that already exist.
func (be *MemoryBackend) HasAtomicReplace() bool {
	return false
}
|
|
|
|
|
2016-08-31 19:10:10 +02:00
|
|
|
// Delete removes all data in the backend.
|
2017-06-03 17:39:57 +02:00
|
|
|
func (be *MemoryBackend) Delete(ctx context.Context) error {
|
2016-08-31 19:10:10 +02:00
|
|
|
be.m.Lock()
|
|
|
|
defer be.m.Unlock()
|
|
|
|
|
2020-11-08 00:05:53 +01:00
|
|
|
if ctx.Err() != nil {
|
|
|
|
return ctx.Err()
|
|
|
|
}
|
|
|
|
|
2016-08-31 19:10:10 +02:00
|
|
|
be.data = make(memMap)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Close closes the backend. The in-memory store needs no cleanup, so this
// is a no-op.
func (be *MemoryBackend) Close() error {
	return nil
}
|
feat(backends/s3): add warmup support before repacks and restores (#5173)
* feat(backends/s3): add warmup support before repacks and restores
This commit introduces basic support for transitioning pack files stored
in cold storage to hot storage on S3 and S3-compatible providers.
To prevent unexpected behavior for existing users, the feature is gated
behind new flags:
- `s3.enable-restore`: opt-in flag (defaults to false)
- `s3.restore-days`: number of days for the restored objects to remain
in hot storage (defaults to `7`)
- `s3.restore-timeout`: maximum time to wait for a single restoration
(defaults to `1 day`)
- `s3.restore-tier`: retrieval tier at which the restore will be
processed. (defaults to `Standard`)
As restoration times can be lengthy, this implementation preemptively
restores selected packs to prevent incessant restore-delays during
downloads. This is slightly sub-optimal as we could process packs
out-of-order (as soon as they're transitioned), but this would really
add too much complexity for a marginal gain in speed.
To maintain simplicity and prevent resource exhaustion with lots of
packs, no new concurrency mechanisms or goroutines were added. This just
hooks gracefully into the existing routines.
**Limitations:**
- Tests against the backend were not written due to the lack of cold
storage class support in MinIO. Testing was done manually on
Scaleway's S3-compatible object storage. If necessary, we could
explore testing with LocalStack or mocks, though this requires further
discussion.
- Currently, this feature only warms up before restores and repacks
(prune/copy), as those are the two main use-cases I came across.
Support for other commands may be added in future iterations, as long
as affected packs can be calculated in advance.
- The feature is gated behind a new alpha `s3-restore` feature flag to
make it explicit that the feature is still wet behind the ears.
- There is no explicit user notification for ongoing pack restorations.
While I think it is not necessary because of the opt-in flag, showing
some notice may improve usability (but would probably require major
refactoring in the progress bar which I didn't want to start). Another
possibility would be to add a flag to send restores requests and fail
early.
See https://github.com/restic/restic/issues/3202
* ui: warn user when files are warming up from cold storage
* refactor: remove the PacksWarmer struct
It's easier to handle multiple handles in the backend directly, and it
may open the door to reducing the number of requests made to the backend
in the future.
2025-02-01 19:26:27 +01:00
|
|
|
|
|
|
|
// Warmup not implemented: in-memory data needs no warming, so no handles
// are reported as warming up.
func (be *MemoryBackend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
	return []backend.Handle{}, nil
}
|
|
|
|
// WarmupWait is a no-op: there is never an in-flight warmup to wait for.
func (be *MemoryBackend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }
|