restic/internal/backend/mem/mem_backend.go

package mem

import (
	"bytes"
	"context"
	"encoding/base64"
	"fmt"
	"hash"
	"io"
	"net/http"
	"sync"

	"github.com/cespare/xxhash/v2"
	"github.com/restic/restic/internal/backend"
	"github.com/restic/restic/internal/backend/location"
	"github.com/restic/restic/internal/backend/util"
	"github.com/restic/restic/internal/debug"
	"github.com/restic/restic/internal/errors"
)

type memMap map[backend.Handle][]byte

// make sure that MemoryBackend implements backend.Backend
var _ backend.Backend = &MemoryBackend{}

// NewFactory creates a persistent mem backend.
func NewFactory() location.Factory {
	be := New()

	return location.NewHTTPBackendFactory[struct{}, *MemoryBackend](
		"mem",
		func(_ string) (*struct{}, error) {
			return &struct{}{}, nil
		},
		location.NoPassword,
		func(_ context.Context, _ struct{}, _ http.RoundTripper) (*MemoryBackend, error) {
			return be, nil
		},
		func(_ context.Context, _ struct{}, _ http.RoundTripper) (*MemoryBackend, error) {
			return be, nil
		},
	)
}
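
// Note that both the create and the open functions passed to
// NewHTTPBackendFactory above close over the same backend instance, so the
// in-memory contents persist across create/open calls within a single
// process. This is what makes the factory "persistent".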

var errNotFound = fmt.Errorf("not found")
var errTooSmall = errors.New("access beyond end of file")

const connectionCount = 2

// MemoryBackend is a mock backend that uses a map for storing all data in
// memory. This should only be used for tests.
type MemoryBackend struct {
	data memMap
	m    sync.Mutex
}

// New returns a new backend that saves all data in a map in memory.
func New() *MemoryBackend {
	be := &MemoryBackend{
		data: make(memMap),
	}

	debug.Log("created new memory backend")

	return be
}
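
// A minimal save/load roundtrip sketch (illustration only; it assumes that
// backend.NewByteReader wraps a byte slice in a RewindReader and computes the
// content hash with the supplied hasher, and "foo" is a hypothetical name):
//
//	be := New()
//	h := backend.Handle{Type: backend.PackFile, Name: "foo"}
//	data := []byte("some data")
//	if err := be.Save(context.TODO(), h, backend.NewByteReader(data, be.Hasher())); err != nil {
//		// handle error
//	}
//	err := be.Load(context.TODO(), h, 0, 0, func(rd io.Reader) error {
//		buf, err := io.ReadAll(rd) // buf now equals data
//		_ = buf
//		return err
//	})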

// IsNotExist returns true if the file does not exist.
func (be *MemoryBackend) IsNotExist(err error) bool {
	return errors.Is(err, errNotFound)
}

// IsPermanentError returns true if the error is permanent, that is, if
// retrying cannot succeed: the file does not exist, or the requested range
// lies beyond the end of the file.
func (be *MemoryBackend) IsPermanentError(err error) bool {
	return be.IsNotExist(err) || errors.Is(err, errTooSmall)
}
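
// For example (sketch; "missing" is a hypothetical file name):
//
//	_, err := be.Stat(context.TODO(), backend.Handle{Type: backend.PackFile, Name: "missing"})
//	be.IsNotExist(err)        // true
//	be.IsPermanentError(err)  // true: not-found errors cannot be retried away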

// Save adds new data to the backend.
func (be *MemoryBackend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
	be.m.Lock()
	defer be.m.Unlock()

	h.IsMetadata = false
	if h.Type == backend.ConfigFile {
		h.Name = ""
	}

	if _, ok := be.data[h]; ok {
		return errors.New("file already exists")
	}

	buf, err := io.ReadAll(rd)
	if err != nil {
		return err
	}

	// sanity check
	if int64(len(buf)) != rd.Length() {
		return errors.Errorf("wrote %d bytes instead of the expected %d bytes", len(buf), rd.Length())
	}

	beHash := be.Hasher()
	// hash.Hash.Write must never fail according to the interface contract
	_, err = beHash.Write(buf)
	if err != nil {
		panic(err)
	}
	if !bytes.Equal(beHash.Sum(nil), rd.Hash()) {
		return errors.Errorf("invalid file hash or content, got %s expected %s",
			base64.RawStdEncoding.EncodeToString(beHash.Sum(nil)),
			base64.RawStdEncoding.EncodeToString(rd.Hash()),
		)
	}

	be.data[h] = buf

	return ctx.Err()
}
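
// Saving to the same handle twice fails, because the existence check runs
// before the reader is consumed; a sketch (continuing the example above):
//
//	_ = be.Save(ctx, h, backend.NewByteReader(data, be.Hasher()))
//	err := be.Save(ctx, h, backend.NewByteReader(data, be.Hasher()))
//	// err: "file already exists"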

// Load runs fn with a reader that yields the contents of the file at h at the
// given offset.
func (be *MemoryBackend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
	return util.DefaultLoad(ctx, h, length, offset, be.openReader, fn)
}

func (be *MemoryBackend) openReader(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
	be.m.Lock()
	defer be.m.Unlock()

	h.IsMetadata = false
	if h.Type == backend.ConfigFile {
		h.Name = ""
	}

	if _, ok := be.data[h]; !ok {
		return nil, errNotFound
	}

	buf := be.data[h]
	if offset+int64(length) > int64(len(buf)) {
		return nil, errTooSmall
	}

	buf = buf[offset:]
	// a length of zero means "read to the end of the file"
	if length > 0 {
		buf = buf[:length]
	}

	return io.NopCloser(bytes.NewReader(buf)), ctx.Err()
}
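
// Partial reads are supported; a sketch (offset and length chosen for
// illustration, the file must be at least 6 bytes long):
//
//	// read 4 bytes starting at offset 2
//	err := be.Load(context.TODO(), h, 4, 2, func(rd io.Reader) error {
//		buf, err := io.ReadAll(rd)
//		_ = buf // 4 bytes of the file, starting at offset 2
//		return err
//	})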

// Stat returns information about a file in the backend.
func (be *MemoryBackend) Stat(ctx context.Context, h backend.Handle) (backend.FileInfo, error) {
	be.m.Lock()
	defer be.m.Unlock()

	h.IsMetadata = false
	if h.Type == backend.ConfigFile {
		h.Name = ""
	}

	e, ok := be.data[h]
	if !ok {
		return backend.FileInfo{}, errNotFound
	}

	return backend.FileInfo{Size: int64(len(e)), Name: h.Name}, ctx.Err()
}

// Remove deletes a file from the backend.
func (be *MemoryBackend) Remove(ctx context.Context, h backend.Handle) error {
	be.m.Lock()
	defer be.m.Unlock()

	h.IsMetadata = false
	if _, ok := be.data[h]; !ok {
		return errNotFound
	}

	delete(be.data, h)

	return ctx.Err()
}

// List calls fn for each entry of the given type in the backend.
func (be *MemoryBackend) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error {
	entries := make(map[string]int64)

	// snapshot the matching entries under the lock, so that fn can be
	// called without holding it
	be.m.Lock()
	for entry, buf := range be.data {
		if entry.Type != t {
			continue
		}

		entries[entry.Name] = int64(len(buf))
	}
	be.m.Unlock()

	for name, size := range entries {
		fi := backend.FileInfo{
			Name: name,
			Size: size,
		}

		if ctx.Err() != nil {
			return ctx.Err()
		}

		err := fn(fi)
		if err != nil {
			return err
		}

		if ctx.Err() != nil {
			return ctx.Err()
		}
	}

	return ctx.Err()
}
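
// For example (sketch), listing all pack files and their sizes:
//
//	err := be.List(context.TODO(), backend.PackFile, func(fi backend.FileInfo) error {
//		fmt.Printf("%v: %d bytes\n", fi.Name, fi.Size)
//		return nil
//	})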

// Connections returns the number of concurrent operations the backend supports.
func (be *MemoryBackend) Connections() uint {
	return connectionCount
}

// Hasher may return a hash function for calculating a content hash for the backend
func (be *MemoryBackend) Hasher() hash.Hash {
	return xxhash.New()
}
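
// The returned hash can be used to precompute the content hash that Save
// verifies against rd.Hash(); a sketch:
//
//	h := be.Hasher()
//	_, _ = h.Write(data) // never fails per the hash.Hash contract
//	sum := h.Sum(nil)    // 8-byte xxhash digest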

// HasAtomicReplace returns whether Save() can atomically replace files
func (be *MemoryBackend) HasAtomicReplace() bool {
	return false
}

// Delete removes all data in the backend.
func (be *MemoryBackend) Delete(ctx context.Context) error {
	be.m.Lock()
	defer be.m.Unlock()

	if ctx.Err() != nil {
		return ctx.Err()
	}

	be.data = make(memMap)
	return nil
}

// Close closes the backend.
func (be *MemoryBackend) Close() error {
	return nil
}

// Warmup is not implemented: all data already lives in memory, so there is
// nothing to transition out of cold storage and no handles to wait for.
func (be *MemoryBackend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
	return []backend.Handle{}, nil
}

// WarmupWait is not implemented, see Warmup.
func (be *MemoryBackend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }