index: automatically write full indexes in StorePack

This commit is contained in:
Michael Eischer 2025-02-12 21:05:54 +01:00
parent da47967316
commit 2fd8a3865c
4 changed files with 21 additions and 17 deletions

View File

@@ -35,8 +35,8 @@ func TestAssociatedSet(t *testing.T) {
bh, blob := makeFakePackedBlob() bh, blob := makeFakePackedBlob()
mi := NewMasterIndex() mi := NewMasterIndex()
mi.StorePack(blob.PackID, []restic.Blob{blob.Blob}) test.OK(t, mi.StorePack(context.TODO(), blob.PackID, []restic.Blob{blob.Blob}, &noopSaver{}))
test.OK(t, mi.SaveIndex(context.TODO(), &noopSaver{})) test.OK(t, mi.Flush(context.TODO(), &noopSaver{}))
bs := NewAssociatedSet[uint8](mi) bs := NewAssociatedSet[uint8](mi)
test.Equals(t, bs.Len(), 0) test.Equals(t, bs.Len(), 0)
@@ -118,15 +118,15 @@ func TestAssociatedSetWithExtendedIndex(t *testing.T) {
_, blob := makeFakePackedBlob() _, blob := makeFakePackedBlob()
mi := NewMasterIndex() mi := NewMasterIndex()
mi.StorePack(blob.PackID, []restic.Blob{blob.Blob}) test.OK(t, mi.StorePack(context.TODO(), blob.PackID, []restic.Blob{blob.Blob}, &noopSaver{}))
test.OK(t, mi.SaveIndex(context.TODO(), &noopSaver{})) test.OK(t, mi.Flush(context.TODO(), &noopSaver{}))
bs := NewAssociatedSet[uint8](mi) bs := NewAssociatedSet[uint8](mi)
// add new blobs to index after building the set // add new blobs to index after building the set
of, blob2 := makeFakePackedBlob() of, blob2 := makeFakePackedBlob()
mi.StorePack(blob2.PackID, []restic.Blob{blob2.Blob}) test.OK(t, mi.StorePack(context.TODO(), blob2.PackID, []restic.Blob{blob2.Blob}, &noopSaver{}))
test.OK(t, mi.SaveIndex(context.TODO(), &noopSaver{})) test.OK(t, mi.Flush(context.TODO(), &noopSaver{}))
// non-existent // non-existent
test.Equals(t, false, bs.Has(of)) test.Equals(t, false, bs.Has(of))

View File

@@ -153,7 +153,12 @@ func (mi *MasterIndex) Insert(idx *Index) {
} }
// StorePack remembers the id and pack in the index. // StorePack remembers the id and pack in the index.
func (mi *MasterIndex) StorePack(id restic.ID, blobs []restic.Blob) { func (mi *MasterIndex) StorePack(ctx context.Context, id restic.ID, blobs []restic.Blob, r restic.SaverUnpacked[restic.FileType]) error {
mi.storePack(id, blobs)
return mi.saveFullIndex(ctx, r)
}
func (mi *MasterIndex) storePack(id restic.ID, blobs []restic.Blob) {
mi.idxMutex.Lock() mi.idxMutex.Lock()
defer mi.idxMutex.Unlock() defer mi.idxMutex.Unlock()
@@ -589,13 +594,13 @@ func (mi *MasterIndex) saveIndex(ctx context.Context, r restic.SaverUnpacked[res
return mi.MergeFinalIndexes() return mi.MergeFinalIndexes()
} }
// SaveIndex saves all new indexes in the backend. // Flush saves all new indexes in the backend.
func (mi *MasterIndex) SaveIndex(ctx context.Context, r restic.SaverUnpacked[restic.FileType]) error { func (mi *MasterIndex) Flush(ctx context.Context, r restic.SaverUnpacked[restic.FileType]) error {
return mi.saveIndex(ctx, r, mi.finalizeNotFinalIndexes()...) return mi.saveIndex(ctx, r, mi.finalizeNotFinalIndexes()...)
} }
// SaveFullIndex saves all full indexes in the backend. // saveFullIndex saves all full indexes in the backend.
func (mi *MasterIndex) SaveFullIndex(ctx context.Context, r restic.SaverUnpacked[restic.FileType]) error { func (mi *MasterIndex) saveFullIndex(ctx context.Context, r restic.SaverUnpacked[restic.FileType]) error {
return mi.saveIndex(ctx, r, mi.finalizeFullIndexes()...) return mi.saveIndex(ctx, r, mi.finalizeFullIndexes()...)
} }

View File

@@ -187,8 +187,5 @@ func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *packe
// update blobs in the index // update blobs in the index
debug.Log(" updating blobs %v to pack %v", p.Packer.Blobs(), id) debug.Log(" updating blobs %v to pack %v", p.Packer.Blobs(), id)
r.idx.StorePack(id, p.Packer.Blobs()) return r.idx.StorePack(ctx, id, p.Packer.Blobs(), &internalRepository{r})
// Save index if full
return r.idx.SaveFullIndex(ctx, &internalRepository{r})
} }

View File

@@ -542,7 +542,7 @@ func (r *Repository) Flush(ctx context.Context) error {
return err return err
} }
return r.idx.SaveIndex(ctx, &internalRepository{r}) return r.idx.Flush(ctx, &internalRepository{r})
} }
func (r *Repository) StartPackUploader(ctx context.Context, wg *errgroup.Group) { func (r *Repository) StartPackUploader(ctx context.Context, wg *errgroup.Group) {
@@ -701,7 +701,9 @@ func (r *Repository) createIndexFromPacks(ctx context.Context, packsize map[rest
invalid = append(invalid, fi.ID) invalid = append(invalid, fi.ID)
m.Unlock() m.Unlock()
} }
r.idx.StorePack(fi.ID, entries) if err := r.idx.StorePack(ctx, fi.ID, entries, &internalRepository{r}); err != nil {
return err
}
p.Add(1) p.Add(1)
} }