package azure

import (
	"bytes"
	"context"
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"hash"
	"io"
	"net/http"
	"path"
	"strings"

	"github.com/restic/restic/internal/backend"
	"github.com/restic/restic/internal/backend/layout"
	"github.com/restic/restic/internal/backend/location"
	"github.com/restic/restic/internal/backend/util"
	"github.com/restic/restic/internal/debug"
	"github.com/restic/restic/internal/errors"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
	azContainer "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
)

// Backend stores data on an Azure endpoint.
type Backend struct {
	cfg          Config
	container    *azContainer.Client
	connections  uint
	prefix       string
	listMaxItems int
	layout.Layout

	accessTier blob.AccessTier
}
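
// saveLargeSize is the size threshold in bytes above which Save uploads a blob
// in multiple staged blocks instead of a single block.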
const saveLargeSize = 256 * 1024 * 1024

const defaultListMaxItems = 5000

// make sure that *Backend implements backend.Backend
var _ backend.Backend = &Backend{}
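
// NewFactory creates a location.Factory for the Azure backend.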
func NewFactory() location.Factory {
	return location.NewHTTPBackendFactory("azure", ParseConfig, location.NoPassword, Create, Open)
}
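
// open creates the Azure container client from the given config, authenticating
// with an account key, a SAS token, or Azure identity credentials, and returns
// the initialized Backend.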
func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
	debug.Log("open, config %#v", cfg)
	var client *azContainer.Client
	var err error

	var endpointSuffix string
	if cfg.EndpointSuffix != "" {
		endpointSuffix = cfg.EndpointSuffix
	} else {
		endpointSuffix = "core.windows.net"
	}

	if cfg.AccountName == "" {
		return nil, errors.Fatalf("unable to open Azure backend: Account name ($AZURE_ACCOUNT_NAME) is empty")
	}

	url := fmt.Sprintf("https://%s.blob.%s/%s", cfg.AccountName, endpointSuffix, cfg.Container)
	opts := &azContainer.ClientOptions{
		ClientOptions: azcore.ClientOptions{
			Transport: &http.Client{Transport: rt},
		},
	}

	if cfg.AccountKey.String() != "" {
		// We have an account key, use it to authenticate via a shared key credential.
		debug.Log(" - using account key")
		cred, err := azblob.NewSharedKeyCredential(cfg.AccountName, cfg.AccountKey.Unwrap())
		if err != nil {
			return nil, errors.Wrap(err, "NewSharedKeyCredential")
		}

		client, err = azContainer.NewClientWithSharedKeyCredential(url, cred, opts)

		if err != nil {
			return nil, errors.Wrap(err, "NewClientWithSharedKeyCredential")
		}
	} else if cfg.AccountSAS.String() != "" {
		// Get the client using the SAS token as authentication. This is more
		// long-winded than above because the SDK wants the full account URL
		// when you're using a SAS token, not just the account name; as per the
		// SDK, we assume the default Azure portal.
		// https://github.com/Azure/azure-storage-blob-go/issues/130
		debug.Log(" - using sas token")
		sas := cfg.AccountSAS.Unwrap()

		// strip query sign prefix
		if sas[0] == '?' {
			sas = sas[1:]
		}

		urlWithSAS := fmt.Sprintf("%s?%s", url, sas)

		client, err = azContainer.NewClientWithNoCredential(urlWithSAS, opts)
		if err != nil {
			return nil, errors.Wrap(err, "NewClientWithNoCredential")
		}
	} else {
		var cred azcore.TokenCredential

		if cfg.ForceCliCredential {
			debug.Log(" - using AzureCLICredential")
			cred, err = azidentity.NewAzureCLICredential(nil)
			if err != nil {
				return nil, errors.Wrap(err, "NewAzureCLICredential")
			}
		} else {
			debug.Log(" - using DefaultAzureCredential")
			cred, err = azidentity.NewDefaultAzureCredential(nil)
			if err != nil {
				return nil, errors.Wrap(err, "NewDefaultAzureCredential")
			}
		}

		client, err = azContainer.NewClient(url, cred, opts)
		if err != nil {
			return nil, errors.Wrap(err, "NewClient")
		}
	}

	var accessTier blob.AccessTier
	// if the access tier is not supported, then we will not set the access tier; during the upload process,
	// the value will be inferred from the default configured on the storage account.
	for _, tier := range supportedAccessTiers() {
		if strings.EqualFold(string(tier), cfg.AccessTier) {
			accessTier = tier
			debug.Log(" - using access tier %v", accessTier)
			break
		}
	}

	be := &Backend{
		container:    client,
		cfg:          cfg,
		connections:  cfg.Connections,
		Layout:       layout.NewDefaultLayout(cfg.Prefix, path.Join),
		listMaxItems: defaultListMaxItems,
		accessTier:   accessTier,
	}

	return be, nil
}
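
// supportedAccessTiers returns the blob access tiers that may be selected via
// the AccessTier configuration value.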
func supportedAccessTiers() []blob.AccessTier {
	return []blob.AccessTier{blob.AccessTierHot, blob.AccessTierCool, blob.AccessTierCold, blob.AccessTierArchive}
}

// Open opens the Azure backend at the specified container.
func Open(_ context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) {
	return open(cfg, rt)
}

// Create opens the Azure backend at the specified container and creates the
// container if it does not exist yet.
func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) {
	be, err := open(cfg, rt)

	if err != nil {
		return nil, errors.Wrap(err, "open")
	}

	_, err = be.container.GetProperties(ctx, &azContainer.GetPropertiesOptions{})

	if err != nil && bloberror.HasCode(err, bloberror.ContainerNotFound) {
		_, err = be.container.Create(ctx, &azContainer.CreateOptions{})

		if err != nil {
			return nil, errors.Wrap(err, "container.Create")
		}
	} else if err != nil && bloberror.HasCode(err, bloberror.AuthorizationFailure) {
		// We ignore this authorization failure, as it is related to the type
		// of SAS/SAT, not an actual failure. If the token is invalid, we
		// fail later on anyway.
		// For details see issue #4004.
		debug.Log("Ignoring AuthorizationFailure when calling GetProperties")
	} else if err != nil {
		return be, errors.Wrap(err, "container.GetProperties")
	}

	return be, nil
}

// SetListMaxItems sets the number of list items to load per request.
func (be *Backend) SetListMaxItems(i int) {
	be.listMaxItems = i
}

// IsNotExist returns true if the error is caused by a not existing file.
func (be *Backend) IsNotExist(err error) bool {
	return bloberror.HasCode(err, bloberror.BlobNotFound)
}
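
// IsPermanentError returns true if the error is caused by a missing blob or by
// an HTTP status that retrying will not resolve (range not satisfiable,
// unauthorized, forbidden).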
func (be *Backend) IsPermanentError(err error) bool {
	if be.IsNotExist(err) {
		return true
	}

	var aerr *azcore.ResponseError
	if errors.As(err, &aerr) {
		if aerr.StatusCode == http.StatusRequestedRangeNotSatisfiable || aerr.StatusCode == http.StatusUnauthorized || aerr.StatusCode == http.StatusForbidden {
			return true
		}
	}
	return false
}
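
// Connections returns the number of concurrent connections configured for the backend.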
func (be *Backend) Connections() uint {
	return be.connections
}

// Hasher may return a hash function for calculating a content hash for the backend
func (be *Backend) Hasher() hash.Hash {
	return md5.New()
}

// HasAtomicReplace returns whether Save() can atomically replace files
func (be *Backend) HasAtomicReplace() bool {
	return true
}

// Path returns the path in the bucket that is used for this backend.
func (be *Backend) Path() string {
	return be.prefix
}

// useAccessTier determines whether to apply the configured access tier to a given file.
// For archive access tier, only data files are stored using that class; metadata
// must remain instantly accessible.
func (be *Backend) useAccessTier(h backend.Handle) bool {
	notArchiveClass := !strings.EqualFold(be.cfg.AccessTier, "archive")
	isDataFile := h.Type == backend.PackFile && !h.IsMetadata
	return isDataFile || notArchiveClass
}

// Save stores data in the backend at the handle.
func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
	objName := be.Filename(h)

	debug.Log("InsertObject(%v, %v)", be.cfg.AccountName, objName)

	var accessTier blob.AccessTier
	if be.useAccessTier(h) {
		accessTier = be.accessTier
	}

	var err error
	if rd.Length() < saveLargeSize {
		// if it's smaller than 256 MiB, then just create the file directly from the reader
		err = be.saveSmall(ctx, objName, rd, accessTier)
	} else {
		// otherwise use the more complicated multi-block method
		err = be.saveLarge(ctx, objName, rd, accessTier)
	}

	return err
}
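
// saveSmall uploads the blob as a single staged block and commits it.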
func (be *Backend) saveSmall(ctx context.Context, objName string, rd backend.RewindReader, accessTier blob.AccessTier) error {
	blockBlobClient := be.container.NewBlockBlobClient(objName)

	// upload it as a new "block", use the base64 hash for the ID
	id := base64.StdEncoding.EncodeToString(rd.Hash())

	buf := make([]byte, rd.Length())
	_, err := io.ReadFull(rd, buf)
	if err != nil {
		return errors.Wrap(err, "ReadFull")
	}

	reader := bytes.NewReader(buf)
	_, err = blockBlobClient.StageBlock(ctx, id, streaming.NopCloser(reader), &blockblob.StageBlockOptions{
		TransactionalValidation: blob.TransferValidationTypeMD5(rd.Hash()),
	})
	if err != nil {
		return errors.Wrap(err, "StageBlock")
	}

	blocks := []string{id}
	_, err = blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{
		Tier: &accessTier,
	})
	return errors.Wrap(err, "CommitBlockList")
}
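
// saveLarge uploads the blob in blocks of up to 100 MiB, staging each block with
// an MD5 checksum and committing the resulting block list.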
func (be *Backend) saveLarge(ctx context.Context, objName string, rd backend.RewindReader, accessTier blob.AccessTier) error {
	blockBlobClient := be.container.NewBlockBlobClient(objName)

	buf := make([]byte, 100*1024*1024)
	blocks := []string{}
	uploadedBytes := 0

	for {
		n, err := io.ReadFull(rd, buf)
		if err == io.ErrUnexpectedEOF {
			err = nil
		}

		if err == io.EOF {
			// end of file reached, no bytes have been read at all
			break
		}

		if err != nil {
			return errors.Wrap(err, "ReadFull")
		}

		buf = buf[:n]
		uploadedBytes += n

		// upload it as a new "block", use the base64 hash for the ID
		h := md5.Sum(buf)
		id := base64.StdEncoding.EncodeToString(h[:])

		reader := bytes.NewReader(buf)
		debug.Log("StageBlock %v with %d bytes", id, len(buf))
		_, err = blockBlobClient.StageBlock(ctx, id, streaming.NopCloser(reader), &blockblob.StageBlockOptions{
			TransactionalValidation: blob.TransferValidationTypeMD5(h[:]),
		})

		if err != nil {
			return errors.Wrap(err, "StageBlock")
		}

		blocks = append(blocks, id)
	}

	// sanity check
	if uploadedBytes != int(rd.Length()) {
		return errors.Errorf("wrote %d bytes instead of the expected %d bytes", uploadedBytes, rd.Length())
	}

	_, err := blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{
		Tier: &accessTier,
	})

	debug.Log("uploaded %d parts: %v", len(blocks), blocks)
	return errors.Wrap(err, "CommitBlockList")
}

// Load runs fn with a reader that yields the contents of the file at h at the
// given offset.
func (be *Backend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
	return util.DefaultLoad(ctx, h, length, offset, be.openReader, fn)
}
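
// openReader downloads the requested byte range of the blob at h and returns a
// reader for it. If the response is shorter than requested, an error is returned.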
func (be *Backend) openReader(ctx context.Context, h backend.Handle, length int, offset int64) (io.ReadCloser, error) {
	objName := be.Filename(h)
	blockBlobClient := be.container.NewBlobClient(objName)

	resp, err := blockBlobClient.DownloadStream(ctx, &blob.DownloadStreamOptions{
		Range: azblob.HTTPRange{
			Offset: offset,
			Count:  int64(length),
		},
	})

	if err != nil {
		return nil, err
	}

	if length > 0 && (resp.ContentLength == nil || *resp.ContentLength != int64(length)) {
		_ = resp.Body.Close()
		return nil, &azcore.ResponseError{ErrorCode: "restic-file-too-short", StatusCode: http.StatusRequestedRangeNotSatisfiable}
	}

	return resp.Body, err
}

// Stat returns information about a blob.
func (be *Backend) Stat(ctx context.Context, h backend.Handle) (backend.FileInfo, error) {
	objName := be.Filename(h)
	blobClient := be.container.NewBlobClient(objName)

	props, err := blobClient.GetProperties(ctx, nil)

	if err != nil {
		return backend.FileInfo{}, errors.Wrap(err, "blob.GetProperties")
	}

	fi := backend.FileInfo{
		Size: *props.ContentLength,
		Name: h.Name,
	}
	return fi, nil
}

// Remove removes the blob with the given name and type.
func (be *Backend) Remove(ctx context.Context, h backend.Handle) error {
	objName := be.Filename(h)
	blob := be.container.NewBlobClient(objName)

	_, err := blob.Delete(ctx, &azblob.DeleteBlobOptions{})

	if be.IsNotExist(err) {
		return nil
	}

	return errors.Wrap(err, "blob.Delete")
}

// List runs fn for each file in the backend which has the type t. When an
// error occurs (or fn returns an error), List stops and returns it.
func (be *Backend) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error {
	prefix, _ := be.Basedir(t)

	// make sure prefix ends with a slash
	if !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}

	max := int32(be.listMaxItems)

	opts := &azContainer.ListBlobsFlatOptions{
		MaxResults: &max,
		Prefix:     &prefix,
	}
	lister := be.container.NewListBlobsFlatPager(opts)

	for lister.More() {
		resp, err := lister.NextPage(ctx)

		if err != nil {
			return err
		}

		debug.Log("got %v objects", len(resp.Segment.BlobItems))

		for _, item := range resp.Segment.BlobItems {
			m := strings.TrimPrefix(*item.Name, prefix)
			if m == "" {
				continue
			}

			fi := backend.FileInfo{
				Name: path.Base(m),
				Size: *item.Properties.ContentLength,
			}

			if ctx.Err() != nil {
				return ctx.Err()
			}

			err := fn(fi)
			if err != nil {
				return err
			}

			if ctx.Err() != nil {
				return ctx.Err()
			}
		}
	}

	return ctx.Err()
}

// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *Backend) Delete(ctx context.Context) error {
	return util.DefaultDelete(ctx, be)
}

// Close does nothing
func (be *Backend) Close() error { return nil }

// Warmup not implemented
func (be *Backend) Warmup(_ context.Context, _ []backend.Handle) ([]backend.Handle, error) {
	return []backend.Handle{}, nil
}
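// WarmupWait not implemented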
func (be *Backend) WarmupWait(_ context.Context, _ []backend.Handle) error { return nil }