package rest

import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"path"
	"restic"
	"strings"

	"restic/debug"
	"restic/errors"

	"restic/backend"
)
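
// connLimit limits the number of concurrent HTTP requests to the server. It
// is also used as MaxIdleConnsPerHost in Open, so every request slot can keep
// its connection alive between requests.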
const connLimit = 10

// make sure the rest backend implements restic.Backend
var _ restic.Backend = &restBackend{}

// restPath returns the path to the given resource.
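//
// For example, assuming the usual repository layout (data files below
// "data/"), a data file named "1234" on a server at "http://host:8000/repo"
// maps to "http://host:8000/repo/data/1234".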
func restPath(url *url.URL, h restic.Handle) string {
	u := *url

	var dir string

	switch h.Type {
	case restic.ConfigFile:
		dir = ""
		h.Name = "config"
	case restic.DataFile:
		dir = backend.Paths.Data
	case restic.SnapshotFile:
		dir = backend.Paths.Snapshots
	case restic.IndexFile:
		dir = backend.Paths.Index
	case restic.LockFile:
		dir = backend.Paths.Locks
	case restic.KeyFile:
		dir = backend.Paths.Keys
	default:
		dir = string(h.Type)
	}

	u.Path = path.Join(url.Path, dir, h.Name)

	return u.String()
}

type restBackend struct {
	url      *url.URL      // base URL of the repository on the server
	connChan chan struct{} // token semaphore limiting concurrent requests to connLimit
	client   http.Client
}

// Open opens the REST backend with the given config.
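//
// A minimal usage sketch (serverURL stands in for a parsed *url.URL):
//
//	be, err := Open(Config{URL: serverURL})
//	if err != nil {
//		// handle error
//	}
//	defer be.Close()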
func Open(cfg Config) (restic.Backend, error) {
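	// Fill the channel with connLimit tokens; each request takes one before
	// it starts and puts it back when it finishes.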
	connChan := make(chan struct{}, connLimit)
	for i := 0; i < connLimit; i++ {
		connChan <- struct{}{}
	}

	// Set MaxIdleConnsPerHost to the connection limit, so one idle connection
	// can be kept per concurrent request slot. With the default of 2, the
	// transport closed and redialed connections almost constantly; on a fast
	// client and network that meant hundreds of new TCP connections per
	// second, and the sockets left in TIME_WAIT eventually exhausted the
	// ephemeral port range, making new dials fail with "dial tcp: connect:
	// cannot assign requested address". Why the default is quite this
	// detrimental is not fully understood. Commands that open more than
	// connLimit connections (check uses up to 40) can still outgrow the
	// idle pool.
	tr := &http.Transport{MaxIdleConnsPerHost: connLimit}
	client := http.Client{Transport: tr}

	return &restBackend{url: cfg.URL, connChan: connChan, client: client}, nil
}

// Location returns this backend's location (the server's URL).
func (b *restBackend) Location() string {
	return b.url.String()
}

// Save stores data in the backend at the handle.
func (b *restBackend) Save(h restic.Handle, rd io.Reader) (err error) {
	if err := h.Valid(); err != nil {
		return err
	}

	// take a token: concurrent requests are capped at connLimit
	<-b.connChan
	resp, err := b.client.Post(restPath(b.url, h), "binary/octet-stream", rd)
	b.connChan <- struct{}{}

	if resp != nil {
		defer func() {
			// Read the body to completion before closing it: a partially
			// read body disables HTTP keepalive, so the connection would be
			// closed instead of reused and a new one dialed for the next
			// request.
io.Copy(ioutil.Discard, resp.Body)
e := resp.Body.Close()

			if err == nil {
				err = errors.Wrap(e, "Close")
			}
		}()
	}

	if err != nil {
		return errors.Wrap(err, "client.Post")
	}

	if resp.StatusCode != 200 {
		return errors.Errorf("unexpected HTTP response code %v", resp.StatusCode)
	}

	return nil
}

// Load returns a reader that yields the contents of the file at h at the
// given offset. If length is nonzero, only a portion of the file is
// returned. rd must be closed after use.
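//
// A short usage sketch (b is an open backend, h a valid handle):
//
//	rd, err := b.Load(h, 100, 32) // bytes 32 through 131 of the file
//	if err != nil {
//		// handle error
//	}
//	defer rd.Close()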
func (b *restBackend) Load(h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
	debug.Log("Load %v, length %v, offset %v", h, length, offset)
	if err := h.Valid(); err != nil {
		return nil, err
	}

	if offset < 0 {
		return nil, errors.New("offset is negative")
	}

	if length < 0 {
		return nil, errors.Errorf("invalid length %d", length)
	}

	req, err := http.NewRequest("GET", restPath(b.url, h), nil)
	if err != nil {
		return nil, errors.Wrap(err, "http.NewRequest")
	}
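
	// Request the window via an HTTP Range header: "bytes=<offset>-" reads to
	// the end of the file, "bytes=<offset>-<offset+length-1>" reads exactly
	// length bytes (the end of the range is inclusive).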
	byteRange := fmt.Sprintf("bytes=%d-", offset)
	if length > 0 {
		byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length)-1)
	}
	req.Header.Add("Range", byteRange)
	debug.Log("Load(%v) send range %v", h, byteRange)

	<-b.connChan
	resp, err := b.client.Do(req)
	b.connChan <- struct{}{}

	if err != nil {
		if resp != nil {
			io.Copy(ioutil.Discard, resp.Body)
			resp.Body.Close()
		}
		return nil, errors.Wrap(err, "client.Do")
	}
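
	// A ranged request is answered with 206 Partial Content, a request for
	// the whole file with 200 OK.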
	if resp.StatusCode != 200 && resp.StatusCode != 206 {
		io.Copy(ioutil.Discard, resp.Body)
		resp.Body.Close()
		return nil, errors.Errorf("unexpected HTTP response code %v", resp.StatusCode)
	}

	return resp.Body, nil
}

// Stat returns information about a blob.
func (b *restBackend) Stat(h restic.Handle) (restic.FileInfo, error) {
	if err := h.Valid(); err != nil {
		return restic.FileInfo{}, err
	}

	<-b.connChan
	resp, err := b.client.Head(restPath(b.url, h))
	b.connChan <- struct{}{}
	if err != nil {
		return restic.FileInfo{}, errors.Wrap(err, "client.Head")
	}
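
	// drain the body so the connection can be reused (keepalive)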
io.Copy(ioutil.Discard, resp.Body)
if err = resp.Body.Close(); err != nil {
		return restic.FileInfo{}, errors.Wrap(err, "Close")
	}

	if resp.StatusCode != 200 {
		return restic.FileInfo{}, errors.Errorf("unexpected HTTP response code %v", resp.StatusCode)
	}

	if resp.ContentLength < 0 {
		return restic.FileInfo{}, errors.New("negative content length")
	}

	bi := restic.FileInfo{
		Size: resp.ContentLength,
	}

	return bi, nil
}

// Test returns true if a blob of the given type and name exists in the backend.
func (b *restBackend) Test(t restic.FileType, name string) (bool, error) {
	_, err := b.Stat(restic.Handle{Type: t, Name: name})
	if err != nil {
		return false, nil
	}

	return true, nil
}

// Remove removes the blob with the given name and type.
func (b *restBackend) Remove(t restic.FileType, name string) error {
	h := restic.Handle{Type: t, Name: name}
	if err := h.Valid(); err != nil {
		return err
	}

	req, err := http.NewRequest("DELETE", restPath(b.url, h), nil)
	if err != nil {
		return errors.Wrap(err, "http.NewRequest")
	}
	<-b.connChan
	resp, err := b.client.Do(req)
	b.connChan <- struct{}{}

	if err != nil {
		return errors.Wrap(err, "client.Do")
	}

	if resp.StatusCode != 200 {
		// drain and close the body on the error path too, so the connection
		// can be reused
		io.Copy(ioutil.Discard, resp.Body)
		resp.Body.Close()
		return errors.New("blob not removed")
	}
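
	// drain the body so the connection can be reused (keepalive)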
io.Copy(ioutil.Discard, resp.Body)
return resp.Body.Close()
}

// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (b *restBackend) List(t restic.FileType, done <-chan struct{}) <-chan string {
	ch := make(chan string)

	url := restPath(b.url, restic.Handle{Type: t})
	if !strings.HasSuffix(url, "/") {
		url += "/"
	}

	<-b.connChan
	resp, err := b.client.Get(url)
	b.connChan <- struct{}{}

	if resp != nil {
defer func() {
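			// Drain whatever the JSON decoder below does not consume:
			// json.NewDecoder does not read the body to EOF on its own, and
			// an unread rest would prevent connection reuse.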
			io.Copy(ioutil.Discard, resp.Body)
			e := resp.Body.Close()

			if err == nil {
				err = errors.Wrap(e, "Close")
			}
		}()
	}

	if err != nil {
		close(ch)
		return ch
	}

	dec := json.NewDecoder(resp.Body)
	var list []string
	if err = dec.Decode(&list); err != nil {
		close(ch)
		return ch
	}

	go func() {
		defer close(ch)
		for _, m := range list {
			select {
			case ch <- m:
			case <-done:
				return
			}
		}
	}()

	return ch
}

// Close closes all open files.
func (b *restBackend) Close() error {
	// this does not need to do anything, all open files are closed within the
	// same function.
	return nil
}