Remove unnecessary type conversions.

This commit is contained in:
Martin Smith 2025-02-28 19:38:33 +00:00
parent f95786dfa3
commit 6dcd3d4d8c
17 changed files with 30 additions and 30 deletions

View File

@ -465,8 +465,8 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
Printf("Others: %5d new, %5d removed\n", stats.Added.Others, stats.Removed.Others)
Printf("Data Blobs: %5d new, %5d removed\n", stats.Added.DataBlobs, stats.Removed.DataBlobs)
Printf("Tree Blobs: %5d new, %5d removed\n", stats.Added.TreeBlobs, stats.Removed.TreeBlobs)
-Printf(" Added: %-5s\n", ui.FormatBytes(uint64(stats.Added.Bytes)))
-Printf(" Removed: %-5s\n", ui.FormatBytes(uint64(stats.Removed.Bytes)))
+Printf(" Added: %-5s\n", ui.FormatBytes(stats.Added.Bytes))
+Printf(" Removed: %-5s\n", ui.FormatBytes(stats.Removed.Bytes))
}
return nil

View File

@ -299,7 +299,7 @@ func statsWalkTree(repo restic.Loader, opts StatsOptions, stats *statsContainer,
func makeFileIDByContents(node *restic.Node) fileID {
var bb []byte
for _, c := range node.Content {
-bb = append(bb, []byte(c[:])...)
+bb = append(bb, c[:]...)
}
return sha256.Sum256(bb)
}

View File

@ -1006,11 +1006,11 @@ func TestArchiverSaveTree(t *testing.T) {
}{
{
src: TestDir{
-"targetfile": TestFile{Content: string("foobar")},
+"targetfile": TestFile{Content: "foobar"},
},
targets: []string{"targetfile"},
want: TestDir{
-"targetfile": TestFile{Content: string("foobar")},
+"targetfile": TestFile{Content: "foobar"},
},
stat: Summary{
ItemStats: ItemStats{1, 6, 32 + 6, 0, 0, 0},
@ -1021,12 +1021,12 @@ func TestArchiverSaveTree(t *testing.T) {
},
{
src: TestDir{
-"targetfile": TestFile{Content: string("foobar")},
+"targetfile": TestFile{Content: "foobar"},
},
prepare: symlink("targetfile", "filesymlink"),
targets: []string{"targetfile", "filesymlink"},
want: TestDir{
-"targetfile": TestFile{Content: string("foobar")},
+"targetfile": TestFile{Content: "foobar"},
"filesymlink": TestSymlink{Target: "targetfile"},
},
stat: Summary{
@ -1041,10 +1041,10 @@ func TestArchiverSaveTree(t *testing.T) {
"dir": TestDir{
"subdir": TestDir{
"subsubdir": TestDir{
-"targetfile": TestFile{Content: string("foobar")},
+"targetfile": TestFile{Content: "foobar"},
},
},
-"otherfile": TestFile{Content: string("xxx")},
+"otherfile": TestFile{Content: "xxx"},
},
},
prepare: symlink("subdir", filepath.FromSlash("dir/symlink")),
@ -1066,10 +1066,10 @@ func TestArchiverSaveTree(t *testing.T) {
"dir": TestDir{
"subdir": TestDir{
"subsubdir": TestDir{
-"targetfile": TestFile{Content: string("foobar")},
+"targetfile": TestFile{Content: "foobar"},
},
},
-"otherfile": TestFile{Content: string("xxx")},
+"otherfile": TestFile{Content: "xxx"},
},
},
prepare: symlink("subdir", filepath.FromSlash("dir/symlink")),
@ -1078,7 +1078,7 @@ func TestArchiverSaveTree(t *testing.T) {
"dir": TestDir{
"symlink": TestDir{
"subsubdir": TestDir{
-"targetfile": TestFile{Content: string("foobar")},
+"targetfile": TestFile{Content: "foobar"},
},
},
},
@ -1696,8 +1696,8 @@ func checkSnapshotStats(t *testing.T, sn *restic.Snapshot, stat Summary) {
rtest.Equals(t, stat.Files.New+stat.Files.Changed+stat.Files.Unchanged, sn.Summary.TotalFilesProcessed, "TotalFilesProcessed")
bothZeroOrNeither(t, uint64(stat.DataBlobs), uint64(sn.Summary.DataBlobs))
bothZeroOrNeither(t, uint64(stat.TreeBlobs), uint64(sn.Summary.TreeBlobs))
-bothZeroOrNeither(t, uint64(stat.DataSize+stat.TreeSize), uint64(sn.Summary.DataAdded))
-bothZeroOrNeither(t, uint64(stat.DataSizeInRepo+stat.TreeSizeInRepo), uint64(sn.Summary.DataAddedPacked))
+bothZeroOrNeither(t, stat.DataSize+stat.TreeSize, sn.Summary.DataAdded)
+bothZeroOrNeither(t, stat.DataSizeInRepo+stat.TreeSizeInRepo, sn.Summary.DataAddedPacked)
}
func TestArchiverParent(t *testing.T) {

View File

@ -340,7 +340,7 @@ func (be *Backend) List(ctx context.Context, t backend.FileType, fn func(backend
fi := backend.FileInfo{
Name: path.Base(m),
-Size: int64(attrs.Size),
+Size: attrs.Size,
}
err = fn(fi)

View File

@ -36,7 +36,7 @@ func TestLimiterWrapping(t *testing.T) {
func TestReadLimiter(t *testing.T) {
reader := bytes.NewReader(make([]byte, 300))
-limiter := rate.NewLimiter(rate.Limit(10000), int(100))
+limiter := rate.NewLimiter(rate.Limit(10000), 100)
limReader := rateLimitedReader{reader, limiter}
n, err := limReader.Read([]byte{})
@ -54,7 +54,7 @@ func TestReadLimiter(t *testing.T) {
func TestWriteLimiter(t *testing.T) {
writer := &bytes.Buffer{}
-limiter := rate.NewLimiter(rate.Limit(10000), int(100))
+limiter := rate.NewLimiter(rate.Limit(10000), 100)
limReader := rateLimitedWriter{writer, limiter}
n, err := limReader.Write([]byte{})

View File

@ -304,7 +304,7 @@ func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.Rewind
opts.StorageClass = be.cfg.StorageClass
}
-info, err := be.client.PutObject(ctx, be.cfg.Bucket, objName, io.NopCloser(rd), int64(rd.Length()), opts)
+info, err := be.client.PutObject(ctx, be.cfg.Bucket, objName, io.NopCloser(rd), rd.Length(), opts)
// sanity check
if err == nil && info.Size != rd.Length() {

View File

@ -101,7 +101,7 @@ func countingBlocker() (func(), func(int) int) {
}
func concurrencyTester(t *testing.T, setup func(m *mock.Backend), handler func(be backend.Backend) func() error, unblock func(int) int, isUnlimited bool) {
-expectBlocked := int(2)
+expectBlocked := 2
workerCount := expectBlocked + 1
m := mock.NewBackend()

View File

@ -696,7 +696,7 @@ var testStrings = []struct {
func store(t testing.TB, b backend.Backend, tpe backend.FileType, data []byte) backend.Handle {
id := restic.Hash(data)
h := backend.Handle{Name: id.String(), Type: tpe}
-err := b.Save(context.TODO(), h, backend.NewByteReader([]byte(data), b.Hasher()))
+err := b.Save(context.TODO(), h, backend.NewByteReader(data, b.Hasher()))
test.OK(t, err)
return h
}

View File

@ -32,7 +32,7 @@ func TestDefaultLoad(t *testing.T) {
// happy case, assert correct parameters are passed around and content stream is closed
err := util.DefaultLoad(context.TODO(), h, 10, 11, func(ctx context.Context, ih backend.Handle, length int, offset int64) (io.ReadCloser, error) {
rtest.Equals(t, h, ih)
-rtest.Equals(t, int(10), length)
+rtest.Equals(t, 10, length)
rtest.Equals(t, int64(11), offset)
return rd, nil

View File

@ -150,7 +150,7 @@ func getFileEA(handle windows.Handle, iosb *ioStatusBlock, buf *uint8, bufLen ui
if restartScan {
_p1 = 1
}
-r0, _, _ := syscall.SyscallN(procNtQueryEaFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(buf)), uintptr(bufLen), uintptr(_p0), uintptr(eaList), uintptr(eaListLen), uintptr(unsafe.Pointer(eaIndex)), uintptr(_p1))
+r0, _, _ := syscall.SyscallN(procNtQueryEaFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(buf)), uintptr(bufLen), uintptr(_p0), eaList, uintptr(eaListLen), uintptr(unsafe.Pointer(eaIndex)), uintptr(_p1))
status = ntStatus(r0)
return
}

View File

@ -105,7 +105,7 @@ func clearAttribute(path string, attribute uint32) error {
}
if fileAttributes&attribute != 0 {
// Clear the attribute
-fileAttributes &= ^uint32(attribute)
+fileAttributes &= ^attribute
err = windows.SetFileAttributes(ptr, fileAttributes)
if err != nil {
return err

View File

@ -211,6 +211,6 @@ func securityDescriptorBytesToStruct(sd []byte) (*windows.SECURITY_DESCRIPTOR, e
// securityDescriptorStructToBytes converts the pointer to windows SECURITY_DESCRIPTOR
// into a security descriptor bytes representation.
func securityDescriptorStructToBytes(sd *windows.SECURITY_DESCRIPTOR) ([]byte, error) {
-b := unsafe.Slice((*byte)(unsafe.Pointer(sd)), sd.Length())
+b := unsafe.Slice(unsafe.Pointer(sd), sd.Length())
return b, nil
}

View File

@ -62,7 +62,7 @@ func TestPackerManager(t *testing.T) {
func testPackerManager(t testing.TB) int64 {
rnd := rand.New(rand.NewSource(randomSeed))
-savedBytes := int(0)
+savedBytes := 0
pm := newPackerManager(crypto.NewRandomKey(), restic.DataBlob, DefaultPackSize, func(ctx context.Context, tp restic.BlobType, p *packer) error {
err := p.Finalize()
if err != nil {
@ -83,7 +83,7 @@ func testPackerManager(t testing.TB) int64 {
}
func TestPackerManagerWithOversizeBlob(t *testing.T) {
-packFiles := int(0)
+packFiles := 0
sizeLimit := uint(512 * 1024)
pm := newPackerManager(crypto.NewRandomKey(), restic.DataBlob, sizeLimit, func(ctx context.Context, tp restic.BlobType, p *packer) error {
packFiles++

View File

@ -272,7 +272,7 @@ func (r *Repository) loadBlob(ctx context.Context, blobs []restic.PackedBlob, bu
continue
}
-it := newPackBlobIterator(blob.PackID, newByteReader(buf), uint(blob.Offset), []restic.Blob{blob.Blob}, r.key, r.getZstdDecoder())
+it := newPackBlobIterator(blob.PackID, newByteReader(buf), blob.Offset, []restic.Blob{blob.Blob}, r.key, r.getZstdDecoder())
pbv, err := it.Next()
if err == nil {

View File

@ -494,7 +494,7 @@ func OSAttrsToGenericAttributes(attributeType reflect.Type, attributeValuePtr *r
}
// Insert the field into the map
-attrs[getFQKey(field, keyPrefix)] = json.RawMessage(fieldBytes)
+attrs[getFQKey(field, keyPrefix)] = fieldBytes
}
return attrs, nil
}

View File

@ -399,5 +399,5 @@ func (r *fileRestorer) reportBlobProgress(file *fileInfo, blobSize uint64) {
if file.state == nil {
action = restore.ActionFileRestored
}
-r.progress.AddProgress(file.location, action, uint64(blobSize), uint64(file.size))
+r.progress.AddProgress(file.location, action, blobSize, uint64(file.size))
}

View File

@ -54,7 +54,7 @@ func TestCounter(t *testing.T) {
test.Assert(t, increasing, "values not increasing")
test.Equals(t, uint64(N), last)
test.Equals(t, uint64(42), lastTotal)
-test.Equals(t, int(1), nmaxChange)
+test.Equals(t, 1, nmaxChange)
t.Log("number of calls:", ncalls)
}