Remove potential integer overflow conversion uint64 -> int64

Deluan 2024-08-22 19:28:22 -04:00
parent 5d81849603
commit c95fa11a2f
4 changed files with 14 additions and 12 deletions
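The conversion being removed is the classic unsigned-to-signed wrap-around that linters such as gosec (its G115 check, for instance) report: a uint64 value larger than math.MaxInt64 becomes negative when converted to int64, so an unsigned cache size could, at least in theory, end up as a negative limit. A minimal standalone sketch of the hazard (illustration only, not project code):

package main

import (
	"fmt"
	"math"
)

func main() {
	// A byte count parsed as uint64 that does not fit in an int64...
	var size uint64 = math.MaxInt64 + 1
	// ...wraps around to a negative value when converted.
	fmt.Println(int64(size)) // -9223372036854775808
}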

@@ -207,7 +207,7 @@ func newFSCache(name, cacheSize, cacheFolder string, maxItems int) (fscache.Cach
 		return nil, nil
 	}
-	lru := NewFileHaunter(name, maxItems, int64(size), consts.DefaultCacheCleanUpInterval)
+	lru := NewFileHaunter(name, maxItems, size, consts.DefaultCacheCleanUpInterval)
 	h := fscache.NewLRUHaunterStrategy(lru)
 	cacheFolder = filepath.Join(conf.Server.CacheFolder, cacheFolder)
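At this call site, size is presumably already an unsigned byte count (go-humanize's ParseBytes, for example, returns a uint64), so once NewFileHaunter accepts uint64 the int64(size) conversion can simply be dropped. A rough sketch of that shape, assuming go-humanize for parsing (the "100MB" input is just an example):

package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

func main() {
	// ParseBytes returns (uint64, error), so the parsed size is unsigned
	// from the start and can be passed straight to a uint64 parameter.
	size, err := humanize.ParseBytes("100MB")
	if err != nil {
		panic(err)
	}
	fmt.Println(size) // 100000000
}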

@@ -21,7 +21,7 @@ type haunterKV struct {
 // If maxItems or maxSize are 0, they won't be checked
 //
 // Based on fscache.NewLRUHaunter
-func NewFileHaunter(name string, maxItems int, maxSize int64, period time.Duration) fscache.LRUHaunter {
+func NewFileHaunter(name string, maxItems int, maxSize uint64, period time.Duration) fscache.LRUHaunter {
 	return &fileHaunter{
 		name:   name,
 		period: period,
@@ -34,7 +34,7 @@ type fileHaunter struct {
 	name     string
 	period   time.Duration
 	maxItems int
-	maxSize  int64
+	maxSize  uint64
 }
 
 func (j *fileHaunter) Next() time.Duration {
@@ -43,10 +43,10 @@ func (j *fileHaunter) Next() time.Duration {
 func (j *fileHaunter) Scrub(c fscache.CacheAccessor) (keysToReap []string) {
 	var count int
-	var size int64
+	var size uint64
 	var okFiles []haunterKV
-	log.Trace("Running cache cleanup", "cache", j.name, "maxSize", humanize.Bytes(uint64(j.maxSize)))
+	log.Trace("Running cache cleanup", "cache", j.name, "maxSize", humanize.Bytes(j.maxSize))
 	c.EnumerateEntries(func(key string, e fscache.Entry) bool {
 		if e.InUse() {
 			return true
@@ -63,7 +63,7 @@ func (j *fileHaunter) Scrub(c fscache.CacheAccessor) (keysToReap []string) {
 		}
 		count++
-		size = size + fileInfo.Size()
+		size = size + uint64(fileInfo.Size())
 		okFiles = append(okFiles, haunterKV{
 			key:   key,
 			value: e,
@@ -94,7 +94,7 @@ func (j *fileHaunter) Scrub(c fscache.CacheAccessor) (keysToReap []string) {
 		return true
 	}
-	log.Trace("Current cache stats", "cache", j.name, "size", humanize.Bytes(uint64(size)), "numItems", count)
+	log.Trace("Current cache stats", "cache", j.name, "size", humanize.Bytes(size), "numItems", count)
 	if j.maxItems > 0 {
 		for count > j.maxItems {
@@ -118,13 +118,13 @@ func (j *fileHaunter) Scrub(c fscache.CacheAccessor) (keysToReap []string) {
 	return keysToReap
 }
 
-func (j *fileHaunter) removeFirst(items *[]haunterKV, count int, size int64) (*string, int, int64, error) {
+func (j *fileHaunter) removeFirst(items *[]haunterKV, count int, size uint64) (*string, int, uint64, error) {
 	var f haunterKV
 	f, *items = (*items)[0], (*items)[1:]
 	count--
-	size = size - f.info.Size()
+	size = size - uint64(f.info.Size())
 	return &f.key, count, size, nil
 }
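The direction of the remaining conversions is what keeps this safe: os.FileInfo.Size() returns an int64, but it is non-negative for regular files, so widening it with uint64(...) cannot overflow, while humanize.Bytes already takes a uint64, which is why the uint64(...) wrappers around the logged sizes disappear. A small illustration (not project code; the path is just an example):

package main

import (
	"fmt"
	"os"

	"github.com/dustin/go-humanize"
)

func main() {
	fi, err := os.Stat("/etc/hosts") // any existing regular file
	if err != nil {
		panic(err)
	}
	// Size() is an int64, but never negative for a regular file,
	// so widening it into an unsigned running total is lossless.
	var total uint64
	total += uint64(fi.Size())
	fmt.Println(humanize.Bytes(total)) // e.g. "221 B"
}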

@@ -20,7 +20,7 @@ var _ = Describe("FileHaunter", func() {
 	var cacheDir string
 	var err error
 	var maxItems int
-	var maxSize int64
+	var maxSize uint64
 
 	JustBeforeEach(func() {
 		tempDir, _ := os.MkdirTemp("", "spread_fs")
@@ -29,7 +29,9 @@ var _ = Describe("FileHaunter", func() {
 		Expect(err).ToNot(HaveOccurred())
 		DeferCleanup(func() { _ = os.RemoveAll(tempDir) })
-		fsCache, err = fscache.NewCacheWithHaunter(fs, fscache.NewLRUHaunterStrategy(cache.NewFileHaunter("", maxItems, maxSize, 300*time.Millisecond)))
+		fsCache, err = fscache.NewCacheWithHaunter(fs, fscache.NewLRUHaunterStrategy(
+			cache.NewFileHaunter("", maxItems, maxSize, 300*time.Millisecond),
+		))
 		Expect(err).ToNot(HaveOccurred())
 		DeferCleanup(fsCache.Clean)