Mirror of https://github.com/navidrome/navidrome.git

Remove potential integer overflow conversion uint64 -> int64
parent 5d81849603
commit c95fa11a2f

4 changed files with 14 additions and 12 deletions
@@ -34,4 +34,4 @@ linters-settings:
       - G501
       - G401
       - G505
-      - G115 # Temporarily disabled, see discussion in https://github.com/securego/gosec/pull/1149
+      - G115 # Can't check context, where the warning is clearly a false positive. See discussion in https://github.com/securego/gosec/pull/1149
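
G115 is the gosec rule for integer conversions that may overflow, which is exactly the uint64 -> int64 conversion this commit removes. The sketch below is not from the Navidrome code; it only illustrates the pattern G115 flags (an unchecked uint64 to int64 conversion) and a guarded alternative, with hypothetical helper names.

```go
package main

import (
	"fmt"
	"math"
)

// Unchecked: a uint64 above math.MaxInt64 wraps to a negative int64.
// This is the kind of conversion gosec's G115 rule reports.
func toInt64Unchecked(v uint64) int64 {
	return int64(v)
}

// Checked: reject values that do not fit instead of silently wrapping.
func toInt64Checked(v uint64) (int64, error) {
	if v > math.MaxInt64 {
		return 0, fmt.Errorf("value %d overflows int64", v)
	}
	return int64(v), nil
}

func main() {
	fmt.Println(toInt64Unchecked(math.MaxUint64)) // prints -1
	fmt.Println(toInt64Checked(math.MaxUint64))   // prints 0 and an error
}
```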

utils/cache/file_caches.go (vendored, 2 changed lines)

@@ -207,7 +207,7 @@ func newFSCache(name, cacheSize, cacheFolder string, maxItems int) (fscache.Cach
 		return nil, nil
 	}
 
-	lru := NewFileHaunter(name, maxItems, int64(size), consts.DefaultCacheCleanUpInterval)
+	lru := NewFileHaunter(name, maxItems, size, consts.DefaultCacheCleanUpInterval)
 	h := fscache.NewLRUHaunterStrategy(lru)
 	cacheFolder = filepath.Join(conf.Server.CacheFolder, cacheFolder)
 
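
With NewFileHaunter now taking a uint64, the size computed in newFSCache can be handed over without the int64(size) cast. Below is a hedged sketch of where such a value typically comes from, assuming the cache size is parsed from a human-readable string with go-humanize's ParseBytes (which returns a uint64); the parseCacheSize helper is illustrative and not part of the actual file.

```go
// Illustrative only: ParseBytes yields a uint64, so if the consumer
// (here, hypothetically, NewFileHaunter) also takes a uint64 the value
// never needs to be narrowed to int64 along the way.
package cache

import "github.com/dustin/go-humanize"

func parseCacheSize(cacheSize string) (uint64, error) {
	size, err := humanize.ParseBytes(cacheSize) // e.g. "100MB" -> 100000000
	if err != nil {
		return 0, err
	}
	return size, nil
}
```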

utils/cache/file_haunter.go (vendored, 16 changed lines)

@@ -21,7 +21,7 @@ type haunterKV struct {
 // If maxItems or maxSize are 0, they won't be checked
 //
 // Based on fscache.NewLRUHaunter
-func NewFileHaunter(name string, maxItems int, maxSize int64, period time.Duration) fscache.LRUHaunter {
+func NewFileHaunter(name string, maxItems int, maxSize uint64, period time.Duration) fscache.LRUHaunter {
 	return &fileHaunter{
 		name:     name,
 		period:   period,
@@ -34,7 +34,7 @@ type fileHaunter struct {
 	name     string
 	period   time.Duration
 	maxItems int
-	maxSize  int64
+	maxSize  uint64
 }
 
 func (j *fileHaunter) Next() time.Duration {
@@ -43,10 +43,10 @@ func (j *fileHaunter) Next() time.Duration {
 
 func (j *fileHaunter) Scrub(c fscache.CacheAccessor) (keysToReap []string) {
 	var count int
-	var size int64
+	var size uint64
 	var okFiles []haunterKV
 
-	log.Trace("Running cache cleanup", "cache", j.name, "maxSize", humanize.Bytes(uint64(j.maxSize)))
+	log.Trace("Running cache cleanup", "cache", j.name, "maxSize", humanize.Bytes(j.maxSize))
 	c.EnumerateEntries(func(key string, e fscache.Entry) bool {
 		if e.InUse() {
 			return true
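
humanize.Bytes takes a uint64, so with maxSize (and, further down, the local size counter) now uint64, the uint64(...) casts inside the log calls can simply go away. A minimal standalone illustration, with hypothetical values:

```go
package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

func main() {
	var maxSize uint64 = 500000000

	// humanize.Bytes expects a uint64, so no conversion is needed
	// when the size is already kept as uint64.
	fmt.Println(humanize.Bytes(maxSize)) // "500 MB"
}
```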
@@ -63,7 +63,7 @@ func (j *fileHaunter) Scrub(c fscache.CacheAccessor) (keysToReap []string) {
 		}
 
 		count++
-		size = size + fileInfo.Size()
+		size = size + uint64(fileInfo.Size())
 		okFiles = append(okFiles, haunterKV{
 			key:   key,
 			value: e,
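
os.FileInfo.Size() returns an int64, so a conversion remains on this line, but in the safe direction: a regular file's size is non-negative and always fits in a uint64, unlike the reverse uint64 -> int64 case that G115 warns about. A simplified, self-contained sketch of the same accumulation pattern (totalSize is a hypothetical helper, not the Scrub implementation):

```go
package main

import (
	"fmt"
	"os"
)

// totalSize sums file sizes into an unsigned counter, mirroring how
// Scrub accumulates cache entry sizes.
func totalSize(paths []string) (uint64, error) {
	var size uint64
	for _, p := range paths {
		info, err := os.Stat(p)
		if err != nil {
			return 0, err
		}
		// info.Size() is int64; for regular files it is >= 0, so the
		// conversion to uint64 cannot overflow.
		size += uint64(info.Size())
	}
	return size, nil
}

func main() {
	total, err := totalSize([]string{"go.mod"})
	fmt.Println(total, err)
}
```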
@@ -94,7 +94,7 @@ func (j *fileHaunter) Scrub(c fscache.CacheAccessor) (keysToReap []string) {
 		return true
 	}
 
-	log.Trace("Current cache stats", "cache", j.name, "size", humanize.Bytes(uint64(size)), "numItems", count)
+	log.Trace("Current cache stats", "cache", j.name, "size", humanize.Bytes(size), "numItems", count)
 
 	if j.maxItems > 0 {
 		for count > j.maxItems {
@@ -118,13 +118,13 @@ func (j *fileHaunter) Scrub(c fscache.CacheAccessor) (keysToReap []string) {
 	return keysToReap
 }
 
-func (j *fileHaunter) removeFirst(items *[]haunterKV, count int, size int64) (*string, int, int64, error) {
+func (j *fileHaunter) removeFirst(items *[]haunterKV, count int, size uint64) (*string, int, uint64, error) {
 	var f haunterKV
 
 	f, *items = (*items)[0], (*items)[1:]
 
 	count--
-	size = size - f.info.Size()
+	size = size - uint64(f.info.Size())
 
 	return &f.key, count, size, nil
 }
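
The subtraction in removeFirst now happens in unsigned arithmetic. In Scrub, size is the sum of the sizes of the entries collected into okFiles, so removing one entry's size stays non-negative; the snippet below only illustrates how uint64 subtraction behaves if the operands were ever the other way around (hypothetical values, not from the commit):

```go
package main

import "fmt"

func main() {
	var size uint64 = 100
	var entry uint64 = 200

	// Unsigned subtraction wraps around instead of going negative:
	// 100 - 200 == 2^64 - 100.
	fmt.Println(size - entry) // 18446744073709551516
}
```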

utils/cache/file_haunter_test.go (vendored, 6 changed lines)

@@ -20,7 +20,7 @@ var _ = Describe("FileHaunter", func() {
 	var cacheDir string
 	var err error
 	var maxItems int
-	var maxSize int64
+	var maxSize uint64
 
 	JustBeforeEach(func() {
 		tempDir, _ := os.MkdirTemp("", "spread_fs")
@@ -29,7 +29,9 @@ var _ = Describe("FileHaunter", func() {
 		Expect(err).ToNot(HaveOccurred())
 		DeferCleanup(func() { _ = os.RemoveAll(tempDir) })
 
-		fsCache, err = fscache.NewCacheWithHaunter(fs, fscache.NewLRUHaunterStrategy(cache.NewFileHaunter("", maxItems, maxSize, 300*time.Millisecond)))
+		fsCache, err = fscache.NewCacheWithHaunter(fs, fscache.NewLRUHaunterStrategy(
+			cache.NewFileHaunter("", maxItems, maxSize, 300*time.Millisecond),
+		))
 		Expect(err).ToNot(HaveOccurred())
 		DeferCleanup(fsCache.Clean)
 