Mirror of https://github.com/navidrome/navidrome.git
Commit e4c1b8c5e0 ("Small fixes/comments"), parent 4bb9881f4e
5 changed files with 11 additions and 6 deletions
@@ -126,7 +126,11 @@ func do_put_map(id C.ulong, key string, val *C.char) {
 }
 
 /*
-As I'm working on the new scanner, I see that the `properties` from TagLib is ill-suited to extract multi-valued ID3 frames. I'll have to change the way we do it for ID3, probably by sending the raw frames to Go and mapping there, instead of relying on the auto-mapped `properties`. I think this would reduce our reliance on C++, while also giving us more flexibility, including parsing the USLT / SYLT frames in Go
+	TODO: Validate this assumption:
+	"As I'm working on the new scanner, I see that the `properties` from TagLib is ill-suited to extract multi-valued
+	ID3 frames. I'll have to change the way we do it for ID3, probably by sending the raw frames to Go and mapping there,
+	instead of relying on the auto-mapped `properties`. I think this would reduce our reliance on C++, while also giving
+	us more flexibility, including parsing the USLT / SYLT frames in Go (https://github.com/n10v/id3v2/pull/64)"
 */
 
 //export go_map_put_int
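
The comment above sketches a plan: stop relying on TagLib's auto-mapped `properties` and instead map raw ID3 frames in Go, which preserves multi-valued frames and allows handling USLT/SYLT lyrics natively. As a rough illustration of that idea (not navidrome code), here is a minimal sketch using the id3v2 library referenced in the linked PR (module path github.com/bogem/id3v2/v2); the tag-map shape is an assumption, and SYLT handling is omitted.

package main

import (
	"fmt"
	"log"

	id3v2 "github.com/bogem/id3v2/v2"
)

// collectID3 reads a file's ID3v2 tag and collects frames into a simple
// multi-valued map. This is only a sketch of the "map raw frames in Go" idea.
func collectID3(path string) (map[string][]string, error) {
	tag, err := id3v2.Open(path, id3v2.Options{Parse: true})
	if err != nil {
		return nil, err
	}
	defer tag.Close()

	tags := make(map[string][]string)

	// AllFrames returns frames keyed by frame ID; each ID may map to several
	// frames, which is exactly the information a flat properties map loses.
	for id, frames := range tag.AllFrames() {
		for _, f := range frames {
			switch fr := f.(type) {
			case id3v2.TextFrame:
				tags[id] = append(tags[id], fr.Text)
			case id3v2.UnsynchronisedLyricsFrame: // USLT
				key := fmt.Sprintf("lyrics:%s", fr.Language)
				tags[key] = append(tags[key], fr.Lyrics)
			}
		}
	}
	return tags, nil
}

func main() {
	tags, err := collectID3("track.mp3")
	if err != nil {
		log.Fatal(err)
	}
	for k, v := range tags {
		fmt.Println(k, v)
	}
}

The point of the sketch is that a frame ID can legitimately occur more than once per file, so the natural Go representation is a map of string slices rather than a single-valued property map.
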
@@ -24,6 +24,6 @@ type folderEntry struct {
 	missingTracks model.MediaFiles
 }
 
-func (f *folderEntry) isExpired() bool {
+func (f *folderEntry) isOutdated() bool {
 	return f.updTime.Before(f.modTime)
 }
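
The rename from isExpired to isOutdated matches what the comparison actually expresses: the folder needs rescanning when it was modified on disk after its last recorded update. A tiny illustrative snippet of that semantics (the struct below is a stand-in, not the real folderEntry):

package main

import (
	"fmt"
	"time"
)

// entry is an illustrative stand-in for folderEntry, keeping only the two
// timestamps that the isOutdated check compares.
type entry struct {
	updTime time.Time // when the folder was last updated/scanned
	modTime time.Time // the folder's modification time on disk
}

// isOutdated mirrors the logic in the diff: outdated means the folder was
// modified after the last recorded update.
func (e entry) isOutdated() bool {
	return e.updTime.Before(e.modTime)
}

func main() {
	now := time.Now()
	fresh := entry{updTime: now, modTime: now.Add(-time.Hour)}
	stale := entry{updTime: now.Add(-time.Hour), modTime: now}
	fmt.Println(fresh.isOutdated(), stale.isOutdated()) // false true
}
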
@@ -27,7 +27,7 @@ func processFolder(ctx context.Context) pipeline.StageFn[*folderEntry] {
 	}
 	dbTracks := slice.ToMap(mfs, func(mf model.MediaFile) (string, model.MediaFile) { return mf.Path, mf })
 
-	// Get list of files to import, leave dbTracks with tracks to be removed
+	// Get list of files to import, leave in dbTracks only tracks that are missing
 	var filesToImport []string
 	for afPath, af := range entry.audioFiles {
 		fullPath := filepath.Join(entry.path, afPath)
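
The reworded comment describes the set-difference pattern used here: load the folder's tracks from the DB into a map keyed by path, walk the files found on disk and delete each match from the map, so whatever is left in dbTracks is missing from disk. A hedged, generic sketch of that pattern (names and types are illustrative, not navidrome's):

package main

import "fmt"

// splitImportAndMissing illustrates the processFolder pattern: dbTracks starts
// with everything the DB knows about, and after the loop it only holds paths
// that no longer exist on disk.
func splitImportAndMissing(dbTracks map[string]struct{}, onDisk []string) (toImport, missing []string) {
	for _, path := range onDisk {
		// In this simplified sketch every on-disk file is a candidate for
		// import; the real code also checks whether the file has changed.
		toImport = append(toImport, path)
		// Remove it from dbTracks so that, after the loop, dbTracks only
		// holds tracks that are missing from disk.
		delete(dbTracks, path)
	}
	for path := range dbTracks {
		missing = append(missing, path)
	}
	return toImport, missing
}

func main() {
	db := map[string]struct{}{"a.mp3": {}, "b.mp3": {}}
	imp, miss := splitImportAndMissing(db, []string{"a.mp3", "c.mp3"})
	fmt.Println(imp, miss) // [a.mp3 c.mp3] [b.mp3]
}
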
@@ -31,6 +31,7 @@ func produceFolders(ctx context.Context, ds model.DataStore, libs []model.Librar
 		}
 	}()
 	return func(put func(entry *folderEntry)) error {
+		// TODO Parallelize multiple scanCtx
 		var total int64
 		for scanCtx := range pl.ReadOrDone(ctx, scanCtxChan) {
 			outputChan, err := walkDirTree(ctx, scanCtx)
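
The loop ranges over pl.ReadOrDone(ctx, scanCtxChan), a helper that reads from a channel until it is closed or the context is cancelled. The sketch below shows one common way such a helper can be written; it is illustrative, not necessarily how navidrome's pipeline package implements it:

package main

import (
	"context"
	"fmt"
)

// readOrDone returns a channel that forwards values from in until in is
// closed or ctx is cancelled, whichever comes first. Ranging over the
// returned channel therefore never blocks past a cancellation.
func readOrDone[T any](ctx context.Context, in <-chan T) <-chan T {
	out := make(chan T)
	go func() {
		defer close(out)
		for {
			select {
			case <-ctx.Done():
				return
			case v, ok := <-in:
				if !ok {
					return
				}
				select {
				case out <- v:
				case <-ctx.Done():
					return
				}
			}
		}
	}()
	return out
}

func main() {
	ctx := context.Background()
	in := make(chan int, 2)
	in <- 1
	in <- 2
	close(in)
	for v := range readOrDone(ctx, in) {
		fmt.Println(v)
	}
}
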
@@ -76,7 +77,7 @@ func walkFolder(ctx context.Context, scanCtx *scanContext, currentFolder string,
 		}
 	}
 
-	if !folder.isExpired() && !scanCtx.fullRescan {
+	if !folder.isOutdated() && !scanCtx.fullRescan {
 		return nil
 	}
 	dir := filepath.Clean(currentFolder)
@@ -47,9 +47,9 @@ func (s *scanner2) RescanAll(requestCtx context.Context, fullRescan bool) error
 }
 
 func (s *scanner2) runPipeline(producer pipeline.Producer[*folderEntry], stages ...pipeline.Stage[*folderEntry]) error {
-	if log.CurrentLevel() >= log.LevelDebug {
+	if log.IsGreaterOrEqualTo(log.LevelDebug) {
 		metrics, err := pipeline.Measure(producer, stages...)
-		log.Trace(metrics.String())
+		log.Info(metrics.String(), err)
 		return err
 	}
 	return pipeline.Do(producer, stages...)
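
The change from log.CurrentLevel() >= log.LevelDebug to log.IsGreaterOrEqualTo(log.LevelDebug) hides the numeric ordering of levels behind a named helper, and the measured run now logs the metrics at Info together with any pipeline error instead of at Trace. The snippet below only illustrates what such a level check amounts to; navidrome's actual log package may define levels and the check differently:

package main

import "fmt"

// Level is an illustrative log level; higher values are more verbose.
type Level int

const (
	LevelError Level = iota
	LevelWarn
	LevelInfo
	LevelDebug
	LevelTrace
)

// currentLevel stands in for the logger's configured level.
var currentLevel = LevelInfo

// IsGreaterOrEqualTo reports whether the configured level is at least as
// verbose as l, which is what runPipeline uses to decide whether to measure
// the pipeline.
func IsGreaterOrEqualTo(l Level) bool {
	return currentLevel >= l
}

func main() {
	fmt.Println(IsGreaterOrEqualTo(LevelDebug)) // false while currentLevel is Info
}
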