mirror of
https://github.com/navidrome/navidrome.git
synced 2025-04-03 20:47:35 +03:00
Call ffmpeg
in batches
This commit is contained in:
parent
6a3dabbb06
commit
3190611ec8
4 changed files with 75 additions and 36 deletions
|
@ -120,17 +120,7 @@ func (r *playlistTrackRepository) Update(mediaFileIds []string) error {
|
|||
}
|
||||
|
||||
// Break the track list in chunks to avoid hitting SQLITE_MAX_FUNCTION_ARG limit
|
||||
numTracks := len(mediaFileIds)
|
||||
const chunkSize = 50
|
||||
var chunks [][]string
|
||||
for i := 0; i < numTracks; i += chunkSize {
|
||||
end := i + chunkSize
|
||||
if end > numTracks {
|
||||
end = numTracks
|
||||
}
|
||||
|
||||
chunks = append(chunks, mediaFileIds[i:end])
|
||||
}
|
||||
chunks := utils.BreakUpStringSlice(mediaFileIds, 50)
|
||||
|
||||
// Add new tracks, chunk by chunk
|
||||
pos := 1
|
||||
|
|
|
@ -31,8 +31,18 @@ func NewTagScanner(rootFolder string, ds model.DataStore) *TagScanner {
|
|||
}
|
||||
}
|
||||
|
||||
type ArtistMap map[string]bool
|
||||
type AlbumMap map[string]bool
|
||||
// ArtistMap and AlbumMap record the IDs of artists/albums touched during a
// scan, pending a batched flush to the datastore.
type (
	ArtistMap map[string]bool
	AlbumMap  map[string]bool
)

const (
	// batchSize is how many album/artist updates are accumulated before flushing.
	batchSize = 5

	// filesBatchSize caps how many files are handed to ffmpeg in a single
	// metadata-extraction call.
	filesBatchSize = 100
)
|
||||
|
||||
// Scan algorithm overview:
|
||||
// For each changed: Get all files from DB that starts with the folder, scan each file:
|
||||
|
@ -162,29 +172,33 @@ func (s *TagScanner) processChangedDir(ctx context.Context, dir string, updatedA
|
|||
numPurgedTracks := 0
|
||||
|
||||
if len(filesToUpdate) > 0 {
|
||||
// Load tracks Metadata from the folder
|
||||
newTracks, err := s.loadTracks(filesToUpdate)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Break the file list in chunks to avoid calling ffmpeg with too many parameters
|
||||
chunks := utils.BreakUpStringSlice(filesToUpdate, filesBatchSize)
|
||||
for _, chunk := range chunks {
|
||||
// Load tracks Metadata from the folder
|
||||
newTracks, err := s.loadTracks(chunk)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If track from folder is newer than the one in DB, update/insert in DB
|
||||
log.Trace("Updating mediaFiles in DB", "dir", dir, "files", filesToUpdate, "numFiles", len(filesToUpdate))
|
||||
for i := range newTracks {
|
||||
n := newTracks[i]
|
||||
err := s.ds.MediaFile(ctx).Put(&n)
|
||||
if err != nil {
|
||||
return err
|
||||
// If track from folder is newer than the one in DB, update/insert in DB
|
||||
log.Trace("Updating mediaFiles in DB", "dir", dir, "files", chunk, "numFiles", len(chunk))
|
||||
for i := range newTracks {
|
||||
n := newTracks[i]
|
||||
err := s.ds.MediaFile(ctx).Put(&n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = s.updateAlbum(ctx, n.AlbumID, updatedAlbums)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = s.updateArtist(ctx, n.AlbumArtistID, updatedArtists)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
numUpdatedTracks++
|
||||
}
|
||||
err = s.updateAlbum(ctx, n.AlbumID, updatedAlbums)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = s.updateArtist(ctx, n.AlbumArtistID, updatedArtists)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
numUpdatedTracks++
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -213,7 +227,7 @@ func (s *TagScanner) processChangedDir(ctx context.Context, dir string, updatedA
|
|||
|
||||
func (s *TagScanner) updateAlbum(ctx context.Context, albumId string, updatedAlbums AlbumMap) error {
|
||||
updatedAlbums[albumId] = true
|
||||
if len(updatedAlbums) > 5 {
|
||||
if len(updatedAlbums) >= batchSize {
|
||||
err := s.flushAlbums(ctx, updatedAlbums)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -224,7 +238,7 @@ func (s *TagScanner) updateAlbum(ctx context.Context, albumId string, updatedAlb
|
|||
|
||||
func (s *TagScanner) updateArtist(ctx context.Context, artistId string, updatedArtists ArtistMap) error {
|
||||
updatedArtists[artistId] = true
|
||||
if len(updatedArtists) > 5 {
|
||||
if len(updatedArtists) >= batchSize {
|
||||
err := s.flushArtists(ctx, updatedArtists)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
|
@ -38,3 +38,17 @@ func MoveString(array []string, srcIndex int, dstIndex int) []string {
|
|||
value := array[srcIndex]
|
||||
return InsertString(RemoveString(array, srcIndex), value, dstIndex)
|
||||
}
|
||||
|
||||
// BreakUpStringSlice splits items into consecutive chunks of at most
// chunkSize elements, preserving order. The chunks are sub-slices of items
// (no copying), so they share its backing array.
//
// It returns nil when items is empty, and also when chunkSize is less than 1
// (a non-positive chunkSize would otherwise make the loop spin forever).
func BreakUpStringSlice(items []string, chunkSize int) [][]string {
	if chunkSize < 1 || len(items) == 0 {
		return nil
	}
	numItems := len(items)
	// Pre-size to the exact chunk count to avoid repeated slice growth.
	chunks := make([][]string, 0, (numItems+chunkSize-1)/chunkSize)
	for start := 0; start < numItems; start += chunkSize {
		end := start + chunkSize
		if end > numItems {
			end = numItems
		}
		chunks = append(chunks, items[start:end])
	}
	return chunks
}
|
||||
|
|
|
@ -60,4 +60,25 @@ var _ = Describe("Strings", func() {
|
|||
Expect(MoveString([]string{"1", "2", "3"}, 1, 1)).To(ConsistOf("1", "2", "3"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("BreakUpStringSlice", func() {
	It("returns no chunks if slice is empty", func() {
		var input []string
		Expect(BreakUpStringSlice(input, 10)).To(HaveLen(0))
	})
	It("returns the slice in one chunk if len < chunkSize", func() {
		result := BreakUpStringSlice([]string{"a", "b", "c"}, 10)
		Expect(result).To(HaveLen(1))
		Expect(result[0]).To(ConsistOf("a", "b", "c"))
	})
	It("breaks up the slice if len > chunkSize", func() {
		result := BreakUpStringSlice([]string{"a", "b", "c", "d", "e"}, 3)
		Expect(result).To(HaveLen(2))
		Expect(result[0]).To(ConsistOf("a", "b", "c"))
		Expect(result[1]).To(ConsistOf("d", "e"))
	})
})
|
||||
})
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue