Fix: model loading when llama.cpp is started after gf-lt

This commit is contained in:
Grail Finder
2025-12-20 14:21:40 +03:00
parent 0ca709b7c6
commit ba3330ee54
3 changed files with 34 additions and 0 deletions

5
tui.go
View File

@@ -12,11 +12,14 @@ import (
"path"
"strconv"
"strings"
"sync"
"github.com/gdamore/tcell/v2"
"github.com/rivo/tview"
)
var _ = sync.RWMutex{}
var (
app *tview.Application
pages *tview.Pages
@@ -988,11 +991,13 @@ func init() {
}
updateStatusLine()
} else {
localModelsMu.RLock()
if len(LocalModels) > 0 {
currentLocalModelIndex = (currentLocalModelIndex + 1) % len(LocalModels)
chatBody.Model = LocalModels[currentLocalModelIndex]
cfg.CurrentModel = chatBody.Model
}
localModelsMu.RUnlock()
updateStatusLine()
// // For non-OpenRouter APIs, use the old logic
// go func() {