Fix: model load when llama.cpp is started after gf-lt

This commit is contained in:
Grail Finder
2025-12-20 14:21:40 +03:00
parent 0ca709b7c6
commit ba3330ee54
3 changed files with 34 additions and 0 deletions

View File

@@ -5,11 +5,14 @@ import (
"slices"
"strconv"
"strings"
"sync"
"github.com/gdamore/tcell/v2"
"github.com/rivo/tview"
)
var _ = sync.RWMutex{}
// Define constants for cell types
const (
CellTypeCheckbox = "checkbox"
@@ -138,6 +141,10 @@ func makePropsTable(props map[string]float32) *tview.Table {
} else if strings.Contains(api, "openrouter.ai") {
return ORFreeModels
}
// Assume local llama.cpp
refreshLocalModelsIfEmpty()
localModelsMu.RLock()
defer localModelsMu.RUnlock()
return LocalModels
}
var modelRowIndex int // will be set before model row is added