Enhance: add "(loaded)" suffix to a model's name if the model is loaded

This commit is contained in:
Grail Finder
2026-02-21 20:42:43 +03:00
parent 66ccb7a732
commit 2c694e2b2b
2 changed files with 34 additions and 4 deletions

24
bot.go
View File

@@ -403,6 +403,23 @@ func fetchLCPModels() ([]string, error) {
return localModels, nil return localModels, nil
} }
// fetchLCPModelsWithLoadStatus lists the local llama.cpp model names,
// appending a " (loaded)" suffix to any model the server reports as
// currently loaded. It returns an error if the status fetch fails.
func fetchLCPModelsWithLoadStatus() ([]string, error) {
	resp, err := fetchLCPModelsWithStatus()
	if err != nil {
		return nil, err
	}
	// Pre-size the output: one display name per model entry.
	names := make([]string, 0, len(resp.Data))
	for _, entry := range resp.Data {
		name := entry.ID
		if entry.Status.Value == "loaded" {
			name += " (loaded)"
		}
		names = append(names, name)
	}
	return names, nil
}
// fetchLCPModelsWithStatus returns the full LCPModels struct including status information. // fetchLCPModelsWithStatus returns the full LCPModels struct including status information.
func fetchLCPModelsWithStatus() (*models.LCPModels, error) { func fetchLCPModelsWithStatus() (*models.LCPModels, error) {
resp, err := http.Get(cfg.FetchModelNameAPI) resp, err := http.Get(cfg.FetchModelNameAPI)
@@ -832,6 +849,7 @@ func chatRound(r *models.ChatRoundReq) error {
// Variables for handling thinking blocks during streaming // Variables for handling thinking blocks during streaming
inThinkingBlock := false inThinkingBlock := false
thinkingBuffer := strings.Builder{} thinkingBuffer := strings.Builder{}
justExitedThinkingCollapsed := false
out: out:
for { for {
select { select {
@@ -859,6 +877,7 @@ out:
if thinkingCollapsed { if thinkingCollapsed {
// Thinking already displayed as placeholder, just update respText // Thinking already displayed as placeholder, just update respText
respText.WriteString(chunk) respText.WriteString(chunk)
justExitedThinkingCollapsed = true
if scrollToEndEnabled { if scrollToEndEnabled {
textView.ScrollToEnd() textView.ScrollToEnd()
} }
@@ -872,6 +891,11 @@ out:
} }
// If not collapsed, fall through to normal display // If not collapsed, fall through to normal display
} }
// Add spacing after collapsed thinking block before real response
if justExitedThinkingCollapsed {
chunk = "\n\n" + chunk
justExitedThinkingCollapsed = false
}
fmt.Fprint(textView, chunk) fmt.Fprint(textView, chunk)
respText.WriteString(chunk) respText.WriteString(chunk)
// Update the message in chatBody.Messages so it persists during Alt+T // Update the message in chatBody.Messages so it persists during Alt+T

View File

@@ -17,9 +17,13 @@ func showModelSelectionPopup() {
} else if strings.Contains(api, "openrouter.ai") { } else if strings.Contains(api, "openrouter.ai") {
return ORFreeModels return ORFreeModels
} }
// Assume local llama.cpp // Assume local llama.cpp - fetch with load status
updateModelLists() models, err := fetchLCPModelsWithLoadStatus()
return LocalModels if err != nil {
logger.Error("failed to fetch models with load status", "error", err)
return LocalModels
}
return models
} }
// Get the current model list based on the API // Get the current model list based on the API
modelList := getModelListForAPI(cfg.CurrentAPI) modelList := getModelListForAPI(cfg.CurrentAPI)
@@ -57,8 +61,10 @@ func showModelSelectionPopup() {
modelListWidget.SetCurrentItem(currentModelIndex) modelListWidget.SetCurrentItem(currentModelIndex)
} }
modelListWidget.SetSelectedFunc(func(index int, mainText string, secondaryText string, shortcut rune) { modelListWidget.SetSelectedFunc(func(index int, mainText string, secondaryText string, shortcut rune) {
// Strip "(loaded)" suffix if present for local llama.cpp models
modelName := strings.TrimSuffix(mainText, " (loaded)")
// Update the model in both chatBody and config // Update the model in both chatBody and config
chatBody.Model = mainText chatBody.Model = modelName
cfg.CurrentModel = chatBody.Model cfg.CurrentModel = chatBody.Model
// Remove the popup page // Remove the popup page
pages.RemovePage("modelSelectionPopup") pages.RemovePage("modelSelectionPopup")