Enha: atomic global vars instead of mutexes

Grail Finder
2026-03-07 11:26:07 +03:00
parent a842b00e96
commit 8c4d01ab3b
4 changed files with 75 additions and 70 deletions
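
In outline, the change swaps each sync.RWMutex-guarded global for an atomic.Value slot that is seeded in init() and read back with a type assertion; writers replace the whole value instead of mutating it under a lock. A minimal, self-contained sketch of that pattern, assuming a single []string slot (the names below are illustrative, not the repository's):

package main

import (
	"fmt"
	"sync/atomic"
)

// localModels always stores []string, mirroring LocalModels in bot.go.
var localModels atomic.Value

func init() {
	// Seed the slot so Load never returns an untyped nil and the
	// assertion in currentModels cannot panic before the first fetch.
	localModels.Store([]string{})
}

// Readers load and assert the one concrete type the slot ever holds.
func currentModels() []string {
	return localModels.Load().([]string)
}

// Writers build a fresh slice and Store it; no lock is taken, but the
// stored slice must be treated as read-only afterwards.
func refreshModels(fetched []string) {
	localModels.Store(fetched)
}

func main() {
	fmt.Println(len(currentModels())) // 0 before any fetch
	refreshModels([]string{"gemma-3-27b-it", "llama-3.3-70b-instruct"})
	fmt.Println(currentModels())
}

Note that atomic.Value requires every Store on a given slot to use the same concrete type, which is why the pointer-typed slots below are seeded with typed nils such as (*models.LCPModels)(nil) rather than left empty.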

bot.go

@@ -22,7 +22,7 @@ import (
"slices"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
)
@@ -49,7 +49,6 @@ var (
//nolint:unused // TTS_ENABLED conditionally uses this
orator Orator
asr STT
localModelsMu sync.RWMutex
defaultLCPProps = map[string]float32{
"temperature": 0.8,
"dry_multiplier": 0.0,
@@ -64,11 +63,17 @@ var (
"google/gemma-3-27b-it:free",
"meta-llama/llama-3.3-70b-instruct:free",
}
LocalModels = []string{}
localModelsData *models.LCPModels
orModelsData *models.ORModels
LocalModels atomic.Value // stores []string
localModelsData atomic.Value // stores *models.LCPModels
orModelsData atomic.Value // stores *models.ORModels
)
func init() {
LocalModels.Store([]string{})
localModelsData.Store((*models.LCPModels)(nil))
orModelsData.Store((*models.ORModels)(nil))
}
var thinkBlockRE = regexp.MustCompile(`(?s)<think>.*?</think>`)
// parseKnownToTag extracts known_to list from content using configured tag.
@@ -356,7 +361,7 @@ func fetchORModels(free bool) ([]string, error) {
if err := json.NewDecoder(resp.Body).Decode(data); err != nil {
return nil, err
}
orModelsData = data
orModelsData.Store(data)
freeModels := data.ListModels(free)
return freeModels, nil
}
@@ -418,9 +423,7 @@ func fetchLCPModelsWithStatus() (*models.LCPModels, error) {
if err := json.NewDecoder(resp.Body).Decode(data); err != nil {
return nil, err
}
localModelsMu.Lock()
localModelsData = data
localModelsMu.Unlock()
localModelsData.Store(data)
return data, nil
}
@@ -1403,7 +1406,7 @@ func charToStart(agentName string, keepSysP bool) bool {
func updateModelLists() {
var err error
if cfg.OpenRouterToken != "" {
ORFreeModels, err = fetchORModels(true)
_, err := fetchORModels(true)
if err != nil {
logger.Warn("failed to fetch or models", "error", err)
}
@@ -1413,22 +1416,19 @@ func updateModelLists() {
if err != nil {
logger.Warn("failed to fetch llama.cpp models", "error", err)
}
localModelsMu.Lock()
LocalModels = ml
localModelsMu.Unlock()
LocalModels.Store(ml)
for statusLineWidget == nil {
time.Sleep(time.Millisecond * 100)
}
// set already loaded model in llama.cpp
if strings.Contains(cfg.CurrentAPI, "localhost") || strings.Contains(cfg.CurrentAPI, "127.0.0.1") {
localModelsMu.Lock()
defer localModelsMu.Unlock()
for i := range LocalModels {
if strings.Contains(LocalModels[i], models.LoadedMark) {
m := strings.TrimPrefix(LocalModels[i], models.LoadedMark)
modelList := LocalModels.Load().([]string)
for i := range modelList {
if strings.Contains(modelList[i], models.LoadedMark) {
m := strings.TrimPrefix(modelList[i], models.LoadedMark)
cfg.CurrentModel = m
chatBody.Model = m
cachedModelColor = "green"
cachedModelColor.Store("green")
updateStatusLine()
updateToolCapabilities()
app.Draw()
@@ -1439,21 +1439,17 @@ func updateModelLists() {
}
func refreshLocalModelsIfEmpty() {
localModelsMu.RLock()
if len(LocalModels) > 0 {
localModelsMu.RUnlock()
models := LocalModels.Load().([]string)
if len(models) > 0 {
return
}
localModelsMu.RUnlock()
// try to fetch
models, err := fetchLCPModels()
if err != nil {
logger.Warn("failed to fetch llama.cpp models", "error", err)
return
}
localModelsMu.Lock()
LocalModels = models
localModelsMu.Unlock()
LocalModels.Store(models)
}
func summarizeAndStartNewChat() {


@@ -16,11 +16,17 @@ import (
"time"
"unicode"
"sync/atomic"
"github.com/rivo/tview"
)
// Cached model color - updated by background goroutine
var cachedModelColor string = "orange"
var cachedModelColor atomic.Value // stores string
func init() {
cachedModelColor.Store("orange")
}
// startModelColorUpdater starts a background goroutine that periodically updates
// the cached model color. Only runs HTTP requests for local llama.cpp APIs.
@@ -39,20 +45,20 @@ func startModelColorUpdater() {
// updateCachedModelColor updates the global cachedModelColor variable
func updateCachedModelColor() {
if !isLocalLlamacpp() {
cachedModelColor = "orange"
cachedModelColor.Store("orange")
return
}
// Check if model is loaded
loaded, err := isModelLoaded(chatBody.GetModel())
if err != nil {
// On error, assume not loaded (red)
cachedModelColor = "red"
cachedModelColor.Store("red")
return
}
if loaded {
cachedModelColor = "green"
cachedModelColor.Store("green")
} else {
cachedModelColor = "red"
cachedModelColor.Store("red")
}
}
@@ -335,7 +341,7 @@ func isLocalLlamacpp() bool {
// The cached value is updated by a background goroutine every 5 seconds.
// For non-local models, returns orange. For local llama.cpp models, returns green if loaded, red if not.
func getModelColor() string {
return cachedModelColor
return cachedModelColor.Load().(string)
}
func makeStatusLine() string {
@@ -421,20 +427,27 @@ func getMaxContextTokens() int {
modelName := chatBody.GetModel()
switch {
case strings.Contains(cfg.CurrentAPI, "openrouter"):
if orModelsData != nil {
for i := range orModelsData.Data {
m := &orModelsData.Data[i]
ord := orModelsData.Load()
if ord != nil {
data := ord.(*models.ORModels)
if data != nil {
for i := range data.Data {
m := &data.Data[i]
if m.ID == modelName {
return m.ContextLength
}
}
}
}
case strings.Contains(cfg.CurrentAPI, "deepseek"):
return deepseekContext
default:
if localModelsData != nil {
for i := range localModelsData.Data {
m := &localModelsData.Data[i]
lmd := localModelsData.Load()
if lmd != nil {
data := lmd.(*models.LCPModels)
if data != nil {
for i := range data.Data {
m := &data.Data[i]
if m.ID == modelName {
for _, arg := range m.Status.Args {
if strings.HasPrefix(arg, "--ctx-size") {
@@ -463,6 +476,7 @@ func getMaxContextTokens() int {
}
}
}
}
return 0
}
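
For the pointer-typed slots (localModelsData, orModelsData), the hunk above loads the interface, checks it for nil, asserts the concrete type, and then checks the pointer itself. A small sketch of why both guards are useful, assuming a placeholder orModels type rather than the repository's models.ORModels:

package main

import (
	"fmt"
	"sync/atomic"
)

// orModels stands in for something like *models.ORModels.
type orModels struct{ ContextLength int }

var data atomic.Value // stores *orModels

func maxContext() int {
	v := data.Load()
	if v == nil {
		// Nothing has ever been stored: Load returns an untyped nil,
		// and v.(*orModels) would panic. With the init() seeding in the
		// commit this branch never triggers, but it keeps the read safe.
		return 0
	}
	m := v.(*orModels)
	if m == nil {
		// A typed nil was stored (as init does) before the first fetch.
		return 0
	}
	return m.ContextLength
}

func main() {
	fmt.Println(maxContext()) // 0: nothing stored yet
	data.Store((*orModels)(nil))
	fmt.Println(maxContext()) // 0: typed nil stored
	data.Store(&orModels{ContextLength: 8192})
	fmt.Println(maxContext()) // 8192
}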


@@ -22,7 +22,7 @@ func showModelSelectionPopup() {
models, err := fetchLCPModelsWithLoadStatus()
if err != nil {
logger.Error("failed to fetch models with load status", "error", err)
return LocalModels
return LocalModels.Load().([]string)
}
return models
}
@@ -30,7 +30,8 @@ func showModelSelectionPopup() {
modelList := getModelListForAPI(cfg.CurrentAPI)
// Check for empty options list
if len(modelList) == 0 {
logger.Warn("empty model list for", "api", cfg.CurrentAPI, "localModelsLen", len(LocalModels), "orModelsLen", len(ORFreeModels))
localModels := LocalModels.Load().([]string)
logger.Warn("empty model list for", "api", cfg.CurrentAPI, "localModelsLen", len(localModels), "orModelsLen", len(ORFreeModels))
var message string
switch {
case strings.Contains(cfg.CurrentAPI, "openrouter.ai"):
@@ -150,9 +151,7 @@ func showAPILinkSelectionPopup() {
}
// Assume local llama.cpp
refreshLocalModelsIfEmpty()
localModelsMu.RLock()
defer localModelsMu.RUnlock()
return LocalModels
return LocalModels.Load().([]string)
}
newModelList := getModelListForAPI(cfg.CurrentAPI)
// Ensure chatBody.Model is in the new list; if not, set to first available model


@@ -4,14 +4,11 @@ import (
"fmt"
"strconv"
"strings"
"sync"
"github.com/gdamore/tcell/v2"
"github.com/rivo/tview"
)
var _ = sync.RWMutex{}
// Define constants for cell types
const (
CellTypeCheckbox = "checkbox"
@@ -157,9 +154,7 @@ func makePropsTable(props map[string]float32) *tview.Table {
}
// Assume local llama.cpp
refreshLocalModelsIfEmpty()
localModelsMu.RLock()
defer localModelsMu.RUnlock()
return LocalModels
return LocalModels.Load().([]string)
}
// Add input fields
addInputRow("New char to write msg as", "", func(text string) {
@@ -262,7 +257,8 @@ func makePropsTable(props map[string]float32) *tview.Table {
// Check for empty options list
if len(data.Options) == 0 {
logger.Warn("empty options list for", "label", label, "api", cfg.CurrentAPI, "localModelsLen", len(LocalModels), "orModelsLen", len(ORFreeModels))
localModels := LocalModels.Load().([]string)
logger.Warn("empty options list for", "label", label, "api", cfg.CurrentAPI, "localModelsLen", len(localModels), "orModelsLen", len(ORFreeModels))
message := "No options available for " + label
if label == "Select a model" {
switch {