Enha: atomic global vars instead of mutexes

Grail Finder
2026-03-07 11:26:07 +03:00
parent a842b00e96
commit 8c4d01ab3b
4 changed files with 75 additions and 70 deletions
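Note (reviewer sketch, not part of the commit): the change swaps sync.RWMutex-guarded globals for sync/atomic's atomic.Value, so readers take a lock-free snapshot and writers publish a freshly built value. A minimal, self-contained illustration of the before/after pattern, with invented names (modelsMu, modelsAtomic, setLocked, setAtomic):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// Before: readers and writers share a sync.RWMutex around a plain global.
var (
	modelsMu sync.RWMutex
	models   []string
)

func setLocked(m []string) {
	modelsMu.Lock()
	models = m
	modelsMu.Unlock()
}

func getLocked() []string {
	modelsMu.RLock()
	defer modelsMu.RUnlock()
	return models
}

// After: a lock-free atomic.Value that holds an immutable snapshot.
var modelsAtomic atomic.Value // stores []string

func init() {
	modelsAtomic.Store([]string{}) // seed so Load never returns an untyped nil
}

func setAtomic(m []string) { modelsAtomic.Store(m) }

func getAtomic() []string { return modelsAtomic.Load().([]string) }

func main() {
	setLocked([]string{"a"})
	setAtomic([]string{"a", "b"})
	fmt.Println(getLocked(), getAtomic())
}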

bot.go

@@ -22,7 +22,7 @@ import (
"slices"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
)
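Note (illustration, not from the repository): the import swap from "sync" to "sync/atomic" is what makes atomic.Value available here. Since Go 1.19 the same package also offers the generic atomic.Pointer[T], which gives the same lock-free publish/load for pointer-typed globals without interface{} type assertions; a hypothetical alternative shape, using a stand-in type:

package main

import (
	"fmt"
	"sync/atomic"
)

// lcpModels is a stand-in for the repository's models.LCPModels type.
type lcpModels struct{ Names []string }

// Zero value is ready to use; Load returns nil until something is stored.
var localData atomic.Pointer[lcpModels]

func main() {
	fmt.Println(localData.Load() == nil) // true: no typed-nil seeding needed
	localData.Store(&lcpModels{Names: []string{"m1"}})
	fmt.Println(localData.Load().Names) // [m1]
}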
@@ -49,7 +49,6 @@ var (
 	//nolint:unused // TTS_ENABLED conditionally uses this
 	orator Orator
 	asr STT
-	localModelsMu sync.RWMutex
 	defaultLCPProps = map[string]float32{
 		"temperature": 0.8,
 		"dry_multiplier": 0.0,
@@ -64,11 +63,17 @@ var (
"google/gemma-3-27b-it:free",
"meta-llama/llama-3.3-70b-instruct:free",
}
LocalModels = []string{}
localModelsData *models.LCPModels
orModelsData *models.ORModels
LocalModels atomic.Value // stores []string
localModelsData atomic.Value // stores *models.LCPModels
orModelsData atomic.Value // stores *models.ORModels
)
func init() {
LocalModels.Store([]string{})
localModelsData.Store((*models.LCPModels)(nil))
orModelsData.Store((*models.ORModels)(nil))
}
var thinkBlockRE = regexp.MustCompile(`(?s)<think>.*?</think>`)
// parseKnownToTag extracts known_to list from content using configured tag.
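Note (illustration): atomic.Value requires every Store to use the same concrete type, and Load on a never-stored Value returns an untyped nil, so a bare type assertion would panic. Seeding typed nils in the new init(), as above, keeps early reads safe. A stand-alone sketch with a stand-in catalog type (not the repository's models.LCPModels):

package main

import (
	"fmt"
	"sync/atomic"
)

// catalog is a stand-in for models.LCPModels.
type catalog struct{ Names []string }

var current atomic.Value // stores *catalog

func init() {
	// Typed nil: Load().(*catalog) succeeds even before a real value is published.
	current.Store((*catalog)(nil))
}

func main() {
	c := current.Load().(*catalog) // would panic if the Value had never been stored
	fmt.Println(c == nil)          // true until a real catalog is stored
	current.Store(&catalog{Names: []string{"m1"}})
	fmt.Println(current.Load().(*catalog).Names)
}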
@@ -356,7 +361,7 @@ func fetchORModels(free bool) ([]string, error) {
 	if err := json.NewDecoder(resp.Body).Decode(data); err != nil {
 		return nil, err
 	}
-	orModelsData = data
+	orModelsData.Store(data)
 	freeModels := data.ListModels(free)
 	return freeModels, nil
 }
@@ -418,9 +423,7 @@ func fetchLCPModelsWithStatus() (*models.LCPModels, error) {
 	if err := json.NewDecoder(resp.Body).Decode(data); err != nil {
 		return nil, err
 	}
-	localModelsMu.Lock()
-	localModelsData = data
-	localModelsMu.Unlock()
+	localModelsData.Store(data)
 	return data, nil
 }
 
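Note (illustration): with the mutex gone, correctness rests on a build-then-publish pattern: the decoded struct is fully populated before Store, and it must not be mutated afterwards; an update means constructing a fresh value and storing that. Hypothetical sketch (lcpModels and publish are invented names):

package main

import (
	"fmt"
	"sync/atomic"
)

// lcpModels is a stand-in for the decoded models.LCPModels.
type lcpModels struct{ Names []string }

var data atomic.Value // stores *lcpModels

// publish builds a complete value first and only then makes it visible.
func publish(names []string) {
	fresh := &lcpModels{Names: append([]string(nil), names...)}
	// Readers now see either the previous value or this one, never a half-built struct.
	data.Store(fresh)
}

func main() {
	publish([]string{"llama", "gemma"})
	fmt.Println(data.Load().(*lcpModels).Names)
}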
@@ -1403,7 +1406,7 @@ func charToStart(agentName string, keepSysP bool) bool {
 func updateModelLists() {
 	var err error
 	if cfg.OpenRouterToken != "" {
-		ORFreeModels, err = fetchORModels(true)
+		_, err := fetchORModels(true)
 		if err != nil {
 			logger.Warn("failed to fetch or models", "error", err)
 		}
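Note (illustration): the new call uses :=, so this err is a fresh variable scoped to the if block, shadowing the err from "var err error" above; both are checked immediately after assignment, so behavior is unchanged. A minimal demonstration of that scoping:

package main

import (
	"errors"
	"fmt"
)

func mayFail() (string, error) { return "", errors.New("boom") }

func main() {
	var err error
	if true {
		_, err := mayFail() // := declares a new err scoped to this block
		fmt.Println(err)    // boom
	}
	fmt.Println(err) // <nil>: the outer err was never assigned
}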
@@ -1413,22 +1416,19 @@ func updateModelLists() {
 	if err != nil {
 		logger.Warn("failed to fetch llama.cpp models", "error", err)
 	}
-	localModelsMu.Lock()
-	LocalModels = ml
-	localModelsMu.Unlock()
+	LocalModels.Store(ml)
 	for statusLineWidget == nil {
 		time.Sleep(time.Millisecond * 100)
 	}
 	// set already loaded model in llama.cpp
 	if strings.Contains(cfg.CurrentAPI, "localhost") || strings.Contains(cfg.CurrentAPI, "127.0.0.1") {
-		localModelsMu.Lock()
-		defer localModelsMu.Unlock()
-		for i := range LocalModels {
-			if strings.Contains(LocalModels[i], models.LoadedMark) {
-				m := strings.TrimPrefix(LocalModels[i], models.LoadedMark)
+		modelList := LocalModels.Load().([]string)
+		for i := range modelList {
+			if strings.Contains(modelList[i], models.LoadedMark) {
+				m := strings.TrimPrefix(modelList[i], models.LoadedMark)
 				cfg.CurrentModel = m
 				chatBody.Model = m
-				cachedModelColor = "green"
+				cachedModelColor.Store("green")
 				updateStatusLine()
 				updateToolCapabilities()
 				app.Draw()
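Note (illustration): Load returns whatever slice was last stored, so the loop above iterates a stable snapshot even if another goroutine stores a new list mid-iteration; the snapshot must be treated as read-only, and replacement always goes through Store with a new slice. Sketch with invented names:

package main

import (
	"fmt"
	"sync/atomic"
)

var modelList atomic.Value // stores []string

func init() { modelList.Store([]string{"old-a", "old-b"}) }

func main() {
	snapshot := modelList.Load().([]string) // taken once before the loop
	modelList.Store([]string{"new-a"})      // a concurrent-style replacement
	for _, m := range snapshot {
		fmt.Println(m) // still prints old-a and old-b: the loop sees a stable snapshot
	}
	fmt.Println(modelList.Load().([]string)) // [new-a]
}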
@@ -1439,21 +1439,17 @@ func updateModelLists() {
 }
 
 func refreshLocalModelsIfEmpty() {
-	localModelsMu.RLock()
-	if len(LocalModels) > 0 {
-		localModelsMu.RUnlock()
+	models := LocalModels.Load().([]string)
+	if len(models) > 0 {
 		return
 	}
-	localModelsMu.RUnlock()
 	// try to fetch
 	models, err := fetchLCPModels()
 	if err != nil {
 		logger.Warn("failed to fetch llama.cpp models", "error", err)
 		return
 	}
-	localModelsMu.Lock()
-	LocalModels = models
-	localModelsMu.Unlock()
+	LocalModels.Store(models)
 }
 
 func summarizeAndStartNewChat() {
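Note (illustration): refreshLocalModelsIfEmpty is now a Load / length check / later Store sequence, so two goroutines can both observe an empty list and both fetch; the old version released the read lock before fetching and had the same window, and a duplicate fetch is harmless. If exactly one fetch were ever wanted, a sync.Once could serialize it, as in this sketch with hypothetical names (fetchModels stands in for fetchLCPModels):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

var (
	modelList atomic.Value // stores []string
	fetchOnce sync.Once
)

func init() { modelList.Store([]string{}) }

// fetchModels stands in for the real fetchLCPModels call.
func fetchModels() []string { return []string{"model-a", "model-b"} }

func refreshIfEmpty() {
	if len(modelList.Load().([]string)) > 0 {
		return
	}
	// At most one goroutine performs the fetch; the rest return immediately.
	fetchOnce.Do(func() {
		modelList.Store(fetchModels())
	})
}

func main() {
	refreshIfEmpty()
	fmt.Println(modelList.Load().([]string))
}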