4 Commits

Author SHA1 Message Date
Grail Finder  2c495253c2  Chore: panic is not solved (WIP)  2026-03-07 09:40:58 +03:00
Grail Finder  118a0a0d55  Chore: cleanup logs  2026-03-07 09:08:01 +03:00
Grail Finder  44633d64c6  Chore: move to own file  2026-03-07 08:52:10 +03:00
Grail Finder  0598e3e86d  Feat: kokoro onnx (WIP)  2026-03-07 08:35:44 +03:00
15 changed files with 695 additions and 579 deletions

View File

@@ -1,4 +1,4 @@
.PHONY: setconfig run lint lintall install-linters setup-whisper build-whisper download-whisper-model docker-up docker-down docker-logs noextra-run installdelve checkdelve fetch-onnx install-onnx-deps
.PHONY: setconfig run lint lintall install-linters setup-whisper build-whisper download-whisper-model docker-up docker-down docker-logs noextra-run installdelve checkdelve fetch-onnx install-onnx-deps fetch-kokoro-voices install-espeak
run: setconfig
go build -tags extra -o gf-lt && ./gf-lt
@@ -33,6 +33,9 @@ lintall: lint
fetch-onnx:
mkdir -p onnx/embedgemma && curl -o onnx/embedgemma/config.json -L https://huggingface.co/onnx-community/embeddinggemma-300m-ONNX/resolve/main/config.json && curl -o onnx/embedgemma/tokenizer.json -L https://huggingface.co/onnx-community/embeddinggemma-300m-ONNX/resolve/main/tokenizer.json && curl -o onnx/embedgemma/model_q4.onnx -L https://huggingface.co/onnx-community/embeddinggemma-300m-ONNX/resolve/main/onnx/model_q4.onnx && curl -o onnx/embedgemma/model_q4.onnx_data -L https://huggingface.co/onnx-community/embeddinggemma-300m-ONNX/resolve/main/onnx/model_q4.onnx_data?download=true
fetch-kokoro-onnx:
mkdir -p onnx/kokoro && curl -o onnx/kokoro/config.json -L https://huggingface.co/onnx-community/Kokoro-82M-v1.0-ONNX/resolve/main/config.json && curl -o onnx/kokoro/tokenizer.json -L https://huggingface.co/onnx-community/Kokoro-82M-v1.0-ONNX/resolve/main/tokenizer.json && curl -o onnx/kokoro/model_quantized.onnx -L https://huggingface.co/onnx-community/Kokoro-82M-v1.0-ONNX/resolve/main/onnx/model_quantized.onnx && curl -o onnx/kokoro/voices.bin -L https://github.com/thewh1teagle/kokoro-onnx/releases/download/model-files-v1.0/voices-v1.0.bin
install-onnx-deps: ## Install ONNX Runtime with CUDA support (or CPU fallback)
@echo "=== ONNX Runtime Installer ===" && \
echo "" && \
@@ -194,3 +197,25 @@ docker-logs-whisper: ## View logs from Whisper STT service only
docker-logs-kokoro: ## View logs from Kokoro TTS service only
@echo "Displaying logs from Kokoro TTS service..."
docker-compose -f batteries/docker-compose.yml logs -f kokoro-tts
# Kokoro ONNX TTS Setup
install-espeak: ## Install espeak-ng for phoneme tokenization
@echo "=== Installing espeak-ng ===" && \
if command -v espeak-ng >/dev/null 2>&1; then \
echo "espeak-ng is already installed:" && \
espeak-ng --version && \
exit 0; \
fi && \
echo "Installing espeak-ng..." && \
sudo apt-get update && \
sudo apt-get install -y espeak-ng espeak && \
echo "espeak-ng installed successfully!" && \
espeak-ng --version
fetch-kokoro-voices: ## Download Kokoro voice files (PyTorch format)
@echo "=== Downloading Kokoro voices ===" && \
mkdir -p onnx/kokoro/voices && \
echo "Downloading af_bella voice..." && \
curl -L -o onnx/kokoro/voices/af_bella.pt https://raw.githubusercontent.com/hexgrad/kokoro/main/kokoro/voices/af_heart.pt && \
echo "Voice file downloaded to onnx/kokoro/voices/" && \
ls -lh onnx/kokoro/voices/
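As a hedged aside, a Go startup check mirroring these targets — the helper and paths are illustrative (taken from the recipes above), not code that exists in this change:

// verifyKokoroAssets is a hypothetical helper (imports "fmt", "os").
func verifyKokoroAssets() error {
	for _, p := range []string{
		"onnx/kokoro/model_quantized.onnx", // fetch-kokoro-onnx
		"onnx/kokoro/voices.bin",           // fetch-kokoro-onnx
	} {
		if _, err := os.Stat(p); err != nil {
			return fmt.Errorf("missing Kokoro asset %s (run `make fetch-kokoro-onnx`): %w", p, err)
		}
	}
	return nil
}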

bot.go
View File

@@ -22,7 +22,7 @@ import (
"slices"
"strconv"
"strings"
"sync/atomic"
"sync"
"time"
)
@@ -37,7 +37,7 @@ var (
chunkChan = make(chan string, 10)
openAIToolChan = make(chan string, 10)
streamDone = make(chan bool, 1)
chatBody *models.SafeChatBody
chatBody *models.ChatBody
store storage.FullRepo
defaultFirstMsg = "Hello! What can I do for you?"
defaultStarter = []models.RoleMsg{}
@@ -49,6 +49,7 @@ var (
//nolint:unused // TTS_ENABLED conditionally uses this
orator Orator
asr STT
localModelsMu sync.RWMutex
defaultLCPProps = map[string]float32{
"temperature": 0.8,
"dry_multiplier": 0.0,
@@ -63,17 +64,11 @@ var (
"google/gemma-3-27b-it:free",
"meta-llama/llama-3.3-70b-instruct:free",
}
LocalModels atomic.Value // stores []string
localModelsData atomic.Value // stores *models.LCPModels
orModelsData atomic.Value // stores *models.ORModels
LocalModels = []string{}
localModelsData *models.LCPModels
orModelsData *models.ORModels
)
func init() {
LocalModels.Store([]string{})
localModelsData.Store((*models.LCPModels)(nil))
orModelsData.Store((*models.ORModels)(nil))
}
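Review note: the switch from atomic.Value to plain globals is only race-free if every reader takes localModelsMu. A minimal sketch of the read-side convention the new code assumes (helper name is hypothetical):

// snapshotLocalModels copies the shared slice under the read lock so
// callers can range over it without holding localModelsMu.
func snapshotLocalModels() []string {
	localModelsMu.RLock()
	defer localModelsMu.RUnlock()
	out := make([]string, len(LocalModels))
	copy(out, LocalModels)
	return out
}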
var thinkBlockRE = regexp.MustCompile(`(?s)<think>.*?</think>`)
// parseKnownToTag extracts known_to list from content using configured tag.
@@ -267,13 +262,13 @@ func warmUpModel() {
return
}
// Check if model is already loaded
loaded, err := isModelLoaded(chatBody.GetModel())
loaded, err := isModelLoaded(chatBody.Model)
if err != nil {
logger.Debug("failed to check model status", "model", chatBody.GetModel(), "error", err)
logger.Debug("failed to check model status", "model", chatBody.Model, "error", err)
// Continue with warmup attempt anyway
}
if loaded {
showToast("model already loaded", "Model "+chatBody.GetModel()+" is already loaded.")
showToast("model already loaded", "Model "+chatBody.Model+" is already loaded.")
return
}
go func() {
@@ -282,7 +277,7 @@ func warmUpModel() {
switch {
case strings.HasSuffix(cfg.CurrentAPI, "/completion"):
// Old completion endpoint
req := models.NewLCPReq(".", chatBody.GetModel(), nil, map[string]float32{
req := models.NewLCPReq(".", chatBody.Model, nil, map[string]float32{
"temperature": 0.8,
"dry_multiplier": 0.0,
"min_p": 0.05,
@@ -294,7 +289,7 @@ func warmUpModel() {
// OpenAI-compatible chat endpoint
req := models.OpenAIReq{
ChatBody: &models.ChatBody{
Model: chatBody.GetModel(),
Model: chatBody.Model,
Messages: []models.RoleMsg{
{Role: "system", Content: "."},
},
@@ -318,7 +313,7 @@ func warmUpModel() {
}
resp.Body.Close()
// Start monitoring for model load completion
monitorModelLoad(chatBody.GetModel())
monitorModelLoad(chatBody.Model)
}()
}
@@ -361,7 +356,7 @@ func fetchORModels(free bool) ([]string, error) {
if err := json.NewDecoder(resp.Body).Decode(data); err != nil {
return nil, err
}
orModelsData.Store(data)
orModelsData = data
freeModels := data.ListModels(free)
return freeModels, nil
}
@@ -423,7 +418,9 @@ func fetchLCPModelsWithStatus() (*models.LCPModels, error) {
if err := json.NewDecoder(resp.Body).Decode(data); err != nil {
return nil, err
}
localModelsData.Store(data)
localModelsMu.Lock()
localModelsData = data
localModelsMu.Unlock()
return data, nil
}
@@ -826,10 +823,10 @@ func chatRound(r *models.ChatRoundReq) error {
}
go sendMsgToLLM(reader)
logger.Debug("looking at vars in chatRound", "msg", r.UserMsg, "regen", r.Regen, "resume", r.Resume)
msgIdx := chatBody.GetMessageCount()
msgIdx := len(chatBody.Messages)
if !r.Resume {
// Add empty message to chatBody immediately so it persists during Alt+T toggle
chatBody.AppendMessage(models.RoleMsg{
chatBody.Messages = append(chatBody.Messages, models.RoleMsg{
Role: botPersona, Content: "",
})
nl := "\n\n"
@@ -841,7 +838,7 @@ func chatRound(r *models.ChatRoundReq) error {
}
fmt.Fprintf(textView, "%s[-:-:b](%d) %s[-:-:-]\n", nl, msgIdx, roleToIcon(botPersona))
} else {
msgIdx = chatBody.GetMessageCount() - 1
msgIdx = len(chatBody.Messages) - 1
}
respText := strings.Builder{}
toolResp := strings.Builder{}
@@ -898,10 +895,7 @@ out:
fmt.Fprint(textView, chunk)
respText.WriteString(chunk)
// Update the message in chatBody.Messages so it persists during Alt+T
chatBody.UpdateMessageFunc(msgIdx, func(msg models.RoleMsg) models.RoleMsg {
msg.Content = respText.String()
return msg
})
chatBody.Messages[msgIdx].Content = respText.String()
if scrollToEndEnabled {
textView.ScrollToEnd()
}
@@ -944,32 +938,29 @@ out:
}
botRespMode = false
if r.Resume {
chatBody.UpdateMessageFunc(chatBody.GetMessageCount()-1, func(msg models.RoleMsg) models.RoleMsg {
msg.Content += respText.String()
processedMsg := processMessageTag(&msg)
if msgStats != nil && processedMsg.Role != cfg.ToolRole {
processedMsg.Stats = msgStats
}
return *processedMsg
})
chatBody.Messages[len(chatBody.Messages)-1].Content += respText.String()
updatedMsg := chatBody.Messages[len(chatBody.Messages)-1]
processedMsg := processMessageTag(&updatedMsg)
chatBody.Messages[len(chatBody.Messages)-1] = *processedMsg
if msgStats != nil && chatBody.Messages[len(chatBody.Messages)-1].Role != cfg.ToolRole {
chatBody.Messages[len(chatBody.Messages)-1].Stats = msgStats
}
} else {
chatBody.UpdateMessageFunc(msgIdx, func(msg models.RoleMsg) models.RoleMsg {
msg.Content = respText.String()
processedMsg := processMessageTag(&msg)
if msgStats != nil && processedMsg.Role != cfg.ToolRole {
processedMsg.Stats = msgStats
}
return *processedMsg
})
stopTTSIfNotForUser(&chatBody.GetMessages()[msgIdx])
chatBody.Messages[msgIdx].Content = respText.String()
processedMsg := processMessageTag(&chatBody.Messages[msgIdx])
chatBody.Messages[msgIdx] = *processedMsg
if msgStats != nil && chatBody.Messages[msgIdx].Role != cfg.ToolRole {
chatBody.Messages[msgIdx].Stats = msgStats
}
stopTTSIfNotForUser(&chatBody.Messages[msgIdx])
}
cleanChatBody()
refreshChatDisplay()
updateStatusLine()
// bot msg is done;
// now check it for func call
// logChat(activeChatName, chatBody.GetMessages())
if err := updateStorageChat(activeChatName, chatBody.GetMessages()); err != nil {
// logChat(activeChatName, chatBody.Messages)
if err := updateStorageChat(activeChatName, chatBody.Messages); err != nil {
logger.Warn("failed to update storage", "error", err, "name", activeChatName)
}
// Strip think blocks before parsing for tool calls
@@ -984,8 +975,8 @@ out:
// If so, trigger those characters to respond if that char is not controlled by user
// perhaps we should have narrator role to determine which char is next to act
if cfg.AutoTurn {
lastMsg, ok := chatBody.GetLastMessage()
if ok && len(lastMsg.KnownTo) > 0 {
lastMsg := chatBody.Messages[len(chatBody.Messages)-1]
if len(lastMsg.KnownTo) > 0 {
triggerPrivateMessageResponses(&lastMsg)
}
}
@@ -994,15 +985,13 @@ out:
// cleanChatBody removes messages with null or empty content to prevent API issues
func cleanChatBody() {
if chatBody == nil || chatBody.GetMessageCount() == 0 {
if chatBody == nil || chatBody.Messages == nil {
return
}
// Tool request cleaning is now configurable via AutoCleanToolCallsFromCtx (default false)
// handles /completion msgs where part of the text is meant for the user and part is a tool call
// chatBody.Messages = cleanToolCalls(chatBody.Messages)
chatBody.WithLock(func(cb *models.ChatBody) {
cb.Messages = consolidateAssistantMessages(cb.Messages)
})
chatBody.Messages = consolidateAssistantMessages(chatBody.Messages)
}
// convertJSONToMapStringString unmarshals JSON into map[string]interface{} and converts all values to strings.
@@ -1102,7 +1091,7 @@ func findCall(msg, toolCall string) bool {
Content: fmt.Sprintf("Error processing tool call: %v. Please check the JSON format and try again.", err),
ToolCallID: lastToolCall.ID, // Use the stored tool call ID
}
chatBody.AppendMessage(toolResponseMsg)
chatBody.Messages = append(chatBody.Messages, toolResponseMsg)
// Clear the stored tool call ID after using it (no longer needed)
// Trigger the assistant to continue processing with the error message
crr := &models.ChatRoundReq{
@@ -1139,7 +1128,7 @@ func findCall(msg, toolCall string) bool {
Role: cfg.ToolRole,
Content: "Error processing tool call: no valid JSON found. Please check the JSON format.",
}
chatBody.AppendMessage(toolResponseMsg)
chatBody.Messages = append(chatBody.Messages, toolResponseMsg)
crr := &models.ChatRoundReq{
Role: cfg.AssistantRole,
}
@@ -1156,8 +1145,8 @@ func findCall(msg, toolCall string) bool {
Role: cfg.ToolRole,
Content: fmt.Sprintf("Error processing tool call: %v. Please check the JSON format and try again.", err),
}
chatBody.AppendMessage(toolResponseMsg)
logger.Debug("findCall: added tool error response", "role", toolResponseMsg.Role, "content_len", len(toolResponseMsg.Content), "message_count_after_add", chatBody.GetMessageCount())
chatBody.Messages = append(chatBody.Messages, toolResponseMsg)
logger.Debug("findCall: added tool error response", "role", toolResponseMsg.Role, "content_len", len(toolResponseMsg.Content), "message_count_after_add", len(chatBody.Messages))
// Trigger the assistant to continue processing with the error message
// chatRound("", cfg.AssistantRole, tv, false, false)
crr := &models.ChatRoundReq{
@@ -1175,23 +1164,17 @@ func findCall(msg, toolCall string) bool {
// we got here => last msg recognized as a tool call (correct or not)
// Use the tool call ID from streaming response (lastToolCall.ID)
// Don't generate random ID - the ID should match between assistant message and tool response
lastMsgIdx := chatBody.GetMessageCount() - 1
lastMsgIdx := len(chatBody.Messages) - 1
if lastToolCall.ID != "" {
chatBody.UpdateMessageFunc(lastMsgIdx, func(msg models.RoleMsg) models.RoleMsg {
msg.ToolCallID = lastToolCall.ID
return msg
})
chatBody.Messages[lastMsgIdx].ToolCallID = lastToolCall.ID
}
// Store tool call info in the assistant message
// Convert Args map to JSON string for storage
chatBody.UpdateMessageFunc(lastMsgIdx, func(msg models.RoleMsg) models.RoleMsg {
msg.ToolCall = &models.ToolCall{
ID: lastToolCall.ID,
Name: lastToolCall.Name,
Args: mapToString(lastToolCall.Args),
}
return msg
})
chatBody.Messages[lastMsgIdx].ToolCall = &models.ToolCall{
ID: lastToolCall.ID,
Name: lastToolCall.Name,
Args: mapToString(lastToolCall.Args),
}
// call a func
_, ok := fnMap[fc.Name]
if !ok {
@@ -1202,8 +1185,8 @@ func findCall(msg, toolCall string) bool {
Content: m,
ToolCallID: lastToolCall.ID, // Use the stored tool call ID
}
chatBody.AppendMessage(toolResponseMsg)
logger.Debug("findCall: added tool not implemented response", "role", toolResponseMsg.Role, "content_len", len(toolResponseMsg.Content), "tool_call_id", toolResponseMsg.ToolCallID, "message_count_after_add", chatBody.GetMessageCount())
chatBody.Messages = append(chatBody.Messages, toolResponseMsg)
logger.Debug("findCall: added tool not implemented response", "role", toolResponseMsg.Role, "content_len", len(toolResponseMsg.Content), "tool_call_id", toolResponseMsg.ToolCallID, "message_count_after_add", len(chatBody.Messages))
// Clear the stored tool call ID after using it
lastToolCall.ID = ""
// Trigger the assistant to continue processing with the new tool response
@@ -1274,9 +1257,9 @@ func findCall(msg, toolCall string) bool {
}
}
fmt.Fprintf(textView, "%s[-:-:b](%d) <%s>: [-:-:-]\n%s\n",
"\n\n", chatBody.GetMessageCount(), cfg.ToolRole, toolResponseMsg.GetText())
chatBody.AppendMessage(toolResponseMsg)
logger.Debug("findCall: added actual tool response", "role", toolResponseMsg.Role, "content_len", len(toolResponseMsg.Content), "tool_call_id", toolResponseMsg.ToolCallID, "message_count_after_add", chatBody.GetMessageCount())
"\n\n", len(chatBody.Messages), cfg.ToolRole, toolResponseMsg.GetText())
chatBody.Messages = append(chatBody.Messages, toolResponseMsg)
logger.Debug("findCall: added actual tool response", "role", toolResponseMsg.Role, "content_len", len(toolResponseMsg.Content), "tool_call_id", toolResponseMsg.ToolCallID, "message_count_after_add", len(chatBody.Messages))
// Clear the stored tool call ID after using it
lastToolCall.ID = ""
// Trigger the assistant to continue processing with the new tool response
@@ -1406,7 +1389,7 @@ func charToStart(agentName string, keepSysP bool) bool {
func updateModelLists() {
var err error
if cfg.OpenRouterToken != "" {
_, err := fetchORModels(true)
ORFreeModels, err = fetchORModels(true)
if err != nil {
logger.Warn("failed to fetch or models", "error", err)
}
@@ -1416,19 +1399,22 @@ func updateModelLists() {
if err != nil {
logger.Warn("failed to fetch llama.cpp models", "error", err)
}
LocalModels.Store(ml)
localModelsMu.Lock()
LocalModels = ml
localModelsMu.Unlock()
for statusLineWidget == nil {
time.Sleep(time.Millisecond * 100)
}
// set already loaded model in llama.cpp
if strings.Contains(cfg.CurrentAPI, "localhost") || strings.Contains(cfg.CurrentAPI, "127.0.0.1") {
modelList := LocalModels.Load().([]string)
for i := range modelList {
if strings.Contains(modelList[i], models.LoadedMark) {
m := strings.TrimPrefix(modelList[i], models.LoadedMark)
localModelsMu.Lock()
defer localModelsMu.Unlock()
for i := range LocalModels {
if strings.Contains(LocalModels[i], models.LoadedMark) {
m := strings.TrimPrefix(LocalModels[i], models.LoadedMark)
cfg.CurrentModel = m
chatBody.Model = m
cachedModelColor.Store("green")
cachedModelColor = "green"
updateStatusLine()
updateToolCapabilities()
app.Draw()
@@ -1439,17 +1425,21 @@ func updateModelLists() {
}
func refreshLocalModelsIfEmpty() {
models := LocalModels.Load().([]string)
if len(models) > 0 {
localModelsMu.RLock()
if len(LocalModels) > 0 {
localModelsMu.RUnlock()
return
}
localModelsMu.RUnlock()
// try to fetch
models, err := fetchLCPModels()
if err != nil {
logger.Warn("failed to fetch llama.cpp models", "error", err)
return
}
LocalModels.Store(models)
localModelsMu.Lock()
LocalModels = models
localModelsMu.Unlock()
}
func summarizeAndStartNewChat() {
@@ -1533,11 +1523,11 @@ func init() {
}
lastToolCall = &models.FuncCall{}
lastChat := loadOldChatOrGetNew()
chatBody = models.NewSafeChatBody(&models.ChatBody{
chatBody = &models.ChatBody{
Model: "modelname",
Stream: true,
Messages: lastChat,
})
}
choseChunkParser()
httpClient = createClient(time.Second * 90)
if cfg.TTS_ENABLED {

View File

@@ -61,6 +61,10 @@ type Config struct {
TTS_SPEED float32 `toml:"TTS_SPEED"`
TTS_PROVIDER string `toml:"TTS_PROVIDER"`
TTS_LANGUAGE string `toml:"TTS_LANGUAGE"`
// Kokoro ONNX TTS
KokoroModelPath string `toml:"KokoroModelPath"`
KokoroVoicesPath string `toml:"KokoroVoicesPath"`
KokoroVoice string `toml:"KokoroVoice"`
// STT
STT_TYPE string `toml:"STT_TYPE"` // WHISPER_SERVER, WHISPER_BINARY
STT_URL string `toml:"STT_URL"`
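For reference, a hedged sketch of how these fields would be populated — the values mirror the Makefile's fetch targets and are assumptions; whether KokoroVoicesPath should point at voices.bin or the voices/ directory is not visible in this diff:

cfg := &config.Config{
	TTS_PROVIDER:     "kokoro_onnx",
	TTS_SPEED:        1.0,
	KokoroModelPath:  "onnx/kokoro/model_quantized.onnx",
	KokoroVoicesPath: "onnx/kokoro/voices.bin",
	KokoroVoice:      "af_bella",
}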

extra/kokoro_onnx.go Normal file
View File

@@ -0,0 +1,421 @@
//go:build extra
// +build extra
package extra
import (
"bytes"
"fmt"
"gf-lt/models"
"gf-lt/onnx"
"log/slog"
"os/exec"
"strings"
"sync"
"time"
"github.com/gopxl/beep/v2"
"github.com/gopxl/beep/v2/speaker"
"github.com/gopxl/beep/v2/wav"
"github.com/neurosnap/sentences/english"
"github.com/yalue/onnxruntime_go"
)
// KokoroONNXOrator implements Kokoro TTS using ONNX runtime
type KokoroONNXOrator struct {
logger *slog.Logger
mu sync.Mutex
session *onnxruntime_go.DynamicAdvancedSession
phonemeMap map[string]int
espeakCmd string
voice string
speed float32
styleVector []float32
currentStream *beep.Ctrl
currentDone chan bool
textBuffer strings.Builder
interrupt bool
modelLoaded bool
modelPath string
voicesPath string
}
// Phoneme to token ID mapping from Kokoro tokenizer.json
var kokoroPhonemeMap = map[string]int{
"$": 0, ";": 1, ":": 2, ",": 3, ".": 4, "!": 5, "?": 6, "—": 9, "…": 10, "\"": 11, "(": 12, ")": 13, "“": 14, "”": 15, " ": 16, "̃": 17, "ˢ": 18, "ˤ": 19, "˦": 20, "˨": 21, "ᾝ": 22, "⭧": 23,
"A": 24, "I": 25, "O": 31, "Q": 33, "S": 35, "T": 36, "W": 39, "Y": 41, "ʲ": 42,
"a": 43, "b": 44, "c": 45, "d": 46, "e": 47, "f": 48, "h": 50, "i": 51, "j": 52, "k": 53, "l": 54, "m": 55, "n": 56, "o": 57, "p": 58, "q": 59, "r": 60, "s": 61, "t": 62, "u": 63, "v": 64, "w": 65, "x": 66, "y": 67, "z": 68,
"ɑ": 69, "ɐ": 70, "ɒ": 71, "æ": 72, "β": 75, "ɔ": 76, "ɕ": 77, "ç": 78, "ɖ": 80, "ð": 81, "˔": 82, "ə": 83, "ɚ": 85, "ɛ": 86, "ɜ": 87, "ɟ": 90, "ɡ": 92, "ɥ": 99, "ɨ": 101, "ɪ": 102, "ɝ": 103, "ɯ": 110, "ɰ": 111, "ŋ": 112, "ɳ": 113, "ɲ": 114, "ɴ": 115, "ø": 116, "ɸ": 118, "θ": 119, "œ": 120, "ɹ": 123, "ɾ": 125, "ɺ": 126, "ʁ": 128, "ɽ": 129, "ʂ": 130, "ʃ": 131, "ʈ": 132, "˧": 133, "ʊ": 135, "ʋ": 136, "ʌ": 138, "ɢ": 139, "ɣ": 140, "χ": 142, "ʎ": 143, "ʒ": 147, "ʔ": 148,
"ˈ": 156, "ˌ": 157, "ː": 158, "̰": 162, "̊": 164, "↕": 169, "→": 171, "↗": 172, "↘": 173, "ᶻ": 177,
}
func (o *KokoroONNXOrator) ensureInitialized(modelPath string) error {
if o.modelLoaded {
return nil
}
o.mu.Lock()
defer o.mu.Unlock()
if o.modelLoaded {
return nil
}
if modelPath == "" {
o.logger.Error("modelPath is empty, cannot load ONNX model")
return fmt.Errorf("modelPath is empty, set KokoroModelPath in config")
}
// Initialize ONNX runtime (shared with embedder)
if err := onnx.Init(); err != nil {
o.logger.Error("ONNX init failed", "error", err)
return fmt.Errorf("ONNX init failed: %w", err)
}
if onnx.HasCUDASupport() {
o.logger.Info("ONNX using CUDA")
} else {
o.logger.Info("ONNX using CPU fallback")
}
if o.phonemeMap == nil {
o.phonemeMap = kokoroPhonemeMap
}
if o.espeakCmd == "" {
o.espeakCmd = "espeak-ng"
if _, err := exec.LookPath(o.espeakCmd); err != nil {
o.espeakCmd = "espeak"
if _, err := exec.LookPath(o.espeakCmd); err != nil {
return fmt.Errorf("espeak-ng or espeak not found. Install with: sudo apt-get install espeak-ng")
}
}
}
o.logger.Info("using espeak command", "cmd", o.espeakCmd)
// Load voice embedding if not already loaded
if o.styleVector == nil {
voiceName := o.voice
if voiceName == "" {
voiceName = "af_bella"
}
if o.voicesPath != "" {
styleVec, err := onnx.LoadVoice(o.voicesPath, voiceName)
if err != nil {
o.logger.Warn("failed to load voice, using zeros", "error", err, "voice", voiceName)
o.styleVector = make([]float32, 256)
} else {
// Shape is (510, 1, 256); for now, average across the 510 rows to get a single 256-dim style vector
if len(styleVec) != 510*256 {
o.logger.Error("voice embedding has unexpected size", "len", len(styleVec))
err = fmt.Errorf("voice embedding has unexpected size: %d", len(styleVec))
return err
}
o.styleVector = make([]float32, 256)
for i := 0; i < 256; i++ {
var sum float32
for j := 0; j < 510; j++ {
sum += styleVec[j*256+i]
}
o.styleVector[i] = sum / 510.0
}
o.logger.Info("loaded voice embedding", "voice", voiceName)
}
} else {
o.logger.Warn("no voices path configured, using zeros for style")
o.styleVector = make([]float32, 256)
}
}
opts, err := onnx.NewSessionOptions()
if err != nil {
return fmt.Errorf("failed to create session options: %w", err)
}
defer func() { _ = opts.Destroy() }()
if onnx.HasCUDASupport() {
o.logger.Info("session options created with CUDA")
} else {
o.logger.Info("session options created with CPU")
}
session, err := onnxruntime_go.NewDynamicAdvancedSession(
modelPath,
[]string{"input_ids", "style", "speed"},
[]string{"waveform"},
opts,
)
if err != nil {
o.logger.Error("failed to create ONNX session", "error", err)
return fmt.Errorf("failed to create ONNX session: %w", err)
}
o.session = session
o.modelLoaded = true
o.logger.Info("Kokoro ONNX model loaded successfully", "model", modelPath)
return nil
}
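The averaging above flattens all 510 style rows into one vector. Worth noting as an assumption: reference Kokoro ONNX implementations select the style row by input token count instead of averaging, so something like this sketch (raw laid out row-major as (510, 1, 256)) may be closer to the original:

// styleForTokens picks the 256-dim style row matching the token count.
func styleForTokens(raw []float32, nTokens int) []float32 {
	row := nTokens
	if row > 509 {
		row = 509
	}
	return raw[row*256 : (row+1)*256]
}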
func (o *KokoroONNXOrator) textToPhonemes(text string) (string, error) {
cmd := exec.Command(o.espeakCmd, "-x", "-q", text)
output, err := cmd.Output()
if err != nil {
o.logger.Error("espeak failed", "error", err, "cmd", o.espeakCmd, "text", text)
return "", fmt.Errorf("espeak failed: %w", err)
}
phonemeStr := strings.TrimSpace(string(output))
return phonemeStr, nil
}
func (o *KokoroONNXOrator) phonemesToTokens(phonemeStr string) ([]int, error) {
if phonemeStr == "" {
o.logger.Error("empty phoneme string")
return nil, fmt.Errorf("empty phoneme string")
}
// Iterate over each character in the phoneme string
tokens := make([]int, 0)
for _, ch := range phonemeStr {
chStr := string(ch)
if tokenID, ok := o.phonemeMap[chStr]; ok {
tokens = append(tokens, tokenID)
}
}
if len(tokens) == 0 {
o.logger.Error("no phonemes mapped to tokens", "phonemeStr", phonemeStr)
return nil, fmt.Errorf("no valid phonemes mapped to tokens")
}
return tokens, nil
}
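One thing to double-check here: espeak-ng's -x flag prints its ASCII phoneme mnemonics, while kokoroPhonemeMap above is keyed on IPA glyphs (ə, ʃ, ɹ, …), so many symbols would be silently skipped by phonemesToTokens. espeak-ng also has an --ipa flag; whether that is what the author intends is an assumption:

// Hedged alternative for textToPhonemes: request IPA output so the
// characters line up with the IPA-keyed token map.
cmd := exec.Command(o.espeakCmd, "--ipa", "-q", text)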
func (o *KokoroONNXOrator) generateAudio(text string) ([]float32, error) {
if err := o.ensureInitialized(o.modelPath); err != nil {
o.logger.Error("ensureInitialized failed", "error", err)
return nil, err
}
phonemeStr, err := o.textToPhonemes(text)
if err != nil {
o.logger.Error("phoneme conversion failed", "error", err)
return nil, fmt.Errorf("phoneme conversion failed: %w", err)
}
tokens, err := o.phonemesToTokens(phonemeStr)
if err != nil {
o.logger.Error("token conversion failed", "error", err)
return nil, fmt.Errorf("token conversion failed: %w", err)
}
if len(tokens) > 510 {
return nil, fmt.Errorf("text too long: %d tokens (max 510)", len(tokens))
}
tokens = append([]int{0}, tokens...)
tokens = append(tokens, 0)
inputIDs := make([]int64, len(tokens))
for i, t := range tokens {
inputIDs[i] = int64(t)
}
inputTensor, err := onnxruntime_go.NewTensor[int64](
onnxruntime_go.NewShape(1, int64(len(inputIDs))),
inputIDs,
)
if err != nil {
o.logger.Error("failed to create input tensor", "error", err)
return nil, fmt.Errorf("failed to create input tensor: %w", err)
}
defer func() { _ = inputTensor.Destroy() }()
styleTensor, err := onnxruntime_go.NewTensor[float32](
onnxruntime_go.NewShape(1, 256),
o.styleVector,
)
if err != nil {
o.logger.Error("failed to create style tensor", "error", err)
return nil, fmt.Errorf("failed to create style tensor: %w", err)
}
defer func() { _ = styleTensor.Destroy() }()
speedTensor, err := onnxruntime_go.NewTensor[float32](
onnxruntime_go.NewShape(1),
[]float32{o.speed},
)
if err != nil {
o.logger.Error("failed to create speed tensor", "error", err)
return nil, fmt.Errorf("failed to create speed tensor: %w", err)
}
defer func() { _ = speedTensor.Destroy() }()
outputTensor, err := onnxruntime_go.NewEmptyTensor[float32](
onnxruntime_go.NewShape(1, 512),
)
if err != nil {
o.logger.Error("failed to create output tensor", "error", err)
return nil, fmt.Errorf("failed to create output tensor: %w", err)
}
defer func() { _ = outputTensor.Destroy() }()
err = o.session.Run(
[]onnxruntime_go.Value{inputTensor, styleTensor, speedTensor},
[]onnxruntime_go.Value{outputTensor},
)
if err != nil {
o.logger.Error("ONNX inference failed", "error", err)
return nil, fmt.Errorf("ONNX inference failed: %w", err)
}
audioData := outputTensor.GetData()
if len(audioData) == 0 {
o.logger.Error("empty audio output from ONNX")
return nil, fmt.Errorf("empty audio output")
}
audio := make([]float32, len(audioData))
copy(audio, audioData)
return audio, nil
}
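The output tensor above is preallocated with a fixed (1, 512) shape, but the waveform length varies with the input; a shape mismatch here is a plausible source of the "panic is not solved (WIP)" commit. onnxruntime_go documents that nil entries in a DynamicAdvancedSession's outputs slice are allocated by the runtime (and must be Destroyed by the caller) — a hedged sketch, worth verifying against the vendored version:

// Let ONNX Runtime size the waveform instead of guessing (1, 512).
outputs := []onnxruntime_go.Value{nil}
if err := o.session.Run(
	[]onnxruntime_go.Value{inputTensor, styleTensor, speedTensor},
	outputs,
); err != nil {
	return nil, fmt.Errorf("ONNX inference failed: %w", err)
}
waveform := outputs[0].(*onnxruntime_go.Tensor[float32])
defer func() { _ = waveform.Destroy() }()
audio := make([]float32, len(waveform.GetData()))
copy(audio, waveform.GetData())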
func (o *KokoroONNXOrator) Speak(text string) error {
audio, err := o.generateAudio(text)
if err != nil {
o.logger.Error("audio generation failed", "error", err)
return fmt.Errorf("audio generation failed: %w", err)
}
// Create streamer for encoding
encodeStreamer := beep.StreamerFunc(func(samples [][2]float64) (n int, ok bool) {
for i := range samples {
if i >= len(audio) {
return i, false
}
samples[i][0] = float64(audio[i])
samples[i][1] = float64(audio[i])
}
return len(audio), true
})
buf := &seekableBuffer{new(bytes.Buffer)}
err = wav.Encode(buf, encodeStreamer, beep.Format{
SampleRate: 24000,
NumChannels: 1,
Precision: 2,
})
if err != nil {
o.logger.Error("wav encoding failed", "error", err)
return fmt.Errorf("wav encoding failed: %w", err)
}
decodedStreamer, format, err := wav.Decode(bytes.NewReader(buf.Bytes()))
if err != nil {
o.logger.Error("wav decode failed", "error", err)
return fmt.Errorf("wav decode failed: %w", err)
}
defer decodedStreamer.Close()
if err := speaker.Init(format.SampleRate, format.SampleRate.N(time.Second/10)); err != nil {
o.logger.Error("speaker init failed", "error", err)
return fmt.Errorf("speaker init failed: %w", err)
}
o.logger.Info("playing audio", "sampleRate", format.SampleRate, "channels", format.NumChannels)
done := make(chan bool)
o.mu.Lock()
o.currentDone = done
o.currentStream = &beep.Ctrl{Streamer: beep.Seq(decodedStreamer, beep.Callback(func() {
o.mu.Lock()
close(done)
o.currentStream = nil
o.currentDone = nil
o.mu.Unlock()
})), Paused: false}
o.mu.Unlock()
speaker.Play(o.currentStream)
<-done
return nil
}
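Two hedged notes on Speak: the inline StreamerFunc restarts at audio[0] on every call and returns len(audio) rather than the number of samples actually written into the slice, and speaker.Init is usually called once per process rather than per utterance. A position-tracking streamer sketch:

pos := 0
encodeStreamer := beep.StreamerFunc(func(samples [][2]float64) (int, bool) {
	if pos >= len(audio) {
		return 0, false // drained
	}
	n := 0
	for i := range samples {
		if pos >= len(audio) {
			break
		}
		v := float64(audio[pos]) // duplicate mono sample to both channels
		samples[i][0], samples[i][1] = v, v
		pos++
		n++
	}
	return n, true
})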
func (o *KokoroONNXOrator) Stop() {
speaker.Lock()
defer speaker.Unlock()
o.mu.Lock()
defer o.mu.Unlock()
if o.currentStream != nil {
o.currentStream.Streamer = nil
}
}
func (o *KokoroONNXOrator) GetLogger() *slog.Logger {
return o.logger
}
func (o *KokoroONNXOrator) stoproutine() {
for {
<-TTSDoneChan
o.Stop()
for len(TTSTextChan) > 0 {
<-TTSTextChan
}
o.mu.Lock()
o.textBuffer.Reset()
if o.currentDone != nil {
select {
case o.currentDone <- true:
default:
}
}
o.interrupt = true
o.mu.Unlock()
}
}
func (o *KokoroONNXOrator) readroutine() {
tokenizer, _ := english.NewSentenceTokenizer(nil)
for {
select {
case chunk := <-TTSTextChan:
o.mu.Lock()
o.interrupt = false
_, err := o.textBuffer.WriteString(chunk)
if err != nil {
o.logger.Warn("failed to write to buffer", "error", err)
o.mu.Unlock()
continue
}
text := o.textBuffer.String()
sentences := tokenizer.Tokenize(text)
if len(sentences) <= 1 {
o.mu.Unlock()
continue
}
completeSentences := sentences[:len(sentences)-1]
remaining := sentences[len(sentences)-1].Text
o.textBuffer.Reset()
o.textBuffer.WriteString(remaining)
o.mu.Unlock()
for _, sentence := range completeSentences {
o.mu.Lock()
interrupted := o.interrupt
o.mu.Unlock()
if interrupted {
return
}
cleanedText := models.CleanText(sentence.Text)
if cleanedText == "" {
continue
}
o.logger.Info("KokoroONNX speak", "text", cleanedText)
if err := o.Speak(cleanedText); err != nil {
o.logger.Error("KokoroONNX tts failed", "text", cleanedText, "error", err)
}
}
case <-TTSFlushChan:
if len(TTSTextChan) > 0 {
for chunk := range TTSTextChan {
o.mu.Lock()
_, err := o.textBuffer.WriteString(chunk)
o.mu.Unlock()
if err != nil {
continue
}
if len(TTSTextChan) == 0 {
break
}
}
}
o.mu.Lock()
remaining := o.textBuffer.String()
remaining = models.CleanText(remaining)
o.textBuffer.Reset()
o.mu.Unlock()
if remaining == "" {
continue
}
sentencesRem := tokenizer.Tokenize(remaining)
for _, rs := range sentencesRem {
o.mu.Lock()
interrupt := o.interrupt
o.mu.Unlock()
if interrupt {
break
}
if err := o.Speak(rs.Text); err != nil {
o.logger.Error("tts failed", "text", rs.Text, "error", err)
}
}
}
}
}
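The flush branch drains TTSTextChan with a len()-guarded range, which can block if a writer races the final len check. A non-blocking drain sketch (lock o.mu around the buffer write, as the surrounding code does):

drain:
for {
	select {
	case chunk := <-TTSTextChan:
		o.textBuffer.WriteString(chunk)
	default:
		break drain
	}
}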

View File

@@ -32,6 +32,14 @@ var (
// endsWithPunctuation = regexp.MustCompile(`[;.!?]$`)
)
type seekableBuffer struct {
*bytes.Buffer
}
func (s *seekableBuffer) Seek(offset int64, whence int) (int64, error) {
return 0, nil
}
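The no-op Seek above satisfies wav.Encode's io.WriteSeeker parameter, but beep's encoder seeks back to patch the RIFF/data sizes into the header, so the buffer ends up with placeholder lengths. A minimal real in-memory WriteSeeker (standard library only, "io" for the whence constants) would avoid that; decoding would then read from its buf field rather than a bytes.Buffer:

type memWriteSeeker struct {
	buf []byte
	pos int
}

func (m *memWriteSeeker) Write(p []byte) (int, error) {
	if need := m.pos + len(p); need > len(m.buf) {
		m.buf = append(m.buf, make([]byte, need-len(m.buf))...)
	}
	n := copy(m.buf[m.pos:], p)
	m.pos += n
	return n, nil
}

func (m *memWriteSeeker) Seek(offset int64, whence int) (int64, error) {
	switch whence {
	case io.SeekStart:
		m.pos = int(offset)
	case io.SeekCurrent:
		m.pos += int(offset)
	case io.SeekEnd:
		m.pos = len(m.buf) + int(offset)
	}
	return int64(m.pos), nil
}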
type Orator interface {
Speak(text string) error
Stop()
@@ -194,6 +202,18 @@ func NewOrator(log *slog.Logger, cfg *config.Config) Orator {
go orator.readroutine()
go orator.stoproutine()
return orator
case "kokoro_onnx":
log.Info("Initializing Kokoro ONNX TTS", "modelPath", cfg.KokoroModelPath, "voicesPath", cfg.KokoroVoicesPath, "voice", cfg.KokoroVoice, "speed", cfg.TTS_SPEED)
orator := &KokoroONNXOrator{
logger: log,
modelPath: cfg.KokoroModelPath,
voicesPath: cfg.KokoroVoicesPath,
speed: cfg.TTS_SPEED,
voice: cfg.KokoroVoice,
}
go orator.readroutine()
go orator.stoproutine()
return orator
default:
language := cfg.TTS_LANGUAGE
if language == "" {

View File

@@ -16,17 +16,11 @@ import (
"time"
"unicode"
"sync/atomic"
"github.com/rivo/tview"
)
// Cached model color - updated by background goroutine
var cachedModelColor atomic.Value // stores string
func init() {
cachedModelColor.Store("orange")
}
var cachedModelColor string = "orange"
// startModelColorUpdater starts a background goroutine that periodically updates
// the cached model color. Only runs HTTP requests for local llama.cpp APIs.
@@ -45,20 +39,20 @@ func startModelColorUpdater() {
// updateCachedModelColor updates the global cachedModelColor variable
func updateCachedModelColor() {
if !isLocalLlamacpp() {
cachedModelColor.Store("orange")
cachedModelColor = "orange"
return
}
// Check if model is loaded
loaded, err := isModelLoaded(chatBody.GetModel())
loaded, err := isModelLoaded(chatBody.Model)
if err != nil {
// On error, assume not loaded (red)
cachedModelColor.Store("red")
cachedModelColor = "red"
return
}
if loaded {
cachedModelColor.Store("green")
cachedModelColor = "green"
} else {
cachedModelColor.Store("red")
cachedModelColor = "red"
}
}
@@ -109,7 +103,7 @@ func refreshChatDisplay() {
viewingAs = cfg.WriteNextMsgAs
}
// Filter messages for this character
filteredMessages := filterMessagesForCharacter(chatBody.GetMessages(), viewingAs)
filteredMessages := filterMessagesForCharacter(chatBody.Messages, viewingAs)
displayText := chatToText(filteredMessages, cfg.ShowSys)
textView.SetText(displayText)
colorText()
@@ -223,8 +217,8 @@ func startNewChat(keepSysP bool) {
logger.Warn("no such sys msg", "name", cfg.AssistantRole)
}
// set chat body
chatBody.TruncateMessages(2)
textView.SetText(chatToText(chatBody.GetMessages(), cfg.ShowSys))
chatBody.Messages = chatBody.Messages[:2]
textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys))
newChat := &models.Chat{
ID: id + 1,
Name: fmt.Sprintf("%d_%s", id+1, cfg.AssistantRole),
@@ -341,7 +335,7 @@ func isLocalLlamacpp() bool {
// The cached value is updated by a background goroutine every 5 seconds.
// For non-local models, returns orange. For local llama.cpp models, returns green if loaded, red if not.
func getModelColor() string {
return cachedModelColor.Load().(string)
return cachedModelColor
}
func makeStatusLine() string {
@@ -376,7 +370,7 @@ func makeStatusLine() string {
// Get model color based on load status for local llama.cpp models
modelColor := getModelColor()
statusLine := fmt.Sprintf(statusLineTempl, activeChatName,
boolColors[cfg.ToolUse], modelColor, chatBody.GetModel(), boolColors[cfg.SkipLLMResp],
boolColors[cfg.ToolUse], modelColor, chatBody.Model, boolColors[cfg.SkipLLMResp],
cfg.CurrentAPI, persona, botPersona)
if cfg.STT_ENABLED {
recordingS := fmt.Sprintf(" | [%s:-:b]voice recording[-:-:-] (ctrl+r)",
@@ -402,11 +396,11 @@ func makeStatusLine() string {
}
func getContextTokens() int {
if chatBody == nil {
if chatBody == nil || chatBody.Messages == nil {
return 0
}
total := 0
messages := chatBody.GetMessages()
messages := chatBody.Messages
for i := range messages {
msg := &messages[i]
if msg.Stats != nil && msg.Stats.Tokens > 0 {
@@ -421,54 +415,46 @@ func getContextTokens() int {
const deepseekContext = 128000
func getMaxContextTokens() int {
if chatBody == nil || chatBody.GetModel() == "" {
if chatBody == nil || chatBody.Model == "" {
return 0
}
modelName := chatBody.GetModel()
modelName := chatBody.Model
switch {
case strings.Contains(cfg.CurrentAPI, "openrouter"):
ord := orModelsData.Load()
if ord != nil {
data := ord.(*models.ORModels)
if data != nil {
for i := range data.Data {
m := &data.Data[i]
if m.ID == modelName {
return m.ContextLength
}
if orModelsData != nil {
for i := range orModelsData.Data {
m := &orModelsData.Data[i]
if m.ID == modelName {
return m.ContextLength
}
}
}
case strings.Contains(cfg.CurrentAPI, "deepseek"):
return deepseekContext
default:
lmd := localModelsData.Load()
if lmd != nil {
data := lmd.(*models.LCPModels)
if data != nil {
for i := range data.Data {
m := &data.Data[i]
if m.ID == modelName {
for _, arg := range m.Status.Args {
if strings.HasPrefix(arg, "--ctx-size") {
if strings.Contains(arg, "=") {
val := strings.Split(arg, "=")[1]
if n, err := strconv.Atoi(val); err == nil {
if localModelsData != nil {
for i := range localModelsData.Data {
m := &localModelsData.Data[i]
if m.ID == modelName {
for _, arg := range m.Status.Args {
if strings.HasPrefix(arg, "--ctx-size") {
if strings.Contains(arg, "=") {
val := strings.Split(arg, "=")[1]
if n, err := strconv.Atoi(val); err == nil {
return n
}
} else {
idx := -1
for j, a := range m.Status.Args {
if a == "--ctx-size" && j+1 < len(m.Status.Args) {
idx = j + 1
break
}
}
if idx != -1 {
if n, err := strconv.Atoi(m.Status.Args[idx]); err == nil {
return n
}
} else {
idx := -1
for j, a := range m.Status.Args {
if a == "--ctx-size" && j+1 < len(m.Status.Args) {
idx = j + 1
break
}
}
if idx != -1 {
if n, err := strconv.Atoi(m.Status.Args[idx]); err == nil {
return n
}
}
}
}
}
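The --ctx-size scan handles both the "--ctx-size=8192" and the two-argument "--ctx-size 8192" forms; as a compact, self-contained helper it could read like this sketch (name hypothetical; strings.CutPrefix needs Go 1.20+):

func ctxSizeFromArgs(args []string) (int, bool) {
	for i, a := range args {
		if v, ok := strings.CutPrefix(a, "--ctx-size="); ok {
			if n, err := strconv.Atoi(v); err == nil {
				return n, true
			}
		}
		if a == "--ctx-size" && i+1 < len(args) {
			if n, err := strconv.Atoi(args[i+1]); err == nil {
				return n, true
			}
		}
	}
	return 0, false
}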
@@ -504,7 +490,7 @@ func listChatRoles() []string {
func deepseekModelValidator() error {
if cfg.CurrentAPI == cfg.DeepSeekChatAPI || cfg.CurrentAPI == cfg.DeepSeekCompletionAPI {
if chatBody.GetModel() != "deepseek-chat" && chatBody.GetModel() != "deepseek-reasoner" {
if chatBody.Model != "deepseek-chat" && chatBody.Model != "deepseek-reasoner" {
showToast("bad request", "wrong deepseek model name")
return nil
}
@@ -581,13 +567,13 @@ func executeCommandAndDisplay(cmdText string) {
outputContent := workingDir
// Add the command being executed to the chat
fmt.Fprintf(textView, "\n[-:-:b](%d) <%s>: [-:-:-]\n$ %s\n",
chatBody.GetMessageCount(), cfg.ToolRole, cmdText)
len(chatBody.Messages), cfg.ToolRole, cmdText)
fmt.Fprintf(textView, "%s\n", outputContent)
combinedMsg := models.RoleMsg{
Role: cfg.ToolRole,
Content: "$ " + cmdText + "\n\n" + outputContent,
}
chatBody.AppendMessage(combinedMsg)
chatBody.Messages = append(chatBody.Messages, combinedMsg)
if scrollToEndEnabled {
textView.ScrollToEnd()
}
@@ -596,13 +582,13 @@ func executeCommandAndDisplay(cmdText string) {
} else {
outputContent := "cd: " + newDir + ": No such file or directory"
fmt.Fprintf(textView, "\n[-:-:b](%d) <%s>: [-:-:-]\n$ %s\n",
chatBody.GetMessageCount(), cfg.ToolRole, cmdText)
len(chatBody.Messages), cfg.ToolRole, cmdText)
fmt.Fprintf(textView, "[red]%s[-:-:-]\n", outputContent)
combinedMsg := models.RoleMsg{
Role: cfg.ToolRole,
Content: "$ " + cmdText + "\n\n" + outputContent,
}
chatBody.AppendMessage(combinedMsg)
chatBody.Messages = append(chatBody.Messages, combinedMsg)
if scrollToEndEnabled {
textView.ScrollToEnd()
}
@@ -618,7 +604,7 @@ func executeCommandAndDisplay(cmdText string) {
output, err := cmd.CombinedOutput()
// Add the command being executed to the chat
fmt.Fprintf(textView, "\n[-:-:b](%d) <%s>: [-:-:-]\n$ %s\n",
chatBody.GetMessageCount(), cfg.ToolRole, cmdText)
len(chatBody.Messages), cfg.ToolRole, cmdText)
var outputContent string
if err != nil {
// Include both output and error
@@ -649,7 +635,7 @@ func executeCommandAndDisplay(cmdText string) {
Role: cfg.ToolRole,
Content: combinedContent,
}
chatBody.AppendMessage(combinedMsg)
chatBody.Messages = append(chatBody.Messages, combinedMsg)
// Scroll to end and update colors
if scrollToEndEnabled {
textView.ScrollToEnd()
@@ -679,7 +665,7 @@ func performSearch(term string) {
searchResultLengths = nil
originalTextForSearch = ""
// Re-render text without highlights
textView.SetText(chatToText(chatBody.GetMessages(), cfg.ShowSys))
textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys))
colorText()
return
}

llm.go
View File

@@ -13,9 +13,8 @@ var lastImg string // for ctrl+j
// containsToolSysMsg checks if the toolSysMsg already exists in the chat body
func containsToolSysMsg() bool {
messages := chatBody.GetMessages()
for i := range messages {
if messages[i].Role == cfg.ToolRole && messages[i].Content == toolSysMsg {
for i := range chatBody.Messages {
if chatBody.Messages[i].Role == cfg.ToolRole && chatBody.Messages[i].Content == toolSysMsg {
return true
}
}
@@ -136,13 +135,13 @@ func (lcp LCPCompletion) FormMsg(msg, role string, resume bool) (io.Reader, erro
newMsg = models.RoleMsg{Role: role, Content: msg}
}
newMsg = *processMessageTag(&newMsg)
chatBody.AppendMessage(newMsg)
chatBody.Messages = append(chatBody.Messages, newMsg)
}
// sending description of the tools and how to use them
if cfg.ToolUse && !resume && role == cfg.UserRole && !containsToolSysMsg() {
chatBody.AppendMessage(models.RoleMsg{Role: cfg.ToolRole, Content: toolSysMsg})
chatBody.Messages = append(chatBody.Messages, models.RoleMsg{Role: cfg.ToolRole, Content: toolSysMsg})
}
filteredMessages, botPersona := filterMessagesForCurrentCharacter(chatBody.GetMessages())
filteredMessages, botPersona := filterMessagesForCurrentCharacter(chatBody.Messages)
// Build prompt and extract images inline as we process each message
messages := make([]string, len(filteredMessages))
for i := range filteredMessages {
@@ -184,7 +183,7 @@ func (lcp LCPCompletion) FormMsg(msg, role string, resume bool) (io.Reader, erro
}
logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse,
"msg", msg, "resume", resume, "prompt", prompt, "multimodal_data_count", len(multimodalData))
payload := models.NewLCPReq(prompt, chatBody.GetModel(), multimodalData,
payload := models.NewLCPReq(prompt, chatBody.Model, multimodalData,
defaultLCPProps, chatBody.MakeStopSliceExcluding("", listChatRoles()))
data, err := json.Marshal(payload)
if err != nil {
@@ -290,17 +289,17 @@ func (op LCPChat) FormMsg(msg, role string, resume bool) (io.Reader, error) {
newMsg = models.NewRoleMsg(role, msg)
}
newMsg = *processMessageTag(&newMsg)
chatBody.AppendMessage(newMsg)
chatBody.Messages = append(chatBody.Messages, newMsg)
logger.Debug("LCPChat FormMsg: added message to chatBody", "role", newMsg.Role,
"content_len", len(newMsg.Content), "message_count_after_add", chatBody.GetMessageCount())
"content_len", len(newMsg.Content), "message_count_after_add", len(chatBody.Messages))
}
filteredMessages, _ := filterMessagesForCurrentCharacter(chatBody.GetMessages())
filteredMessages, _ := filterMessagesForCurrentCharacter(chatBody.Messages)
// openai /v1/chat does not support custom roles; needs to be user, assistant, system
// Add persona suffix to the last user message to indicate who the assistant should reply as
bodyCopy := &models.ChatBody{
Messages: make([]models.RoleMsg, len(filteredMessages)),
Model: chatBody.GetModel(),
Stream: chatBody.GetStream(),
Model: chatBody.Model,
Stream: chatBody.Stream,
}
for i := range filteredMessages {
strippedMsg := *stripThinkingFromMsg(&filteredMessages[i])
@@ -376,13 +375,13 @@ func (ds DeepSeekerCompletion) FormMsg(msg, role string, resume bool) (io.Reader
if msg != "" { // otherwise let the bot to continue
newMsg := models.RoleMsg{Role: role, Content: msg}
newMsg = *processMessageTag(&newMsg)
chatBody.AppendMessage(newMsg)
chatBody.Messages = append(chatBody.Messages, newMsg)
}
// sending description of the tools and how to use them
if cfg.ToolUse && !resume && role == cfg.UserRole && !containsToolSysMsg() {
chatBody.AppendMessage(models.RoleMsg{Role: cfg.ToolRole, Content: toolSysMsg})
chatBody.Messages = append(chatBody.Messages, models.RoleMsg{Role: cfg.ToolRole, Content: toolSysMsg})
}
filteredMessages, botPersona := filterMessagesForCurrentCharacter(chatBody.GetMessages())
filteredMessages, botPersona := filterMessagesForCurrentCharacter(chatBody.Messages)
messages := make([]string, len(filteredMessages))
for i := range filteredMessages {
messages[i] = stripThinkingFromMsg(&filteredMessages[i]).ToPrompt()
@@ -395,7 +394,7 @@ func (ds DeepSeekerCompletion) FormMsg(msg, role string, resume bool) (io.Reader
}
logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse,
"msg", msg, "resume", resume, "prompt", prompt)
payload := models.NewDSCompletionReq(prompt, chatBody.GetModel(),
payload := models.NewDSCompletionReq(prompt, chatBody.Model,
defaultLCPProps["temp"],
chatBody.MakeStopSliceExcluding("", listChatRoles()))
data, err := json.Marshal(payload)
@@ -449,15 +448,15 @@ func (ds DeepSeekerChat) FormMsg(msg, role string, resume bool) (io.Reader, erro
if msg != "" { // otherwise let the bot continue
newMsg := models.RoleMsg{Role: role, Content: msg}
newMsg = *processMessageTag(&newMsg)
chatBody.AppendMessage(newMsg)
chatBody.Messages = append(chatBody.Messages, newMsg)
}
// Create copy of chat body with standardized user role
filteredMessages, _ := filterMessagesForCurrentCharacter(chatBody.GetMessages())
filteredMessages, _ := filterMessagesForCurrentCharacter(chatBody.Messages)
// Add persona suffix to the last user message to indicate who the assistant should reply as
bodyCopy := &models.ChatBody{
Messages: make([]models.RoleMsg, len(filteredMessages)),
Model: chatBody.GetModel(),
Stream: chatBody.GetStream(),
Model: chatBody.Model,
Stream: chatBody.Stream,
}
for i := range filteredMessages {
strippedMsg := *stripThinkingFromMsg(&filteredMessages[i])
@@ -528,13 +527,13 @@ func (or OpenRouterCompletion) FormMsg(msg, role string, resume bool) (io.Reader
if msg != "" { // otherwise let the bot to continue
newMsg := models.RoleMsg{Role: role, Content: msg}
newMsg = *processMessageTag(&newMsg)
chatBody.AppendMessage(newMsg)
chatBody.Messages = append(chatBody.Messages, newMsg)
}
// sending description of the tools and how to use them
if cfg.ToolUse && !resume && role == cfg.UserRole && !containsToolSysMsg() {
chatBody.AppendMessage(models.RoleMsg{Role: cfg.ToolRole, Content: toolSysMsg})
chatBody.Messages = append(chatBody.Messages, models.RoleMsg{Role: cfg.ToolRole, Content: toolSysMsg})
}
filteredMessages, botPersona := filterMessagesForCurrentCharacter(chatBody.GetMessages())
filteredMessages, botPersona := filterMessagesForCurrentCharacter(chatBody.Messages)
messages := make([]string, len(filteredMessages))
for i := range filteredMessages {
messages[i] = stripThinkingFromMsg(&filteredMessages[i]).ToPrompt()
@@ -548,7 +547,7 @@ func (or OpenRouterCompletion) FormMsg(msg, role string, resume bool) (io.Reader
stopSlice := chatBody.MakeStopSliceExcluding("", listChatRoles())
logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse,
"msg", msg, "resume", resume, "prompt", prompt, "stop_strings", stopSlice)
payload := models.NewOpenRouterCompletionReq(chatBody.GetModel(), prompt,
payload := models.NewOpenRouterCompletionReq(chatBody.Model, prompt,
defaultLCPProps, stopSlice)
data, err := json.Marshal(payload)
if err != nil {
@@ -634,15 +633,15 @@ func (or OpenRouterChat) FormMsg(msg, role string, resume bool) (io.Reader, erro
newMsg = models.NewRoleMsg(role, msg)
}
newMsg = *processMessageTag(&newMsg)
chatBody.AppendMessage(newMsg)
chatBody.Messages = append(chatBody.Messages, newMsg)
}
// Create copy of chat body with standardized user role
filteredMessages, _ := filterMessagesForCurrentCharacter(chatBody.GetMessages())
filteredMessages, _ := filterMessagesForCurrentCharacter(chatBody.Messages)
// Add persona suffix to the last user message to indicate who the assistant should reply as
bodyCopy := &models.ChatBody{
Messages: make([]models.RoleMsg, len(filteredMessages)),
Model: chatBody.GetModel(),
Stream: chatBody.GetStream(),
Model: chatBody.Model,
Stream: chatBody.Stream,
}
for i := range filteredMessages {
strippedMsg := *stripThinkingFromMsg(&filteredMessages[i])

View File

@@ -6,7 +6,6 @@ import (
"fmt"
"os"
"strings"
"sync"
)
type FuncCall struct {
@@ -640,253 +639,3 @@ type MultimodalToolResp struct {
Type string `json:"type"`
Parts []map[string]string `json:"parts"`
}
// SafeChatBody is a thread-safe wrapper around ChatBody using RWMutex.
// This allows safe concurrent access to chat state from multiple goroutines.
type SafeChatBody struct {
mu sync.RWMutex
ChatBody
}
// NewSafeChatBody creates a new SafeChatBody from an existing ChatBody.
// If cb is nil, creates an empty ChatBody.
func NewSafeChatBody(cb *ChatBody) *SafeChatBody {
if cb == nil {
return &SafeChatBody{
ChatBody: ChatBody{
Messages: []RoleMsg{},
},
}
}
return &SafeChatBody{
ChatBody: *cb,
}
}
// GetModel returns the model name (thread-safe read).
func (s *SafeChatBody) GetModel() string {
s.mu.RLock()
defer s.mu.RUnlock()
return s.Model
}
// SetModel sets the model name (thread-safe write).
func (s *SafeChatBody) SetModel(model string) {
s.mu.Lock()
defer s.mu.Unlock()
s.Model = model
}
// GetStream returns the stream flag (thread-safe read).
func (s *SafeChatBody) GetStream() bool {
s.mu.RLock()
defer s.mu.RUnlock()
return s.Stream
}
// SetStream sets the stream flag (thread-safe write).
func (s *SafeChatBody) SetStream(stream bool) {
s.mu.Lock()
defer s.mu.Unlock()
s.Stream = stream
}
// GetMessages returns a copy of all messages (thread-safe read).
// Returns a copy to prevent race conditions after the lock is released.
func (s *SafeChatBody) GetMessages() []RoleMsg {
s.mu.RLock()
defer s.mu.RUnlock()
// Return a copy to prevent external modification
messagesCopy := make([]RoleMsg, len(s.Messages))
copy(messagesCopy, s.Messages)
return messagesCopy
}
// SetMessages replaces all messages (thread-safe write).
func (s *SafeChatBody) SetMessages(messages []RoleMsg) {
s.mu.Lock()
defer s.mu.Unlock()
s.Messages = messages
}
// AppendMessage adds a message to the end (thread-safe write).
func (s *SafeChatBody) AppendMessage(msg RoleMsg) {
s.mu.Lock()
defer s.mu.Unlock()
s.Messages = append(s.Messages, msg)
}
// GetMessageAt returns a message at a specific index (thread-safe read).
// Returns the message and a boolean indicating if the index was valid.
func (s *SafeChatBody) GetMessageAt(index int) (RoleMsg, bool) {
s.mu.RLock()
defer s.mu.RUnlock()
if index < 0 || index >= len(s.Messages) {
return RoleMsg{}, false
}
return s.Messages[index], true
}
// SetMessageAt updates a message at a specific index (thread-safe write).
// Returns false if index is out of bounds.
func (s *SafeChatBody) SetMessageAt(index int, msg RoleMsg) bool {
s.mu.Lock()
defer s.mu.Unlock()
if index < 0 || index >= len(s.Messages) {
return false
}
s.Messages[index] = msg
return true
}
// GetLastMessage returns the last message (thread-safe read).
// Returns the message and a boolean indicating if the chat has messages.
func (s *SafeChatBody) GetLastMessage() (RoleMsg, bool) {
s.mu.RLock()
defer s.mu.RUnlock()
if len(s.Messages) == 0 {
return RoleMsg{}, false
}
return s.Messages[len(s.Messages)-1], true
}
// GetMessageCount returns the number of messages (thread-safe read).
func (s *SafeChatBody) GetMessageCount() int {
s.mu.RLock()
defer s.mu.RUnlock()
return len(s.Messages)
}
// RemoveLastMessage removes the last message (thread-safe write).
// Returns false if there are no messages.
func (s *SafeChatBody) RemoveLastMessage() bool {
s.mu.Lock()
defer s.mu.Unlock()
if len(s.Messages) == 0 {
return false
}
s.Messages = s.Messages[:len(s.Messages)-1]
return true
}
// TruncateMessages keeps only the first n messages (thread-safe write).
func (s *SafeChatBody) TruncateMessages(n int) {
s.mu.Lock()
defer s.mu.Unlock()
if n < len(s.Messages) {
s.Messages = s.Messages[:n]
}
}
// ClearMessages removes all messages (thread-safe write).
func (s *SafeChatBody) ClearMessages() {
s.mu.Lock()
defer s.mu.Unlock()
s.Messages = []RoleMsg{}
}
// Rename renames all occurrences of oldname to newname in messages (thread-safe read-modify-write).
func (s *SafeChatBody) Rename(oldname, newname string) {
s.mu.Lock()
defer s.mu.Unlock()
for i := range s.Messages {
s.Messages[i].Content = strings.ReplaceAll(s.Messages[i].Content, oldname, newname)
s.Messages[i].Role = strings.ReplaceAll(s.Messages[i].Role, oldname, newname)
}
}
// ListRoles returns all unique roles in messages (thread-safe read).
func (s *SafeChatBody) ListRoles() []string {
s.mu.RLock()
defer s.mu.RUnlock()
namesMap := make(map[string]struct{})
for i := range s.Messages {
namesMap[s.Messages[i].Role] = struct{}{}
}
resp := make([]string, len(namesMap))
i := 0
for k := range namesMap {
resp[i] = k
i++
}
return resp
}
// MakeStopSlice returns stop strings for all roles (thread-safe read).
func (s *SafeChatBody) MakeStopSlice() []string {
return s.MakeStopSliceExcluding("", s.ListRoles())
}
// MakeStopSliceExcluding returns stop strings excluding a specific role (thread-safe read).
func (s *SafeChatBody) MakeStopSliceExcluding(excludeRole string, roleList []string) []string {
s.mu.RLock()
defer s.mu.RUnlock()
ss := []string{}
for _, role := range roleList {
if role == excludeRole {
continue
}
ss = append(ss,
role+":\n",
role+":",
role+": ",
role+": ",
role+": \n",
role+": ",
)
}
return ss
}
// UpdateMessageFunc updates a message at index using a provided function.
// The function receives the current message and returns the updated message.
// This is atomic and thread-safe (read-modify-write under single lock).
// Returns false if index is out of bounds.
func (s *SafeChatBody) UpdateMessageFunc(index int, updater func(RoleMsg) RoleMsg) bool {
s.mu.Lock()
defer s.mu.Unlock()
if index < 0 || index >= len(s.Messages) {
return false
}
s.Messages[index] = updater(s.Messages[index])
return true
}
// AppendMessageFunc appends a new message created by a provided function.
// The function receives the current message count and returns the new message.
// This is atomic and thread-safe.
func (s *SafeChatBody) AppendMessageFunc(creator func(count int) RoleMsg) {
s.mu.Lock()
defer s.mu.Unlock()
msg := creator(len(s.Messages))
s.Messages = append(s.Messages, msg)
}
// GetMessagesForLLM returns a filtered copy of messages for sending to LLM.
// This is thread-safe and returns a copy safe for external modification.
func (s *SafeChatBody) GetMessagesForLLM(filterFunc func([]RoleMsg) []RoleMsg) []RoleMsg {
s.mu.RLock()
defer s.mu.RUnlock()
if filterFunc == nil {
messagesCopy := make([]RoleMsg, len(s.Messages))
copy(messagesCopy, s.Messages)
return messagesCopy
}
return filterFunc(s.Messages)
}
// WithLock executes a function while holding the write lock.
// Use this for complex operations that need to be atomic.
func (s *SafeChatBody) WithLock(fn func(*ChatBody)) {
s.mu.Lock()
defer s.mu.Unlock()
fn(&s.ChatBody)
}
// WithRLock executes a function while holding the read lock.
// Use this for complex read-only operations.
func (s *SafeChatBody) WithRLock(fn func(*ChatBody)) {
s.mu.RLock()
defer s.mu.RUnlock()
fn(&s.ChatBody)
}

View File

@@ -22,7 +22,7 @@ func showModelSelectionPopup() {
models, err := fetchLCPModelsWithLoadStatus()
if err != nil {
logger.Error("failed to fetch models with load status", "error", err)
return LocalModels.Load().([]string)
return LocalModels
}
return models
}
@@ -30,8 +30,7 @@ func showModelSelectionPopup() {
modelList := getModelListForAPI(cfg.CurrentAPI)
// Check for empty options list
if len(modelList) == 0 {
localModels := LocalModels.Load().([]string)
logger.Warn("empty model list for", "api", cfg.CurrentAPI, "localModelsLen", len(localModels), "orModelsLen", len(ORFreeModels))
logger.Warn("empty model list for", "api", cfg.CurrentAPI, "localModelsLen", len(LocalModels), "orModelsLen", len(ORFreeModels))
var message string
switch {
case strings.Contains(cfg.CurrentAPI, "openrouter.ai"):
@@ -51,7 +50,7 @@ func showModelSelectionPopup() {
// Find the current model index to set as selected
currentModelIndex := -1
for i, model := range modelList {
if strings.TrimPrefix(model, models.LoadedMark) == chatBody.GetModel() {
if strings.TrimPrefix(model, models.LoadedMark) == chatBody.Model {
currentModelIndex = i
}
modelListWidget.AddItem(model, "", 0, nil)
@@ -62,8 +61,8 @@ func showModelSelectionPopup() {
}
modelListWidget.SetSelectedFunc(func(index int, mainText string, secondaryText string, shortcut rune) {
modelName := strings.TrimPrefix(mainText, models.LoadedMark)
chatBody.SetModel(modelName)
cfg.CurrentModel = chatBody.GetModel()
chatBody.Model = modelName
cfg.CurrentModel = chatBody.Model
pages.RemovePage("modelSelectionPopup")
app.SetFocus(textArea)
updateCachedModelColor()
@@ -151,13 +150,15 @@ func showAPILinkSelectionPopup() {
}
// Assume local llama.cpp
refreshLocalModelsIfEmpty()
return LocalModels.Load().([]string)
localModelsMu.RLock()
defer localModelsMu.RUnlock()
return LocalModels
}
newModelList := getModelListForAPI(cfg.CurrentAPI)
// Ensure chatBody.Model is in the new list; if not, set to first available model
if len(newModelList) > 0 && !slices.Contains(newModelList, chatBody.GetModel()) {
chatBody.SetModel(strings.TrimPrefix(newModelList[0], models.LoadedMark))
cfg.CurrentModel = chatBody.GetModel()
if len(newModelList) > 0 && !slices.Contains(newModelList, chatBody.Model) {
chatBody.Model = strings.TrimPrefix(newModelList[0], models.LoadedMark)
cfg.CurrentModel = chatBody.Model
updateToolCapabilities()
}
pages.RemovePage("apiLinkSelectionPopup")
@@ -228,7 +229,7 @@ func showUserRoleSelectionPopup() {
// Update the user role in config
cfg.WriteNextMsgAs = mainText
// role got switched; update textview with character-specific context for user
filtered := filterMessagesForCharacter(chatBody.GetMessages(), mainText)
filtered := filterMessagesForCharacter(chatBody.Messages, mainText)
textView.SetText(chatToText(filtered, cfg.ShowSys))
// Remove the popup page
pages.RemovePage("userRoleSelectionPopup")

View File

@@ -4,11 +4,14 @@ import (
 "fmt"
 "strconv"
 "strings"
+"sync"
 "github.com/gdamore/tcell/v2"
 "github.com/rivo/tview"
 )
+var _ = sync.RWMutex{}
 // Define constants for cell types
 const (
 CellTypeCheckbox = "checkbox"
@@ -154,7 +157,9 @@ func makePropsTable(props map[string]float32) *tview.Table {
 }
 // Assume local llama.cpp
 refreshLocalModelsIfEmpty()
-return LocalModels.Load().([]string)
+localModelsMu.RLock()
+defer localModelsMu.RUnlock()
+return LocalModels
 }
 // Add input fields
 addInputRow("New char to write msg as", "", func(text string) {
@@ -257,8 +262,7 @@ func makePropsTable(props map[string]float32) *tview.Table {
 // Check for empty options list
 if len(data.Options) == 0 {
-localModels := LocalModels.Load().([]string)
-logger.Warn("empty options list for", "label", label, "api", cfg.CurrentAPI, "localModelsLen", len(localModels), "orModelsLen", len(ORFreeModels))
+logger.Warn("empty options list for", "label", label, "api", cfg.CurrentAPI, "localModelsLen", len(LocalModels), "orModelsLen", len(ORFreeModels))
 message := "No options available for " + label
 if label == "Select a model" {
 switch {
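
The added var _ = sync.RWMutex{} line above exists only to keep the newly added "sync" import compiling while nothing in this file uses it yet. A small sketch of the idiom and its more conventional alternative (both assumptions about intent, since the commit message doesn't say):

	package main

	// The commit keeps the otherwise-unused import alive with a dummy reference:
	import "sync"

	var _ = sync.RWMutex{} // throwaway reference; removable once real usage lands

	// A blank import (import _ "sync") would signal the same "reserved for
	// later use" intent without declaring a dummy value.

	func main() {}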

View File

@@ -7,6 +7,7 @@ import (
 "fmt"
 "gf-lt/config"
 "gf-lt/models"
+"gf-lt/onnx"
 "log/slog"
 "net/http"
 "os"
@@ -156,43 +157,6 @@ type ONNXEmbedder struct {
 modelPath string
 }
-var onnxInitOnce sync.Once
-var onnxReady bool
-var onnxLibPath string
-var cudaLibPath string
-var onnxLibPaths = []string{
-"/usr/lib/libonnxruntime.so",
-"/usr/lib/libonnxruntime.so.1.24.2",
-"/usr/local/lib/libonnxruntime.so",
-"/usr/lib/x86_64-linux-gnu/libonnxruntime.so",
-"/opt/onnxruntime/lib/libonnxruntime.so",
-}
-var cudaLibPaths = []string{
-"/usr/lib/libonnxruntime_providers_cuda.so",
-"/usr/local/lib/libonnxruntime_providers_cuda.so",
-"/opt/onnxruntime/lib/libonnxruntime_providers_cuda.so",
-}
-func findONNXLibrary() string {
-for _, path := range onnxLibPaths {
-if _, err := os.Stat(path); err == nil {
-return path
-}
-}
-return ""
-}
-func findCUDALibrary() string {
-for _, path := range cudaLibPaths {
-if _, err := os.Stat(path); err == nil {
-return path
-}
-}
-return ""
-}
 func NewONNXEmbedder(modelPath, tokenizerPath string, dims int, logger *slog.Logger) (*ONNXEmbedder, error) {
 // Check if model and tokenizer files exist
 if _, err := os.Stat(modelPath); err != nil {
@@ -202,17 +166,16 @@ func NewONNXEmbedder(modelPath, tokenizerPath string, dims int, logger *slog.Log
 return nil, fmt.Errorf("tokenizer not found: %w", err)
 }
-// Find ONNX library
-onnxLibPath = findONNXLibrary()
-if onnxLibPath == "" {
-return nil, errors.New("ONNX runtime library not found in standard locations")
+// Initialize ONNX runtime
+if err := onnx.Init(); err != nil {
+return nil, fmt.Errorf("ONNX init failed: %w", err)
 }
+if onnx.HasCUDASupport() {
+logger.Info("ONNX CUDA support enabled")
+} else {
+logger.Info("ONNX using CPU fallback")
+}
-// Find CUDA provider library (optional)
-cudaLibPath = findCUDALibrary()
-if cudaLibPath == "" {
-fmt.Println("WARNING: CUDA provider library not found, will use CPU")
-}
 emb := &ONNXEmbedder{
 tokenizerPath: tokenizerPath,
 dims: dims,
@@ -239,26 +202,12 @@ func (e *ONNXEmbedder) ensureInitialized() error {
 }
 e.tokenizer = tok
 }
-onnxInitOnce.Do(func() {
-onnxruntime_go.SetSharedLibraryPath(onnxLibPath)
-if err := onnxruntime_go.InitializeEnvironment(); err != nil {
-e.logger.Error("failed to initialize ONNX runtime", "error", err)
-onnxReady = false
-return
-}
-// Register CUDA provider if available
-if cudaLibPath != "" {
-if err := onnxruntime_go.RegisterExecutionProviderLibrary("CUDA", cudaLibPath); err != nil {
-e.logger.Warn("failed to register CUDA provider", "error", err)
-}
-}
-onnxReady = true
-})
-if !onnxReady {
+// ONNX runtime already initialized by onnx.Init() in NewONNXEmbedder
+if !onnx.IsReady() {
 return errors.New("ONNX runtime not ready")
 }
 // Create session options
-opts, err := onnxruntime_go.NewSessionOptions()
+opts, err := onnx.NewSessionOptions()
 if err != nil {
 return fmt.Errorf("failed to create session options: %w", err)
 }
@@ -266,27 +215,7 @@ func (e *ONNXEmbedder) ensureInitialized() error {
 _ = opts.Destroy()
 }()
-// Try to add CUDA provider
-useCUDA := cudaLibPath != ""
-if useCUDA {
-cudaOpts, err := onnxruntime_go.NewCUDAProviderOptions()
-if err != nil {
-e.logger.Warn("failed to create CUDA provider options, falling back to CPU", "error", err)
-useCUDA = false
-} else {
-defer func() {
-_ = cudaOpts.Destroy()
-}()
-if err := cudaOpts.Update(map[string]string{"device_id": "0"}); err != nil {
-e.logger.Warn("failed to update CUDA options, falling back to CPU", "error", err)
-useCUDA = false
-} else if err := opts.AppendExecutionProviderCUDA(cudaOpts); err != nil {
-e.logger.Warn("failed to append CUDA provider, falling back to CPU", "error", err)
-useCUDA = false
-}
-}
-}
-if useCUDA {
+if onnx.HasCUDASupport() {
 e.logger.Info("Using CUDA for ONNX inference")
 } else {
 e.logger.Info("Using CPU for ONNX inference")

View File

@@ -29,7 +29,7 @@ func historyToSJSON(msgs []models.RoleMsg) (string, error) {
 }
 func exportChat() error {
-data, err := json.MarshalIndent(chatBody.GetMessages(), "", " ")
+data, err := json.MarshalIndent(chatBody.Messages, "", " ")
 if err != nil {
 return err
 }
@@ -54,7 +54,7 @@ func importChat(filename string) error {
 if _, ok := chatMap[activeChatName]; !ok {
 addNewChat(activeChatName)
 }
-chatBody.SetMessages(messages)
+chatBody.Messages = messages
 cfg.AssistantRole = messages[1].Role
 if cfg.AssistantRole == cfg.UserRole {
 cfg.AssistantRole = messages[2].Role
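
Worth flagging in importChat above (true both before and after this change): it reads messages[1].Role and potentially messages[2].Role with no length check, so importing a file with fewer than three messages can panic on its own, independent of any data race. A hedged guard sketch; the helper name is hypothetical and RoleMsg stands in for the repo's models.RoleMsg:

	package main

	import "errors"

	// RoleMsg mirrors the relevant field of the repo's models.RoleMsg.
	type RoleMsg struct{ Role string }

	// assistantRoleFromImport reproduces the role-picking logic in the hunk
	// above, with bounds checks added.
	func assistantRoleFromImport(messages []RoleMsg, userRole string) (string, error) {
		if len(messages) < 2 {
			return "", errors.New("imported chat has no assistant message")
		}
		role := messages[1].Role
		if role == userRole && len(messages) > 2 {
			role = messages[2].Role
		}
		return role, nil
	}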

View File

@@ -128,8 +128,8 @@ func makeChatTable(chatMap map[string]models.Chat) *tview.Table {
 pages.RemovePage(historyPage)
 return
 }
-chatBody.SetMessages(history)
-textView.SetText(chatToText(chatBody.GetMessages(), cfg.ShowSys))
+chatBody.Messages = history
+textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys))
 activeChatName = selectedChat
 pages.RemovePage(historyPage)
 return
@@ -149,8 +149,8 @@ func makeChatTable(chatMap map[string]models.Chat) *tview.Table {
 }
 showToast("chat deleted", selectedChat+" was deleted")
 // load last chat
-chatBody.SetMessages(loadOldChatOrGetNew())
-textView.SetText(chatToText(chatBody.GetMessages(), cfg.ShowSys))
+chatBody.Messages = loadOldChatOrGetNew()
+textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys))
 pages.RemovePage(historyPage)
 return
 case "update card":
@@ -163,24 +163,16 @@ func makeChatTable(chatMap map[string]models.Chat) *tview.Table {
 showToast("error", "no such card: "+agentName)
 return
 }
-if msg0, ok := chatBody.GetMessageAt(0); ok {
-cc.SysPrompt = msg0.Content
-}
-if msg1, ok := chatBody.GetMessageAt(1); ok {
-cc.FirstMsg = msg1.Content
-}
+cc.SysPrompt = chatBody.Messages[0].Content
+cc.FirstMsg = chatBody.Messages[1].Content
 if err := pngmeta.WriteToPng(cc.ToSpec(cfg.UserRole), cc.FilePath, cc.FilePath); err != nil {
 logger.Error("failed to write charcard", "error", err)
 }
 return
 case "move sysprompt onto 1st msg":
-chatBody.WithLock(func(cb *models.ChatBody) {
-if len(cb.Messages) >= 2 {
-cb.Messages[1].Content = cb.Messages[0].Content + cb.Messages[1].Content
-cb.Messages[0].Content = rpDefenitionSysMsg
-}
-})
-textView.SetText(chatToText(chatBody.GetMessages(), cfg.ShowSys))
+chatBody.Messages[1].Content = chatBody.Messages[0].Content + chatBody.Messages[1].Content
+chatBody.Messages[0].Content = rpDefenitionSysMsg
+textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys))
 activeChatName = selectedChat
 pages.RemovePage(historyPage)
 return
@@ -571,7 +563,7 @@ func makeAgentTable(agentList []string) *tview.Table {
 return
 }
 // replace textview
-textView.SetText(chatToText(chatBody.GetMessages(), cfg.ShowSys))
+textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys))
 colorText()
 updateStatusLine()
 // sysModal.ClearButtons()
@@ -740,7 +732,7 @@ func makeImportChatTable(filenames []string) *tview.Table {
 colorText()
 updateStatusLine()
 // redraw the text in text area
-textView.SetText(chatToText(chatBody.GetMessages(), cfg.ShowSys))
+textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys))
 pages.RemovePage(historyPage)
 app.SetFocus(textArea)
 return
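
Throughout this file the commit replaces the ChatBody accessors (SetMessages, GetMessages, GetMessageAt, WithLock) with direct field access, dropping both the locking and the bounds checks that GetMessageAt provided — the "update card" branch now indexes Messages[0] and Messages[1] unguarded. For reference, a minimal sketch of the accessor style being removed, reconstructed from the call sites; the real models.ChatBody may differ:

	package main

	import "sync"

	// RoleMsg mirrors only the fields the call sites touch.
	type RoleMsg struct{ Role, Content string }

	type ChatBody struct {
		mu       sync.RWMutex
		Model    string
		Messages []RoleMsg
	}

	// GetMessageAt returns the message at i with a bounds check — the
	// (value, ok) shape matches the removed call sites above.
	func (cb *ChatBody) GetMessageAt(i int) (RoleMsg, bool) {
		cb.mu.RLock()
		defer cb.mu.RUnlock()
		if i < 0 || i >= len(cb.Messages) {
			return RoleMsg{}, false
		}
		return cb.Messages[i], true
	}

	// WithLock runs fn under the write lock, so multi-step edits (like the
	// sysprompt merge above) stay atomic with respect to other goroutines.
	func (cb *ChatBody) WithLock(fn func(cb *ChatBody)) {
		cb.mu.Lock()
		defer cb.mu.Unlock()
		fn(cb)
	}

	func main() {}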

View File

@@ -1215,11 +1215,11 @@ func isCommandAllowed(command string, args ...string) bool {
 }
 func summarizeChat(args map[string]string) []byte {
-if chatBody.GetMessageCount() == 0 {
+if len(chatBody.Messages) == 0 {
 return []byte("No chat history to summarize.")
 }
 // Format chat history for the agent
-chatText := chatToText(chatBody.GetMessages(), true) // include system and tool messages
+chatText := chatToText(chatBody.Messages, true) // include system and tool messages
 return []byte(chatText)
 }

56 tui.go View File

@@ -355,7 +355,7 @@ func init() {
 searchResults = nil // Clear search results
 searchResultLengths = nil // Clear search result lengths
 originalTextForSearch = ""
-textView.SetText(chatToText(chatBody.GetMessages(), cfg.ShowSys)) // Reset text without search regions
+textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys)) // Reset text without search regions
 colorText() // Apply normal chat coloring
 } else {
 // Original logic if no search is active
@@ -436,11 +436,9 @@ func init() {
 pages.RemovePage(editMsgPage)
 return nil
 }
-chatBody.WithLock(func(cb *models.ChatBody) {
-cb.Messages[selectedIndex].SetText(editedMsg)
-})
+chatBody.Messages[selectedIndex].SetText(editedMsg)
 // change textarea
-textView.SetText(chatToText(chatBody.GetMessages(), cfg.ShowSys))
+textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys))
 pages.RemovePage(editMsgPage)
 editMode = false
 return nil
@@ -468,11 +466,9 @@ func init() {
 pages.RemovePage(roleEditPage)
 return
 }
-if selectedIndex >= 0 && selectedIndex < chatBody.GetMessageCount() {
-chatBody.WithLock(func(cb *models.ChatBody) {
-cb.Messages[selectedIndex].Role = newRole
-})
-textView.SetText(chatToText(chatBody.GetMessages(), cfg.ShowSys))
+if selectedIndex >= 0 && selectedIndex < len(chatBody.Messages) {
+chatBody.Messages[selectedIndex].Role = newRole
+textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys))
 colorText()
 pages.RemovePage(roleEditPage)
 }
@@ -501,7 +497,7 @@ func init() {
 return nil
 }
 selectedIndex = siInt
-if chatBody.GetMessageCount()-1 < selectedIndex || selectedIndex < 0 {
+if len(chatBody.Messages)-1 < selectedIndex || selectedIndex < 0 {
 msg := "chosen index is out of bounds, will copy user input"
 logger.Warn(msg, "index", selectedIndex)
 showToast("error", msg)
@@ -511,7 +507,7 @@ func init() {
 hideIndexBar() // Hide overlay instead of removing page directly
 return nil
 }
-m := chatBody.GetMessages()[selectedIndex]
+m := chatBody.Messages[selectedIndex]
 switch {
 case roleEditMode:
 hideIndexBar() // Hide overlay first
@@ -578,7 +574,7 @@ func init() {
 searchResults = nil
 searchResultLengths = nil
 originalTextForSearch = ""
-textView.SetText(chatToText(chatBody.GetMessages(), cfg.ShowSys))
+textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys))
 colorText()
 return
 } else {
@@ -636,7 +632,7 @@ func init() {
 //
 textArea.SetMovedFunc(updateStatusLine)
 updateStatusLine()
-textView.SetText(chatToText(chatBody.GetMessages(), cfg.ShowSys))
+textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys))
 colorText()
 if scrollToEndEnabled {
 textView.ScrollToEnd()
@@ -650,7 +646,7 @@ func init() {
 if event.Key() == tcell.KeyRune && event.Rune() == '5' && event.Modifiers()&tcell.ModAlt != 0 {
 // switch cfg.ShowSys
 cfg.ShowSys = !cfg.ShowSys
-textView.SetText(chatToText(chatBody.GetMessages(), cfg.ShowSys))
+textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys))
 colorText()
 }
 if event.Key() == tcell.KeyRune && event.Rune() == '3' && event.Modifiers()&tcell.ModAlt != 0 {
@@ -683,7 +679,7 @@ func init() {
 // Handle Alt+T to toggle thinking block visibility
 if event.Key() == tcell.KeyRune && event.Rune() == 't' && event.Modifiers()&tcell.ModAlt != 0 {
 thinkingCollapsed = !thinkingCollapsed
-textView.SetText(chatToText(chatBody.GetMessages(), cfg.ShowSys))
+textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys))
 colorText()
 status := "expanded"
 if thinkingCollapsed {
@@ -695,7 +691,7 @@ func init() {
 // Handle Ctrl+T to toggle tool call/response visibility
 if event.Key() == tcell.KeyCtrlT {
 toolCollapsed = !toolCollapsed
-textView.SetText(chatToText(chatBody.GetMessages(), cfg.ShowSys))
+textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys))
 colorText()
 status := "expanded"
 if toolCollapsed {
@@ -738,14 +734,14 @@ func init() {
 }
 if event.Key() == tcell.KeyF2 && !botRespMode {
 // regen last msg
-if chatBody.GetMessageCount() == 0 {
+if len(chatBody.Messages) == 0 {
 showToast("info", "no messages to regenerate")
 return nil
 }
-chatBody.TruncateMessages(chatBody.GetMessageCount() - 1)
+chatBody.Messages = chatBody.Messages[:len(chatBody.Messages)-1]
 // there is no case where user msg is regenerated
-// lastRole := chatBody.GetMessages()[chatBody.GetMessageCount()-1].Role
-textView.SetText(chatToText(chatBody.GetMessages(), cfg.ShowSys))
+// lastRole := chatBody.Messages[len(chatBody.Messages)-1].Role
+textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys))
 // go chatRound("", cfg.UserRole, textView, true, false)
 if cfg.TTS_ENABLED {
 TTSDoneChan <- true
@@ -764,12 +760,12 @@ func init() {
 colorText()
 return nil
 }
-if chatBody.GetMessageCount() == 0 {
+if len(chatBody.Messages) == 0 {
 showToast("info", "no messages to delete")
 return nil
 }
-chatBody.TruncateMessages(chatBody.GetMessageCount() - 1)
-textView.SetText(chatToText(chatBody.GetMessages(), cfg.ShowSys))
+chatBody.Messages = chatBody.Messages[:len(chatBody.Messages)-1]
+textView.SetText(chatToText(chatBody.Messages, cfg.ShowSys))
 if cfg.TTS_ENABLED {
 TTSDoneChan <- true
 }
@@ -817,7 +813,7 @@ func init() {
 if event.Key() == tcell.KeyF7 {
 // copy msg to clipboard
 editMode = false
-m := chatBody.GetMessages()[chatBody.GetMessageCount()-1]
+m := chatBody.Messages[len(chatBody.Messages)-1]
 msgText := m.GetText()
 if err := copyToClipboard(msgText); err != nil {
 logger.Error("failed to copy to clipboard", "error", err)
@@ -1001,10 +997,10 @@ func init() {
 TTSDoneChan <- true
 }
 if event.Key() == tcell.KeyRune && event.Rune() == '0' && event.Modifiers()&tcell.ModAlt != 0 && cfg.TTS_ENABLED {
-if chatBody.GetMessageCount() > 0 {
+if len(chatBody.Messages) > 0 {
 // Stop any currently playing TTS first
 TTSDoneChan <- true
-lastMsg := chatBody.GetMessages()[chatBody.GetMessageCount()-1]
+lastMsg := chatBody.Messages[len(chatBody.Messages)-1]
 cleanedText := models.CleanText(lastMsg.GetText())
 if cleanedText != "" {
 // nolint: errcheck
@@ -1016,7 +1012,7 @@ func init() {
 if event.Key() == tcell.KeyCtrlW {
 // INFO: continue bot/text message
 // without new role
-lastRole := chatBody.GetMessages()[chatBody.GetMessageCount()-1].Role
+lastRole := chatBody.Messages[len(chatBody.Messages)-1].Role
 // go chatRound("", lastRole, textView, false, true)
 chatRoundChan <- &models.ChatRoundReq{Role: lastRole, Resume: true}
 return nil
@@ -1102,7 +1098,7 @@ func init() {
 if event.Key() == tcell.KeyRune && event.Modifiers() == tcell.ModAlt && event.Rune() == '9' {
 // Warm up (load) the currently selected model
 go warmUpModel()
-showToast("model warmup", "loading model: "+chatBody.GetModel())
+showToast("model warmup", "loading model: "+chatBody.Model)
 return nil
 }
 // cannot send msg in editMode or botRespMode
@@ -1141,7 +1137,7 @@ func init() {
 }
 // add user icon before user msg
 fmt.Fprintf(textView, "%s[-:-:b](%d) <%s>: [-:-:-]\n%s\n",
-nl, chatBody.GetMessageCount(), persona, msgText)
+nl, len(chatBody.Messages), persona, msgText)
 textArea.SetText("", true)
 if scrollToEndEnabled {
 textView.ScrollToEnd()
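
The F2/regen and delete handlers above now truncate with chatBody.Messages = chatBody.Messages[:len(chatBody.Messages)-1]. The length check guards the empty case, but with the locking gone this races against any goroutine appending to Messages during streaming — a plausible subject of the "panic is not solved" commit message. A sketch of the guarded form being removed, reusing the assumed ChatBody from the sketch earlier; the real TruncateMessages may differ:

	// Hypothetical safe truncation; mirrors the removed TruncateMessages call site.
	func (cb *ChatBody) TruncateMessages(n int) {
		cb.mu.Lock()
		defer cb.mu.Unlock()
		if n < 0 || n > len(cb.Messages) {
			return
		}
		cb.Messages = cb.Messages[:n]
	}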