From 9af21895c6d15e7ab648f35acd44afc9675b54b7 Mon Sep 17 00:00:00 2001 From: Grail Finder Date: Tue, 24 Feb 2026 08:59:34 +0300 Subject: [PATCH] Chore: remove cfg.ThinkUse move cleaning image attachment to the end of chatRound fmt cleanup --- Makefile | 8 +++++++- bot.go | 25 +++++-------------------- config/config.go | 1 - docs/config.md | 3 --- llm.go | 9 --------- models/models.go | 3 +-- props_table.go | 3 --- tui.go | 7 ------- 8 files changed, 13 insertions(+), 46 deletions(-) diff --git a/Makefile b/Makefile index 9583041..b185d27 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: setconfig run lint setup-whisper build-whisper download-whisper-model docker-up docker-down docker-logs noextra-run +.PHONY: setconfig run lint setup-whisper build-whisper download-whisper-model docker-up docker-down docker-logs noextra-run installdelve checkdelve run: setconfig go build -tags extra -o gf-lt && ./gf-lt @@ -15,6 +15,12 @@ noextra-run: setconfig setconfig: find config.toml &>/dev/null || cp config.example.toml config.toml +installdelve: + go install github.com/go-delve/delve/cmd/dlv@latest + +checkdelve: + which dlv &>/dev/null || $(MAKE) installdelve + lint: ## Run linters. Use make install-linters first. golangci-lint run -c .golangci.yml ./... 
diff --git a/bot.go b/bot.go index de75b15..04dadb4 100644 --- a/bot.go +++ b/bot.go @@ -569,7 +569,6 @@ func sendMsgToLLM(body io.Reader) { streamDone <- true return } - // Check if the initial response is an error before starting to stream if resp.StatusCode >= 400 { // Read the response body to get detailed error information @@ -584,7 +583,6 @@ func sendMsgToLLM(body io.Reader) { streamDone <- true return } - // Parse the error response for detailed information detailedError := extractDetailedErrorFromBytes(bodyBytes, resp.StatusCode) logger.Error("API returned error status", "status_code", resp.StatusCode, "detailed_error", detailedError) @@ -710,7 +708,6 @@ func sendMsgToLLM(body io.Reader) { tokenCount++ } } - // When we get content and have been streaming reasoning, close the thinking block if chunk.Chunk != "" && hasReasoning && !reasoningSent { // Close the thinking block before sending actual content @@ -718,7 +715,6 @@ func sendMsgToLLM(body io.Reader) { tokenCount++ reasoningSent = true } - // bot sends way too many \n answerText = strings.ReplaceAll(chunk.Chunk, "\n\n", "\n") // Accumulate text to check for stop strings that might span across chunks @@ -764,12 +760,10 @@ func chatRagUse(qText string) (string, error) { questions[i] = q.Text logger.Debug("RAG question extracted", "index", i, "question", q.Text) } - if len(questions) == 0 { logger.Warn("No questions extracted from query text", "query", qText) return "No related results from RAG vector storage.", nil } - respVecs := []models.VectorRow{} for i, q := range questions { logger.Debug("Processing RAG question", "index", i, "question", q) @@ -779,7 +773,6 @@ func chatRagUse(qText string) (string, error) { continue } logger.Debug("Got embeddings for question", "index", i, "question_len", len(q), "embedding_len", len(emb)) - // Create EmbeddingResp struct for the search embeddingResp := &models.EmbeddingResp{ Embedding: emb, @@ -793,7 +786,6 @@ func chatRagUse(qText string) (string, error) { 
logger.Debug("RAG search returned vectors", "index", i, "question", q, "vector_count", len(vecs)) respVecs = append(respVecs, vecs...) } - // get raw text resps := []string{} logger.Debug("RAG query final results", "total_vecs_found", len(respVecs)) @@ -801,12 +793,10 @@ func chatRagUse(qText string) (string, error) { resps = append(resps, rv.RawText) logger.Debug("RAG result", "slug", rv.Slug, "filename", rv.FileName, "raw_text_len", len(rv.RawText)) } - if len(resps) == 0 { logger.Info("No RAG results found for query", "original_query", qText, "question_count", len(questions)) return "No related results from RAG vector storage.", nil } - result := strings.Join(resps, "\n") logger.Debug("RAG query completed", "result_len", len(result), "response_count", len(resps)) return result, nil @@ -836,7 +826,10 @@ func chatRound(r *models.ChatRoundReq) error { if cfg.WriteNextMsgAsCompletionAgent != "" { botPersona = cfg.WriteNextMsgAsCompletionAgent } - defer func() { botRespMode = false }() + defer func() { + botRespMode = false + ClearImageAttachment() + }() // check that there is a model set to use if is not local choseChunkParser() reader, err := chunkParser.FormMsg(r.UserMsg, r.Role, r.Resume) @@ -862,13 +855,7 @@ func chatRound(r *models.ChatRoundReq) error { } else if strings.HasSuffix(prevText, "\n") { nl = "\n" } - fmt.Fprintf(textView, "%s[-:-:b](%d) ", nl, msgIdx) - fmt.Fprint(textView, roleToIcon(botPersona)) - fmt.Fprint(textView, "[-:-:-]\n") - if cfg.ThinkUse && !strings.Contains(cfg.CurrentAPI, "v1") { - // fmt.Fprint(textView, "") - chunkChan <- "" - } + fmt.Fprintf(textView, "%s[-:-:b](%d) %s[-:-:-]\n", nl, msgIdx, roleToIcon(botPersona)) } else { msgIdx = len(chatBody.Messages) - 1 } @@ -1246,7 +1233,6 @@ func chatToTextSlice(messages []models.RoleMsg, showSys bool) []string { func chatToText(messages []models.RoleMsg, showSys bool) string { s := chatToTextSlice(messages, showSys) text := strings.Join(s, "\n") - // Collapse thinking blocks if enabled if 
thinkingCollapsed { text = thinkRE.ReplaceAllStringFunc(text, func(match string) string { @@ -1270,7 +1256,6 @@ func chatToText(messages []models.RoleMsg, showSys bool) string { } } } - return text } diff --git a/config/config.go b/config/config.go index ed70370..d745afb 100644 --- a/config/config.go +++ b/config/config.go @@ -18,7 +18,6 @@ type Config struct { UserRole string `toml:"UserRole"` ToolRole string `toml:"ToolRole"` ToolUse bool `toml:"ToolUse"` - ThinkUse bool `toml:"ThinkUse"` StripThinkingFromAPI bool `toml:"StripThinkingFromAPI"` ReasoningEffort string `toml:"ReasoningEffort"` AssistantRole string `toml:"AssistantRole"` diff --git a/docs/config.md b/docs/config.md index c5a70b2..57198ea 100644 --- a/docs/config.md +++ b/docs/config.md @@ -165,9 +165,6 @@ Those could be switched in program, but also bould be setup in config. #### ToolUse - Enable or disable explanation of tools to llm, so it could use them. -#### ThinkUse -- Enable or disable insertion of JsonSerializerToken at the beggining of llm resp. - ### StripThinkingFromAPI (`true`) - Strip thinking blocks from messages before sending to LLM. Keeps them in chat history for local viewing but reduces token usage in API calls. 
diff --git a/llm.go b/llm.go index 3da8488..6697dfa 100644 --- a/llm.go +++ b/llm.go @@ -184,9 +184,6 @@ func (lcp LCPCompletion) FormMsg(msg, role string, resume bool) (io.Reader, erro botMsgStart := "\n" + botPersona + ":\n" prompt += botMsgStart } - if cfg.ThinkUse && !cfg.ToolUse { - prompt += "" - } logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse, "msg", msg, "resume", resume, "prompt", prompt, "multimodal_data_count", len(multimodalData)) payload := models.NewLCPReq(prompt, chatBody.Model, multimodalData, @@ -423,9 +420,6 @@ func (ds DeepSeekerCompletion) FormMsg(msg, role string, resume bool) (io.Reader botMsgStart := "\n" + botPersona + ":\n" prompt += botMsgStart } - if cfg.ThinkUse && !cfg.ToolUse { - prompt += "" - } logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse, "msg", msg, "resume", resume, "prompt", prompt) payload := models.NewDSCompletionReq(prompt, chatBody.Model, @@ -589,9 +583,6 @@ func (or OpenRouterCompletion) FormMsg(msg, role string, resume bool) (io.Reader botMsgStart := "\n" + botPersona + ":\n" prompt += botMsgStart } - if cfg.ThinkUse && !cfg.ToolUse { - prompt += "" - } stopSlice := chatBody.MakeStopSliceExcluding("", listChatRoles()) logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse, "msg", msg, "resume", resume, "prompt", prompt, "stop_strings", stopSlice) diff --git a/models/models.go b/models/models.go index 4e33ba5..ee13928 100644 --- a/models/models.go +++ b/models/models.go @@ -241,8 +241,7 @@ func (m *RoleMsg) ToText(i int) string { } finalContent.WriteString(contentStr) if m.Stats != nil { - finalContent.WriteString(fmt.Sprintf("\n[gray::i][%d tok, %.1fs, %.1f t/s][-:-:-]", - m.Stats.Tokens, m.Stats.Duration, m.Stats.TokensPerSec)) + fmt.Fprintf(&finalContent, "\n[gray::i][%d tok, %.1fs, %.1f t/s][-:-:-]", m.Stats.Tokens, m.Stats.Duration, m.Stats.TokensPerSec) } textMsg := fmt.Sprintf("[-:-:b]%s[-:-:-]\n%s\n", icon, finalContent.String()) return 
strings.ReplaceAll(textMsg, "\n\n", "\n") diff --git a/props_table.go b/props_table.go index a1ec657..f8432cd 100644 --- a/props_table.go +++ b/props_table.go @@ -115,9 +115,6 @@ func makePropsTable(props map[string]float32) *tview.Table { row++ } // Add checkboxes - addCheckboxRow("Insert tag (/completion only)", cfg.ThinkUse, func(checked bool) { - cfg.ThinkUse = checked - }) addCheckboxRow("RAG use", cfg.RAGEnabled, func(checked bool) { cfg.RAGEnabled = checked }) diff --git a/tui.go b/tui.go index 0e95709..c7bfa06 100644 --- a/tui.go +++ b/tui.go @@ -995,13 +995,6 @@ func init() { } // go chatRound(msgText, persona, textView, false, false) chatRoundChan <- &models.ChatRoundReq{Role: persona, UserMsg: msgText} - // Also clear any image attachment after sending the message - go func() { - // Wait a short moment for the message to be processed, then clear the image attachment - // This allows the image to be sent with the current message if it was attached - // But clears it for the next message - ClearImageAttachment() - }() } return nil }