Chore: remove cfg.ThinkUse

move cleaning image attachment to the end of chatRound
fmt cleanup
This commit is contained in:
Grail Finder
2026-02-24 08:59:34 +03:00
parent e3bd6f219f
commit 9af21895c6
8 changed files with 13 additions and 46 deletions

View File

@@ -1,4 +1,4 @@
.PHONY: setconfig run lint setup-whisper build-whisper download-whisper-model docker-up docker-down docker-logs noextra-run .PHONY: setconfig run lint setup-whisper build-whisper download-whisper-model docker-up docker-down docker-logs noextra-run installdelve checkdelve
run: setconfig run: setconfig
go build -tags extra -o gf-lt && ./gf-lt go build -tags extra -o gf-lt && ./gf-lt
@@ -15,6 +15,12 @@ noextra-run: setconfig
setconfig: setconfig:
find config.toml &>/dev/null || cp config.example.toml config.toml find config.toml &>/dev/null || cp config.example.toml config.toml
installdelve:
go install github.com/go-delve/delve/cmd/dlv@latest
checkdelve:
which dlv &>/dev/null || installdelve
lint: ## Run linters. Use make install-linters first. lint: ## Run linters. Use make install-linters first.
golangci-lint run -c .golangci.yml ./... golangci-lint run -c .golangci.yml ./...

25
bot.go
View File

@@ -569,7 +569,6 @@ func sendMsgToLLM(body io.Reader) {
streamDone <- true streamDone <- true
return return
} }
// Check if the initial response is an error before starting to stream // Check if the initial response is an error before starting to stream
if resp.StatusCode >= 400 { if resp.StatusCode >= 400 {
// Read the response body to get detailed error information // Read the response body to get detailed error information
@@ -584,7 +583,6 @@ func sendMsgToLLM(body io.Reader) {
streamDone <- true streamDone <- true
return return
} }
// Parse the error response for detailed information // Parse the error response for detailed information
detailedError := extractDetailedErrorFromBytes(bodyBytes, resp.StatusCode) detailedError := extractDetailedErrorFromBytes(bodyBytes, resp.StatusCode)
logger.Error("API returned error status", "status_code", resp.StatusCode, "detailed_error", detailedError) logger.Error("API returned error status", "status_code", resp.StatusCode, "detailed_error", detailedError)
@@ -710,7 +708,6 @@ func sendMsgToLLM(body io.Reader) {
tokenCount++ tokenCount++
} }
} }
// When we get content and have been streaming reasoning, close the thinking block // When we get content and have been streaming reasoning, close the thinking block
if chunk.Chunk != "" && hasReasoning && !reasoningSent { if chunk.Chunk != "" && hasReasoning && !reasoningSent {
// Close the thinking block before sending actual content // Close the thinking block before sending actual content
@@ -718,7 +715,6 @@ func sendMsgToLLM(body io.Reader) {
tokenCount++ tokenCount++
reasoningSent = true reasoningSent = true
} }
// bot sends way too many \n // bot sends way too many \n
answerText = strings.ReplaceAll(chunk.Chunk, "\n\n", "\n") answerText = strings.ReplaceAll(chunk.Chunk, "\n\n", "\n")
// Accumulate text to check for stop strings that might span across chunks // Accumulate text to check for stop strings that might span across chunks
@@ -764,12 +760,10 @@ func chatRagUse(qText string) (string, error) {
questions[i] = q.Text questions[i] = q.Text
logger.Debug("RAG question extracted", "index", i, "question", q.Text) logger.Debug("RAG question extracted", "index", i, "question", q.Text)
} }
if len(questions) == 0 { if len(questions) == 0 {
logger.Warn("No questions extracted from query text", "query", qText) logger.Warn("No questions extracted from query text", "query", qText)
return "No related results from RAG vector storage.", nil return "No related results from RAG vector storage.", nil
} }
respVecs := []models.VectorRow{} respVecs := []models.VectorRow{}
for i, q := range questions { for i, q := range questions {
logger.Debug("Processing RAG question", "index", i, "question", q) logger.Debug("Processing RAG question", "index", i, "question", q)
@@ -779,7 +773,6 @@ func chatRagUse(qText string) (string, error) {
continue continue
} }
logger.Debug("Got embeddings for question", "index", i, "question_len", len(q), "embedding_len", len(emb)) logger.Debug("Got embeddings for question", "index", i, "question_len", len(q), "embedding_len", len(emb))
// Create EmbeddingResp struct for the search // Create EmbeddingResp struct for the search
embeddingResp := &models.EmbeddingResp{ embeddingResp := &models.EmbeddingResp{
Embedding: emb, Embedding: emb,
@@ -793,7 +786,6 @@ func chatRagUse(qText string) (string, error) {
logger.Debug("RAG search returned vectors", "index", i, "question", q, "vector_count", len(vecs)) logger.Debug("RAG search returned vectors", "index", i, "question", q, "vector_count", len(vecs))
respVecs = append(respVecs, vecs...) respVecs = append(respVecs, vecs...)
} }
// get raw text // get raw text
resps := []string{} resps := []string{}
logger.Debug("RAG query final results", "total_vecs_found", len(respVecs)) logger.Debug("RAG query final results", "total_vecs_found", len(respVecs))
@@ -801,12 +793,10 @@ func chatRagUse(qText string) (string, error) {
resps = append(resps, rv.RawText) resps = append(resps, rv.RawText)
logger.Debug("RAG result", "slug", rv.Slug, "filename", rv.FileName, "raw_text_len", len(rv.RawText)) logger.Debug("RAG result", "slug", rv.Slug, "filename", rv.FileName, "raw_text_len", len(rv.RawText))
} }
if len(resps) == 0 { if len(resps) == 0 {
logger.Info("No RAG results found for query", "original_query", qText, "question_count", len(questions)) logger.Info("No RAG results found for query", "original_query", qText, "question_count", len(questions))
return "No related results from RAG vector storage.", nil return "No related results from RAG vector storage.", nil
} }
result := strings.Join(resps, "\n") result := strings.Join(resps, "\n")
logger.Debug("RAG query completed", "result_len", len(result), "response_count", len(resps)) logger.Debug("RAG query completed", "result_len", len(result), "response_count", len(resps))
return result, nil return result, nil
@@ -836,7 +826,10 @@ func chatRound(r *models.ChatRoundReq) error {
if cfg.WriteNextMsgAsCompletionAgent != "" { if cfg.WriteNextMsgAsCompletionAgent != "" {
botPersona = cfg.WriteNextMsgAsCompletionAgent botPersona = cfg.WriteNextMsgAsCompletionAgent
} }
defer func() { botRespMode = false }() defer func() {
botRespMode = false
ClearImageAttachment()
}()
// check that there is a model set to use if is not local // check that there is a model set to use if is not local
choseChunkParser() choseChunkParser()
reader, err := chunkParser.FormMsg(r.UserMsg, r.Role, r.Resume) reader, err := chunkParser.FormMsg(r.UserMsg, r.Role, r.Resume)
@@ -862,13 +855,7 @@ func chatRound(r *models.ChatRoundReq) error {
} else if strings.HasSuffix(prevText, "\n") { } else if strings.HasSuffix(prevText, "\n") {
nl = "\n" nl = "\n"
} }
fmt.Fprintf(textView, "%s[-:-:b](%d) ", nl, msgIdx) fmt.Fprintf(textView, "%s[-:-:b](%d) %s[-:-:-]\n", nl, msgIdx, roleToIcon(botPersona))
fmt.Fprint(textView, roleToIcon(botPersona))
fmt.Fprint(textView, "[-:-:-]\n")
if cfg.ThinkUse && !strings.Contains(cfg.CurrentAPI, "v1") {
// fmt.Fprint(textView, "<think>")
chunkChan <- "<think>"
}
} else { } else {
msgIdx = len(chatBody.Messages) - 1 msgIdx = len(chatBody.Messages) - 1
} }
@@ -1246,7 +1233,6 @@ func chatToTextSlice(messages []models.RoleMsg, showSys bool) []string {
func chatToText(messages []models.RoleMsg, showSys bool) string { func chatToText(messages []models.RoleMsg, showSys bool) string {
s := chatToTextSlice(messages, showSys) s := chatToTextSlice(messages, showSys)
text := strings.Join(s, "\n") text := strings.Join(s, "\n")
// Collapse thinking blocks if enabled // Collapse thinking blocks if enabled
if thinkingCollapsed { if thinkingCollapsed {
text = thinkRE.ReplaceAllStringFunc(text, func(match string) string { text = thinkRE.ReplaceAllStringFunc(text, func(match string) string {
@@ -1270,7 +1256,6 @@ func chatToText(messages []models.RoleMsg, showSys bool) string {
} }
} }
} }
return text return text
} }

View File

@@ -18,7 +18,6 @@ type Config struct {
UserRole string `toml:"UserRole"` UserRole string `toml:"UserRole"`
ToolRole string `toml:"ToolRole"` ToolRole string `toml:"ToolRole"`
ToolUse bool `toml:"ToolUse"` ToolUse bool `toml:"ToolUse"`
ThinkUse bool `toml:"ThinkUse"`
StripThinkingFromAPI bool `toml:"StripThinkingFromAPI"` StripThinkingFromAPI bool `toml:"StripThinkingFromAPI"`
ReasoningEffort string `toml:"ReasoningEffort"` ReasoningEffort string `toml:"ReasoningEffort"`
AssistantRole string `toml:"AssistantRole"` AssistantRole string `toml:"AssistantRole"`

View File

@@ -165,9 +165,6 @@ Those could be switched in program, but also could be set up in config. Those could be switched in program, but also could be set up in config.
#### ToolUse #### ToolUse
- Enable or disable explanation of tools to llm, so it could use them. - Enable or disable explanation of tools to llm, so it could use them.
#### ThinkUse
- Enable or disable insertion of JsonSerializerToken at the beginning of llm resp.
### StripThinkingFromAPI (`true`) ### StripThinkingFromAPI (`true`)
- Strip thinking blocks from messages before sending to LLM. Keeps them in chat history for local viewing but reduces token usage in API calls. - Strip thinking blocks from messages before sending to LLM. Keeps them in chat history for local viewing but reduces token usage in API calls.

9
llm.go
View File

@@ -184,9 +184,6 @@ func (lcp LCPCompletion) FormMsg(msg, role string, resume bool) (io.Reader, erro
botMsgStart := "\n" + botPersona + ":\n" botMsgStart := "\n" + botPersona + ":\n"
prompt += botMsgStart prompt += botMsgStart
} }
if cfg.ThinkUse && !cfg.ToolUse {
prompt += "<think>"
}
logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse, logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse,
"msg", msg, "resume", resume, "prompt", prompt, "multimodal_data_count", len(multimodalData)) "msg", msg, "resume", resume, "prompt", prompt, "multimodal_data_count", len(multimodalData))
payload := models.NewLCPReq(prompt, chatBody.Model, multimodalData, payload := models.NewLCPReq(prompt, chatBody.Model, multimodalData,
@@ -423,9 +420,6 @@ func (ds DeepSeekerCompletion) FormMsg(msg, role string, resume bool) (io.Reader
botMsgStart := "\n" + botPersona + ":\n" botMsgStart := "\n" + botPersona + ":\n"
prompt += botMsgStart prompt += botMsgStart
} }
if cfg.ThinkUse && !cfg.ToolUse {
prompt += "<think>"
}
logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse, logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse,
"msg", msg, "resume", resume, "prompt", prompt) "msg", msg, "resume", resume, "prompt", prompt)
payload := models.NewDSCompletionReq(prompt, chatBody.Model, payload := models.NewDSCompletionReq(prompt, chatBody.Model,
@@ -589,9 +583,6 @@ func (or OpenRouterCompletion) FormMsg(msg, role string, resume bool) (io.Reader
botMsgStart := "\n" + botPersona + ":\n" botMsgStart := "\n" + botPersona + ":\n"
prompt += botMsgStart prompt += botMsgStart
} }
if cfg.ThinkUse && !cfg.ToolUse {
prompt += "<think>"
}
stopSlice := chatBody.MakeStopSliceExcluding("", listChatRoles()) stopSlice := chatBody.MakeStopSliceExcluding("", listChatRoles())
logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse, logger.Debug("checking prompt for /completion", "tool_use", cfg.ToolUse,
"msg", msg, "resume", resume, "prompt", prompt, "stop_strings", stopSlice) "msg", msg, "resume", resume, "prompt", prompt, "stop_strings", stopSlice)

View File

@@ -241,8 +241,7 @@ func (m *RoleMsg) ToText(i int) string {
} }
finalContent.WriteString(contentStr) finalContent.WriteString(contentStr)
if m.Stats != nil { if m.Stats != nil {
finalContent.WriteString(fmt.Sprintf("\n[gray::i][%d tok, %.1fs, %.1f t/s][-:-:-]", fmt.Fprintf(&finalContent, "\n[gray::i][%d tok, %.1fs, %.1f t/s][-:-:-]", m.Stats.Tokens, m.Stats.Duration, m.Stats.TokensPerSec)
m.Stats.Tokens, m.Stats.Duration, m.Stats.TokensPerSec))
} }
textMsg := fmt.Sprintf("[-:-:b]%s[-:-:-]\n%s\n", icon, finalContent.String()) textMsg := fmt.Sprintf("[-:-:b]%s[-:-:-]\n%s\n", icon, finalContent.String())
return strings.ReplaceAll(textMsg, "\n\n", "\n") return strings.ReplaceAll(textMsg, "\n\n", "\n")

View File

@@ -115,9 +115,6 @@ func makePropsTable(props map[string]float32) *tview.Table {
row++ row++
} }
// Add checkboxes // Add checkboxes
addCheckboxRow("Insert <think> tag (/completion only)", cfg.ThinkUse, func(checked bool) {
cfg.ThinkUse = checked
})
addCheckboxRow("RAG use", cfg.RAGEnabled, func(checked bool) { addCheckboxRow("RAG use", cfg.RAGEnabled, func(checked bool) {
cfg.RAGEnabled = checked cfg.RAGEnabled = checked
}) })

7
tui.go
View File

@@ -995,13 +995,6 @@ func init() {
} }
// go chatRound(msgText, persona, textView, false, false) // go chatRound(msgText, persona, textView, false, false)
chatRoundChan <- &models.ChatRoundReq{Role: persona, UserMsg: msgText} chatRoundChan <- &models.ChatRoundReq{Role: persona, UserMsg: msgText}
// Also clear any image attachment after sending the message
go func() {
// Wait a short moment for the message to be processed, then clear the image attachment
// This allows the image to be sent with the current message if it was attached
// But clears it for the next message
ClearImageAttachment()
}()
} }
return nil return nil
} }