Enha: local onnx

This commit is contained in:
Grail Finder
2026-03-05 14:13:58 +03:00
parent c65c11bcfb
commit fbc955ca37
5 changed files with 172 additions and 7 deletions

4
go.mod
View File

@@ -7,7 +7,6 @@ require (
github.com/GrailFinder/google-translate-tts v0.1.3 github.com/GrailFinder/google-translate-tts v0.1.3
github.com/GrailFinder/searchagent v0.2.0 github.com/GrailFinder/searchagent v0.2.0
github.com/PuerkitoBio/goquery v1.11.0 github.com/PuerkitoBio/goquery v1.11.0
github.com/deckarep/golang-set/v2 v2.8.0
github.com/gdamore/tcell/v2 v2.13.2 github.com/gdamore/tcell/v2 v2.13.2
github.com/glebarez/go-sqlite v1.22.0 github.com/glebarez/go-sqlite v1.22.0
github.com/gopxl/beep/v2 v2.1.1 github.com/gopxl/beep/v2 v2.1.1
@@ -17,11 +16,14 @@ require (
github.com/neurosnap/sentences v1.1.2 github.com/neurosnap/sentences v1.1.2
github.com/playwright-community/playwright-go v0.5700.1 github.com/playwright-community/playwright-go v0.5700.1
github.com/rivo/tview v0.42.0 github.com/rivo/tview v0.42.0
github.com/takara-ai/go-tokenizers v1.0.0
github.com/yalue/onnxruntime_go v1.27.0
github.com/yuin/goldmark v1.4.13 github.com/yuin/goldmark v1.4.13
) )
require ( require (
github.com/andybalholm/cascadia v1.3.3 // indirect github.com/andybalholm/cascadia v1.3.3 // indirect
github.com/deckarep/golang-set/v2 v2.8.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect github.com/dustin/go-humanize v1.0.1 // indirect
github.com/ebitengine/oto/v3 v3.4.0 // indirect github.com/ebitengine/oto/v3 v3.4.0 // indirect
github.com/ebitengine/purego v0.9.1 // indirect github.com/ebitengine/purego v0.9.1 // indirect

4
go.sum
View File

@@ -81,6 +81,10 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/takara-ai/go-tokenizers v1.0.0 h1:C+UQl3fPFw08YQdwthzPZbqykh6yumzjPrSs+3OSe7o=
github.com/takara-ai/go-tokenizers v1.0.0/go.mod h1:2A7hN3gMtAARJ2V3sYyIzTDm+GNTudBX+CwUOyIVH2A=
github.com/yalue/onnxruntime_go v1.27.0 h1:c1YSgDNtpf0WGtxj3YeRIb8VC5LmM1J+Ve3uHdteC1U=
github.com/yalue/onnxruntime_go v1.27.0/go.mod h1:b4X26A8pekNb1ACJ58wAXgNKeUCGEAQ9dmACut9Sm/4=
github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=

View File

@@ -9,6 +9,10 @@ import (
"gf-lt/models" "gf-lt/models"
"log/slog" "log/slog"
"net/http" "net/http"
"github.com/takara-ai/go-tokenizers/tokenizers"
"github.com/yalue/onnxruntime_go"
) )
// Embedder defines the interface for embedding text // Embedder defines the interface for embedding text
@@ -134,11 +138,62 @@ func (a *APIEmbedder) EmbedSlice(lines []string) ([][]float32, error) {
return embeddings, nil return embeddings, nil
} }
// TODO: ONNXEmbedder implementation would go here
// This would require:
// 1. Loading ONNX models locally // 1. Loading ONNX models locally
// 2. Using a Go ONNX runtime (like gorgonia/onnx or similar) // 2. Using a Go ONNX runtime (like gorgonia/onnx or similar)
// 3. Converting text to embeddings without external API calls // 3. Converting text to embeddings without external API calls
//
// For now, we'll focus on the API implementation which is already working in the current system, type ONNXEmbedder struct {
// and can be extended later when we have ONNX runtime integration session *onnxruntime_go.DynamicAdvancedSession
tokenizer *tokenizers.Tokenizer
dims int // 768, 512, 256, or 128 for Matryoshka
}
// EmbedSlice embeds each input string and returns one vector per input,
// in the same order. Inputs are processed sequentially; the first error
// aborts the whole batch.
//
// NOTE(review): true batched inference (padding all sequences to one
// tensor) would be faster, but requires padding/truncation logic the
// tokenizer setup here does not show — left as a follow-up.
func (e *ONNXEmbedder) EmbedSlice(texts []string) ([][]float32, error) {
	embeddings := make([][]float32, 0, len(texts))
	for i, text := range texts {
		emb, err := e.Embed(text)
		if err != nil {
			return nil, fmt.Errorf("embedding input %d: %w", i, err)
		}
		embeddings = append(embeddings, emb)
	}
	return embeddings, nil
}
// NewONNXEmbedder loads an ONNX embedding model from modelPath
// (e.g. onnx/embedgemma/model_q4.onnx) and its tokenizer from
// ./tokenizer.json, returning an embedder ready for inference.
func NewONNXEmbedder(modelPath string) (*ONNXEmbedder, error) {
	// Input/output names must match the model's graph; outputs are
	// bound positionally to these names in every later Run call.
	session, err := onnxruntime_go.NewDynamicAdvancedSession(
		modelPath,
		[]string{"input_ids", "attention_mask"},
		[]string{"sentence_embedding"},
		nil,
	)
	if err != nil {
		return nil, fmt.Errorf("loading onnx model %q: %w", modelPath, err)
	}
	tokenizer, err := tokenizers.FromFile("./tokenizer.json")
	if err != nil {
		// Release the session so a tokenizer failure does not leak
		// the C-side onnxruntime resources.
		session.Destroy()
		return nil, fmt.Errorf("loading tokenizer: %w", err)
	}
	return &ONNXEmbedder{
		session:   session,
		tokenizer: tokenizer,
		// Full EmbedGemma output width; smaller Matryoshka widths
		// (512/256/128) would need truncation after inference.
		dims: 768,
	}, nil
}
// Embed tokenizes text and runs one inference pass, returning the
// sentence embedding of length e.dims.
func (e *ONNXEmbedder) Embed(text string) ([]float32, error) {
	tokens := e.tokenizer.Encode(text, true)
	rawIDs := tokens.GetIds()
	rawMask := tokens.GetAttentionMask()
	// Model inputs are int64 tensors of shape (batch=1, seq_len);
	// convert element-wise since the tokenizer's integer type may differ.
	ids := make([]int64, len(rawIDs))
	for i, v := range rawIDs {
		ids[i] = int64(v)
	}
	mask := make([]int64, len(rawMask))
	for i, v := range rawMask {
		mask[i] = int64(v)
	}
	inputShape := onnxruntime_go.NewShape(1, int64(len(ids)))
	idTensor, err := onnxruntime_go.NewTensor(inputShape, ids)
	if err != nil {
		return nil, fmt.Errorf("creating input_ids tensor: %w", err)
	}
	defer idTensor.Destroy()
	maskTensor, err := onnxruntime_go.NewTensor(inputShape, mask)
	if err != nil {
		return nil, fmt.Errorf("creating attention_mask tensor: %w", err)
	}
	defer maskTensor.Destroy()
	output, err := onnxruntime_go.NewEmptyTensor[float32](
		onnxruntime_go.NewShape(1, int64(e.dims)),
	)
	if err != nil {
		return nil, fmt.Errorf("creating output tensor: %w", err)
	}
	defer output.Destroy()
	// DynamicAdvancedSession.Run takes positional []Value inputs and
	// outputs matching the names given at session creation.
	if err := e.session.Run(
		[]onnxruntime_go.Value{idTensor, maskTensor},
		[]onnxruntime_go.Value{output},
	); err != nil {
		return nil, fmt.Errorf("onnx inference: %w", err)
	}
	// Copy out of the tensor: its backing buffer is released by the
	// deferred Destroy above.
	embedding := make([]float32, len(output.GetData()))
	copy(embedding, output.GetData())
	return embedding, nil
}

View File

@@ -246,7 +246,7 @@ func (r *RAG) extractImportantPhrases(query string) string {
break break
} }
} }
if isImportant || len(word) > 3 { if isImportant || len(word) >= 3 {
important = append(important, word) important = append(important, word)
} }
} }

104
rag_issues.md Normal file
View File

@@ -0,0 +1,104 @@
# RAG Implementation Issues and Proposed Solutions
## Overview
The current RAG system fails to retrieve relevant information for specific queries, as demonstrated by the inability to find the "two she bears" reference in the KJV Bible (2 Kings 2:23-24). While the system retrieves documents containing the word "bear", it misses the actual verse, indicating fundamental flaws in chunking, query processing, retrieval, and answer synthesis. Below we dissect each problem and propose concrete solutions.
---
### Problem 1: Chunking Destroys Semantic Coherence
- **Problem description**
The current chunking splits text into sentences and groups them by a simple word count threshold (`RAGWordLimit`). This ignores document structure (chapters, headings) and can cut through narrative units, scattering related content across multiple chunks. For the Bible query, the story of Elisha and the bears likely spans multiple verses; splitting it prevents any single chunk from containing the full context, diluting the embedding signal and making retrieval difficult.
- **Proposed solution**
- **Structure-aware chunking**: Use the EPUB's internal structure (chapters, sections) to create chunks that align with logical content units (e.g., by chapter or story).
- **Overlap between chunks**: Add a configurable overlap (e.g., 10–20% of chunk size) to preserve continuity, ensuring key phrases like "two she bears" are not split across boundaries.
- **Rich metadata**: Store book name, chapter, and verse numbers with each chunk to enable filtering and source attribution.
- **Fallback to recursive splitting**: For documents without clear structure, use a recursive character text splitter with overlap (similar to LangChain's `RecursiveCharacterTextSplitter`) to maintain semantic boundaries (paragraphs, sentences).
---
### Problem 2: Query Refinement Strips Critical Context
- **Problem description**
`RefineQuery` removes stop words and applies keyword-based filtering that discards semantically important modifiers. For "two she bears", the word "she" (a gender modifier) may be treated as a stop word, leaving "two bears". This loses the specificity of the query and causes the embedding to drift toward generic "bear" contexts. The rule-based approach cannot understand that "she bears" is a key phrase in the biblical story.
- **Proposed solution**
- **Entity-aware query preservation**: Use a lightweight NLP model (e.g., spaCy or a BERT-based NER tagger) to identify and retain key entities (quantities, animals, names) while only removing truly irrelevant stop words.
- **Intelligent query rewriting**: Employ a small LLM (or a set of transformation rules) to generate query variations that reflect likely biblical phrasing, e.g., "two bears came out of the wood" or "Elisha and the bears".
- **Contextual stop word removal**: Instead of a static list, use a POS tagger to keep adjectives, nouns, and verbs while removing only function words that don't carry meaning.
- **Disable refinement for short queries**: If the query is already concise (like "two she bears"), skip aggressive filtering.
---
### Problem 3: Embedding Similarity Fails for Rare or Specific Phrases
- **Problem description**
Dense embeddings excel at capturing semantic similarity but can fail when the query contains rare phrases or when the relevant passage is embedded in a noisy chunk. The verse "there came forth two she bears out of the wood" shares only the word "bears" with the query, and its embedding may be pulled toward the average of surrounding verses. Consequently, the similarity score may be lower than that of other chunks containing the word "bear" in generic contexts.
- **Proposed solution**
- **Hybrid retrieval**: Combine dense embeddings with BM25 (keyword) search. BM25 excels at exact phrase matching and would likely retrieve the verse based on "two bears" even if the embedding is weak.
- Use a library like [blevesearch](https://github.com/blevesearch/bleve) to index text alongside vectors.
- Fuse results using Reciprocal Rank Fusion (RRF) or a weighted combination.
- **Query expansion**: Add relevant terms to the query (e.g., "Elisha", "2 Kings") to improve embedding alignment.
- **Fine-tuned embeddings**: Consider using an embedding model fine-tuned on domain-specific data (e.g., biblical texts) if this is a recurring use case.
---
### Problem 4: Reranking Heuristics Are Insufficient
- **Problem description**
`RerankResults` boosts results based on simple keyword matching and file name heuristics. This coarse approach cannot reliably promote the correct verse over false positives. The adjustment `distance - score/100` is arbitrary and may not reflect true relevance.
- **Proposed solution**
- **Cross-encoder reranking**: After retrieving top candidates (e.g., top 20) with hybrid search, rerank them using a cross-encoder model that directly computes the relevance score between the query and each chunk.
- Models like `cross-encoder/ms-marco-MiniLM-L-6-v2` are lightweight and can be run locally or via a microservice.
- **Score normalization**: Use the cross-encoder scores to reorder results, discarding low-scoring ones.
- **Contextual boosting**: If metadata (e.g., chapter/verse) is available, boost results that match the query's expected location (if inferable).
---
### Problem 5: Answer Synthesis Is Not Generative
- **Problem description**
`SynthesizeAnswer` embeds a prompt and attempts to retrieve a pre-stored answer, falling back to concatenating truncated chunks. This is fundamentally flawed: RAG requires an LLM to generate a coherent answer from retrieved context. In the Bible example, even if the correct verse were retrieved, the system would only output a snippet, not an answer explaining the reference.
- **Proposed solution**
- **Integrate an LLM for generation**: Use a local model (via Ollama, Llama.cpp) or a cloud API (OpenAI, etc.) to synthesize answers.
- Construct a prompt that includes the retrieved chunks (with metadata) and the user query.
- Instruct the model to answer based solely on the provided context and cite sources (e.g., "According to 2 Kings 2:24...").
- **Implement a fallback**: If no relevant chunks are retrieved, return a message like "I couldn't find that information in your documents."
- **Streaming support**: For better UX, stream the answer token-by-token.
---
### Problem 6: Concurrency and Error Handling
- **Problem description**
The code uses a mutex only in `LoadRAG`, leaving other methods vulnerable to race conditions. The global status channel `LongJobStatusCh` may drop messages due to `select/default`, and errors are sometimes logged but not propagated. Ingestion is synchronous and slow.
- **Proposed solution**
- **Add context support**: Pass `context.Context` to all methods to allow cancellation and timeouts.
- **Worker pools for embedding**: Parallelize batch embedding with a controlled number of workers to respect API rate limits and speed up ingestion.
- **Retry logic**: Implement exponential backoff for transient API errors.
- **Replace global channel**: Use a callback or an injectable status reporter to avoid dropping messages.
- **Fine-grained locking**: Protect shared state (e.g., `storage`) with appropriate synchronization.
---
### Problem 7: Lack of Monitoring and Evaluation
- **Problem description**
There are no metrics to track retrieval quality, latency, or user satisfaction. The failure case was discovered manually; without systematic evaluation, regressions will go unnoticed.
- **Proposed solution**
- **Log key metrics**: Record query, retrieved chunk IDs, scores, and latency for each search.
- **User feedback**: Add a mechanism for users to rate answers (thumbs up/down) and use this data to improve retrieval.
- **Offline evaluation**: Create a test set of queries and expected relevant chunks (e.g., the Bible example) to measure recall@k, MRR, etc., and run it after each change.
---
## Summary
Fixing the RAG pipeline requires a multi-pronged approach:
1. **Structure-aware chunking** with metadata.
2. **Hybrid retrieval** (dense + sparse).
3. **Query understanding** via entity preservation and intelligent rewriting.
4. **Cross-encoder reranking** for precision.
5. **LLM-based answer generation**.
6. **Robust concurrency and error handling**.
7. **Monitoring and evaluation** to track improvements.
Implementing these changes will transform the system from a brittle keyword matcher into a reliable knowledge assistant capable of handling nuanced queries like the "two she bears" reference.