Chore: remove old rag

Grail Finder
2025-10-19 13:14:56 +03:00
parent dfa164e871
commit 60ccaed200
8 changed files with 101 additions and 393 deletions

bot.go
View File

@@ -9,7 +9,7 @@ import (
 	"gf-lt/config"
 	"gf-lt/extra"
 	"gf-lt/models"
-	"gf-lt/rag_new"
+	"gf-lt/rag"
 	"gf-lt/storage"
 	"io"
 	"log/slog"
@@ -41,7 +41,7 @@ var (
 	defaultStarter      = []models.RoleMsg{}
 	defaultStarterBytes = []byte{}
 	interruptResp       = false
-	ragger              *rag_new.RAG
+	ragger              *rag.RAG
 	chunkParser         ChunkParser
 	lastToolCall        *models.FuncCall
 	//nolint:unused // TTS_ENABLED conditionally uses this
@@ -277,13 +277,13 @@ func chatRagUse(qText string) (string, error) {
 			logger.Error("failed to get embs", "error", err, "index", i, "question", q)
 			continue
 		}
 		// Create EmbeddingResp struct for the search
 		embeddingResp := &models.EmbeddingResp{
 			Embedding: emb,
 			Index:     0, // Not used in search but required for the struct
 		}
 		vecs, err := ragger.SearchEmb(embeddingResp)
 		if err != nil {
 			logger.Error("failed to query embs", "error", err, "index", i, "question", q)
@@ -571,7 +571,7 @@ func init() {
 	if store == nil {
 		os.Exit(1)
 	}
-	ragger = rag_new.New(logger, store, cfg)
+	ragger = rag.New(logger, store, cfg)
 	// https://github.com/coreydaley/ggerganov-llama.cpp/blob/master/examples/server/README.md
 	// load all chats in memory
 	if _, err := loadHistoryChats(); err != nil {

View File

@@ -1,10 +1,10 @@
-package rag_new
+package rag

 import (
 	"bytes"
-	"gf-lt/config"
 	"encoding/json"
 	"fmt"
+	"gf-lt/config"
 	"log/slog"
 	"net/http"
 )
@@ -17,9 +17,9 @@ type Embedder interface {
 // APIEmbedder implements embedder using an API (like Hugging Face, OpenAI, etc.)
 type APIEmbedder struct {
 	logger *slog.Logger
 	client *http.Client
 	cfg    *config.Config
 }

 func NewAPIEmbedder(l *slog.Logger, cfg *config.Config) *APIEmbedder {
@@ -44,11 +44,11 @@ func (a *APIEmbedder) Embed(text []string) ([][]float32, error) {
 		a.logger.Error("failed to create new req", "err", err.Error())
 		return nil, err
 	}

 	if a.cfg.HFToken != "" {
 		req.Header.Add("Authorization", "Bearer "+a.cfg.HFToken)
 	}

 	resp, err := a.client.Do(req)
 	if err != nil {
 		a.logger.Error("failed to embed text", "err", err.Error())
@@ -95,4 +95,5 @@ func (a *APIEmbedder) EmbedSingle(text string) ([]float32, error) {
 // 3. Converting text to embeddings without external API calls
 //
 // For now, we'll focus on the API implementation which is already working in the current system,
 // and can be extended later when we have ONNX runtime integration
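The trailing comment leaves local embedding as future work. For illustration only, a minimal stand-in that satisfies the same two-method shape (Embed and EmbedSingle) that APIEmbedder exposes; the hash-based vectors are placeholders, not a real model, and the Embedder interface is assumed to match these signatures:

package rag

import "hash/fnv"

// LocalEmbedder is a hypothetical offline embedder sketch.
type LocalEmbedder struct {
	dim int
}

func NewLocalEmbedder(dim int) *LocalEmbedder {
	return &LocalEmbedder{dim: dim}
}

// Embed returns one deterministic pseudo-vector per input string.
func (l *LocalEmbedder) Embed(text []string) ([][]float32, error) {
	out := make([][]float32, len(text))
	for i, t := range text {
		out[i] = l.hashEmbed(t)
	}
	return out, nil
}

// EmbedSingle mirrors APIEmbedder's single-string variant.
func (l *LocalEmbedder) EmbedSingle(text string) ([]float32, error) {
	return l.hashEmbed(text), nil
}

// hashEmbed derives a fixed-size vector from an FNV hash; purely a placeholder.
func (l *LocalEmbedder) hashEmbed(t string) []float32 {
	h := fnv.New32a()
	h.Write([]byte(t))
	seed := h.Sum32()
	v := make([]float32, l.dim)
	for i := range v {
		seed = seed*1664525 + 1013904223 // LCG step
		v[i] = float32(seed%2000)/1000.0 - 1.0 // values in [-1, 1)
	}
	return v
}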

View File

@@ -1,265 +0,0 @@
-package rag
-
-import (
-	"bytes"
-	"gf-lt/config"
-	"gf-lt/models"
-	"gf-lt/storage"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"log/slog"
-	"net/http"
-	"os"
-	"path"
-	"strings"
-	"sync"
-
-	"github.com/neurosnap/sentences/english"
-)
-
-var (
-	LongJobStatusCh = make(chan string, 1)
-	// messages
-	FinishedRAGStatus   = "finished loading RAG file; press Enter"
-	LoadedFileRAGStatus = "loaded file"
-	ErrRAGStatus        = "some error occured; failed to transfer data to vector db"
-)
-
-type RAG struct {
-	logger *slog.Logger
-	store  storage.FullRepo
-	cfg    *config.Config
-}
-
-func New(l *slog.Logger, s storage.FullRepo, cfg *config.Config) *RAG {
-	return &RAG{
-		logger: l,
-		store:  s,
-		cfg:    cfg,
-	}
-}
-
-func wordCounter(sentence string) int {
-	return len(strings.Split(sentence, " "))
-}
-
-func (r *RAG) LoadRAG(fpath string) error {
-	data, err := os.ReadFile(fpath)
-	if err != nil {
-		return err
-	}
-	r.logger.Debug("rag: loaded file", "fp", fpath)
-	LongJobStatusCh <- LoadedFileRAGStatus
-	fileText := string(data)
-	tokenizer, err := english.NewSentenceTokenizer(nil)
-	if err != nil {
-		return err
-	}
-	sentences := tokenizer.Tokenize(fileText)
-	sents := make([]string, len(sentences))
-	for i, s := range sentences {
-		sents[i] = s.Text
-	}
-	var (
-		maxChSize = 1000
-		left      = 0
-		right     = r.cfg.RAGBatchSize
-		batchCh   = make(chan map[int][]string, maxChSize)
-		vectorCh  = make(chan []models.VectorRow, maxChSize)
-		errCh     = make(chan error, 1)
-		doneCh    = make(chan bool, 1)
-		lock      = new(sync.Mutex)
-	)
-	defer close(doneCh)
-	defer close(errCh)
-	defer close(batchCh)
-	// group sentences
-	paragraphs := []string{}
-	par := strings.Builder{}
-	for i := 0; i < len(sents); i++ {
-		par.WriteString(sents[i])
-		if wordCounter(par.String()) > int(r.cfg.RAGWordLimit) {
-			paragraphs = append(paragraphs, par.String())
-			par.Reset()
-		}
-	}
-	if len(paragraphs) < int(r.cfg.RAGBatchSize) {
-		r.cfg.RAGBatchSize = len(paragraphs)
-	}
-	// fill input channel
-	ctn := 0
-	for {
-		if int(right) > len(paragraphs) {
-			batchCh <- map[int][]string{left: paragraphs[left:]}
-			break
-		}
-		batchCh <- map[int][]string{left: paragraphs[left:right]}
-		left, right = right, right+r.cfg.RAGBatchSize
-		ctn++
-	}
-	finishedBatchesMsg := fmt.Sprintf("finished batching batches#: %d; paragraphs: %d; sentences: %d\n", len(batchCh), len(paragraphs), len(sents))
-	r.logger.Debug(finishedBatchesMsg)
-	LongJobStatusCh <- finishedBatchesMsg
-	for w := 0; w < int(r.cfg.RAGWorkers); w++ {
-		go r.batchToVectorHFAsync(lock, w, batchCh, vectorCh, errCh, doneCh, path.Base(fpath))
-	}
-	// wait for emb to be done
-	<-doneCh
-	// write to db
-	return r.writeVectors(vectorCh)
-}
-
-func (r *RAG) writeVectors(vectorCh chan []models.VectorRow) error {
-	for {
-		for batch := range vectorCh {
-			for _, vector := range batch {
-				if err := r.store.WriteVector(&vector); err != nil {
-					r.logger.Error("failed to write vector", "error", err, "slug", vector.Slug)
-					LongJobStatusCh <- ErrRAGStatus
-					continue // a duplicate is not critical
-					// return err
-				}
-			}
-			r.logger.Debug("wrote batch to db", "size", len(batch), "vector_chan_len", len(vectorCh))
-			if len(vectorCh) == 0 {
-				r.logger.Debug("finished writing vectors")
-				LongJobStatusCh <- FinishedRAGStatus
-				defer close(vectorCh)
-				return nil
-			}
-		}
-	}
-}
-
-func (r *RAG) batchToVectorHFAsync(lock *sync.Mutex, id int, inputCh <-chan map[int][]string,
-	vectorCh chan<- []models.VectorRow, errCh chan error, doneCh chan bool, filename string) {
-	for {
-		lock.Lock()
-		if len(inputCh) == 0 {
-			if len(doneCh) == 0 {
-				doneCh <- true
-			}
-			lock.Unlock()
-			return
-		}
-		select {
-		case linesMap := <-inputCh:
-			for leftI, v := range linesMap {
-				r.fecthEmbHF(v, errCh, vectorCh, fmt.Sprintf("%s_%d", filename, leftI), filename)
-			}
-			lock.Unlock()
-		case err := <-errCh:
-			r.logger.Error("got an error", "error", err)
-			lock.Unlock()
-			return
-		}
-		r.logger.Debug("to vector batches", "batches#", len(inputCh), "worker#", id)
-		LongJobStatusCh <- fmt.Sprintf("converted to vector; batches: %d, worker#: %d", len(inputCh), id)
-	}
-}
-
-func (r *RAG) fecthEmbHF(lines []string, errCh chan error, vectorCh chan<- []models.VectorRow, slug, filename string) {
-	payload, err := json.Marshal(
-		map[string]any{"inputs": lines, "options": map[string]bool{"wait_for_model": true}},
-	)
-	if err != nil {
-		r.logger.Error("failed to marshal payload", "err:", err.Error())
-		errCh <- err
-		return
-	}
-	// nolint
-	req, err := http.NewRequest("POST", r.cfg.EmbedURL, bytes.NewReader(payload))
-	if err != nil {
-		r.logger.Error("failed to create new req", "err:", err.Error())
-		errCh <- err
-		return
-	}
-	req.Header.Add("Authorization", "Bearer "+r.cfg.HFToken)
-	resp, err := http.DefaultClient.Do(req)
-	if err != nil {
-		r.logger.Error("failed to embedd line", "err:", err.Error())
-		errCh <- err
-		return
-	}
-	defer resp.Body.Close()
-	if resp.StatusCode != 200 {
-		r.logger.Error("non 200 resp", "code", resp.StatusCode)
-		return
-	}
-	emb := [][]float32{}
-	if err := json.NewDecoder(resp.Body).Decode(&emb); err != nil {
-		r.logger.Error("failed to embedd line", "err:", err.Error())
-		errCh <- err
-		return
-	}
-	if len(emb) == 0 {
-		r.logger.Error("empty emb")
-		err = errors.New("empty emb")
-		errCh <- err
-		return
-	}
-	vectors := make([]models.VectorRow, len(emb))
-	for i, e := range emb {
-		vector := models.VectorRow{
-			Embeddings: e,
-			RawText:    lines[i],
-			Slug:       fmt.Sprintf("%s_%d", slug, i),
-			FileName:   filename,
-		}
-		vectors[i] = vector
-	}
-	vectorCh <- vectors
-}
-
-func (r *RAG) LineToVector(line string) ([]float32, error) {
-	lines := []string{line}
-	payload, err := json.Marshal(
-		map[string]any{"inputs": lines, "options": map[string]bool{"wait_for_model": true}},
-	)
-	if err != nil {
-		r.logger.Error("failed to marshal payload", "err:", err.Error())
-		return nil, err
-	}
-	// nolint
-	req, err := http.NewRequest("POST", r.cfg.EmbedURL, bytes.NewReader(payload))
-	if err != nil {
-		r.logger.Error("failed to create new req", "err:", err.Error())
-		return nil, err
-	}
-	req.Header.Add("Authorization", "Bearer "+r.cfg.HFToken)
-	resp, err := http.DefaultClient.Do(req)
-	if err != nil {
-		r.logger.Error("failed to embedd line", "err:", err.Error())
-		return nil, err
-	}
-	defer resp.Body.Close()
-	if resp.StatusCode != 200 {
-		err = fmt.Errorf("non 200 resp; code: %v", resp.StatusCode)
-		r.logger.Error(err.Error())
-		return nil, err
-	}
-	emb := [][]float32{}
-	if err := json.NewDecoder(resp.Body).Decode(&emb); err != nil {
-		r.logger.Error("failed to embedd line", "err:", err.Error())
-		return nil, err
-	}
-	if len(emb) == 0 || len(emb[0]) == 0 {
-		r.logger.Error("empty emb")
-		err = errors.New("empty emb")
-		return nil, err
-	}
-	return emb[0], nil
-}
-
-func (r *RAG) SearchEmb(emb *models.EmbeddingResp) ([]models.VectorRow, error) {
-	return r.store.SearchClosest(emb.Embedding)
-}
-
-func (r *RAG) ListLoaded() ([]string, error) {
-	return r.store.ListFiles()
-}
-
-func (r *RAG) RemoveFile(filename string) error {
-	return r.store.RemoveEmbByFileName(filename)
-}

View File

@@ -1,10 +1,10 @@
-package rag_new
+package rag

 import (
-	"fmt"
 	"gf-lt/config"
 	"gf-lt/models"
 	"gf-lt/storage"
+	"fmt"
 	"log/slog"
 	"os"
 	"path"
@@ -16,37 +16,37 @@ import (
var ( var (
// Status messages for TUI integration // Status messages for TUI integration
LongJobStatusCh = make(chan string, 10) // Increased buffer size to prevent blocking LongJobStatusCh = make(chan string, 10) // Increased buffer size to prevent blocking
FinishedRAGStatus = "finished loading RAG file; press Enter" FinishedRAGStatus = "finished loading RAG file; press Enter"
LoadedFileRAGStatus = "loaded file" LoadedFileRAGStatus = "loaded file"
ErrRAGStatus = "some error occurred; failed to transfer data to vector db" ErrRAGStatus = "some error occurred; failed to transfer data to vector db"
) )
type RAG struct { type RAG struct {
logger *slog.Logger logger *slog.Logger
store storage.FullRepo store storage.FullRepo
cfg *config.Config cfg *config.Config
embedder Embedder embedder Embedder
storage *VectorStorage storage *VectorStorage
} }
func New(l *slog.Logger, s storage.FullRepo, cfg *config.Config) *RAG { func New(l *slog.Logger, s storage.FullRepo, cfg *config.Config) *RAG {
// Initialize with API embedder by default, could be configurable later // Initialize with API embedder by default, could be configurable later
embedder := NewAPIEmbedder(l, cfg) embedder := NewAPIEmbedder(l, cfg)
rag := &RAG{ rag := &RAG{
logger: l, logger: l,
store: s, store: s,
cfg: cfg, cfg: cfg,
embedder: embedder, embedder: embedder,
storage: NewVectorStorage(l, s), storage: NewVectorStorage(l, s),
} }
// Create the necessary tables // Create the necessary tables
if err := rag.storage.CreateTables(); err != nil { if err := rag.storage.CreateTables(); err != nil {
l.Error("failed to create vector tables", "error", err) l.Error("failed to create vector tables", "error", err)
} }
return rag return rag
} }
@@ -61,7 +61,7 @@ func (r *RAG) LoadRAG(fpath string) error {
 	}
 	r.logger.Debug("rag: loaded file", "fp", fpath)
 	LongJobStatusCh <- LoadedFileRAGStatus

 	fileText := string(data)
 	tokenizer, err := english.NewSentenceTokenizer(nil)
 	if err != nil {
@@ -72,7 +72,7 @@ func (r *RAG) LoadRAG(fpath string) error {
 	for i, s := range sentences {
 		sents[i] = s.Text
 	}

 	// Group sentences into paragraphs based on word limit
 	paragraphs := []string{}
 	par := strings.Builder{}
@@ -84,7 +84,7 @@ func (r *RAG) LoadRAG(fpath string) error {
 		}
 		par.WriteString(sents[i])
 	}
 	if wordCounter(par.String()) > int(r.cfg.RAGWordLimit) {
 		paragraph := strings.TrimSpace(par.String())
 		if paragraph != "" {
@@ -93,7 +93,7 @@ func (r *RAG) LoadRAG(fpath string) error {
 			par.Reset()
 		}
 	}
 	// Handle any remaining content in the paragraph buffer
 	if par.Len() > 0 {
 		paragraph := strings.TrimSpace(par.String())
@@ -101,16 +101,16 @@ func (r *RAG) LoadRAG(fpath string) error {
 			paragraphs = append(paragraphs, paragraph)
 		}
 	}

 	// Adjust batch size if needed
 	if len(paragraphs) < int(r.cfg.RAGBatchSize) && len(paragraphs) > 0 {
 		r.cfg.RAGBatchSize = len(paragraphs)
 	}

 	if len(paragraphs) == 0 {
 		return fmt.Errorf("no valid paragraphs found in file")
 	}

 	var (
 		maxChSize = 100
 		left      = 0
@@ -121,11 +121,11 @@ func (r *RAG) LoadRAG(fpath string) error {
 		doneCh    = make(chan bool, 1)
 		lock      = new(sync.Mutex)
 	)
 	defer close(doneCh)
 	defer close(errCh)
 	defer close(batchCh)

 	// Fill input channel with batches
 	ctn := 0
 	totalParagraphs := len(paragraphs)
@@ -138,19 +138,19 @@ func (r *RAG) LoadRAG(fpath string) error {
 		left, right = right, right+r.cfg.RAGBatchSize
 		ctn++
 	}
 	finishedBatchesMsg := fmt.Sprintf("finished batching batches#: %d; paragraphs: %d; sentences: %d\n", ctn+1, len(paragraphs), len(sents))
 	r.logger.Debug(finishedBatchesMsg)
 	LongJobStatusCh <- finishedBatchesMsg

 	// Start worker goroutines
 	for w := 0; w < int(r.cfg.RAGWorkers); w++ {
 		go r.batchToVectorAsync(lock, w, batchCh, vectorCh, errCh, doneCh, path.Base(fpath))
 	}

 	// Wait for embedding to be done
 	<-doneCh

 	// Write vectors to storage
 	return r.writeVectors(vectorCh)
 }
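The batching step above fills batchCh with index-keyed windows over the paragraph slice before the workers drain it. A standalone sketch of just that left/right window walk, with stand-in data and batch size:

package main

import "fmt"

func main() {
	paragraphs := []string{"p0", "p1", "p2", "p3", "p4"}
	batchSize := 2
	batchCh := make(chan map[int][]string, 10)
	left, right := 0, batchSize
	for {
		if right > len(paragraphs) {
			// Final, possibly short, window keyed by its start index.
			batchCh <- map[int][]string{left: paragraphs[left:]}
			break
		}
		batchCh <- map[int][]string{left: paragraphs[left:right]}
		left, right = right, right+batchSize
	}
	close(batchCh)
	for b := range batchCh {
		fmt.Println(b) // map[0:[p0 p1]], then map[2:[p2 p3]], then map[4:[p4]]
	}
}

Keying each window by its start index is what lets the workers build stable slugs (filename_leftI) no matter which goroutine picks the batch up.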
@@ -182,14 +182,14 @@ func (r *RAG) batchToVectorAsync(lock *sync.Mutex, id int, inputCh <-chan map[in
 			doneCh <- true
 		}
 	}()
 	for {
 		lock.Lock()
 		if len(inputCh) == 0 {
 			lock.Unlock()
 			return
 		}
 		select {
 		case linesMap := <-inputCh:
 			for leftI, lines := range linesMap {
@@ -207,7 +207,7 @@ func (r *RAG) batchToVectorAsync(lock *sync.Mutex, id int, inputCh <-chan map[in
 		default:
 			lock.Unlock()
 		}
 		r.logger.Debug("processed batch", "batches#", len(inputCh), "worker#", id)
 		LongJobStatusCh <- fmt.Sprintf("converted to vector; batches: %d, worker#: %d", len(inputCh), id)
 	}
 }
@@ -220,14 +220,14 @@ func (r *RAG) fetchEmb(lines []string, errCh chan error, vectorCh chan<- []model
 		errCh <- err
 		return err
 	}

 	if len(embeddings) == 0 {
 		err := fmt.Errorf("no embeddings returned")
 		r.logger.Error("empty embeddings")
 		errCh <- err
 		return err
 	}

 	vectors := make([]models.VectorRow, len(embeddings))
 	for i, emb := range embeddings {
 		vector := models.VectorRow{
@@ -238,7 +238,7 @@ func (r *RAG) fetchEmb(lines []string, errCh chan error, vectorCh chan<- []model
 		}
 		vectors[i] = vector
 	}

 	vectorCh <- vectors
 	return nil
 }
@@ -257,4 +257,5 @@ func (r *RAG) ListLoaded() ([]string, error) {
 func (r *RAG) RemoveFile(filename string) error {
 	return r.storage.RemoveEmbByFileName(filename)
 }
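After the rename, the package's public surface is New, LoadRAG, SearchEmb, ListLoaded, and RemoveFile. A hypothetical caller, wired the same way bot.go's init does; the config literal, database path, and file path are stand-ins for this sketch:

package main

import (
	"fmt"
	"log/slog"
	"os"

	"gf-lt/config"
	"gf-lt/models"
	"gf-lt/rag"
	"gf-lt/storage"
)

// demoRAG shows the call sequence; it needs a fully populated config
// (workers, batch size, embed URL) to actually do useful work.
func demoRAG(cfg *config.Config, dbPath, notesPath string) error {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	store := storage.NewProviderSQL(dbPath, logger)
	if store == nil {
		return fmt.Errorf("failed to open store at %s", dbPath)
	}
	ragger := rag.New(logger, store, cfg)
	if err := ragger.LoadRAG(notesPath); err != nil {
		return err
	}
	files, err := ragger.ListLoaded()
	if err != nil {
		return err
	}
	logger.Debug("loaded rag files", "files", files)
	// Search takes an embedding wrapped in models.EmbeddingResp,
	// exactly as chatRagUse does in bot.go above.
	emb := make([]float32, 384) // placeholder query vector
	vecs, err := ragger.SearchEmb(&models.EmbeddingResp{Embedding: emb, Index: 0})
	if err != nil {
		return err
	}
	logger.Debug("closest rows", "count", len(vecs))
	return nil
}

func main() {
	_ = demoRAG // wiring demo only; call with a real config to run
}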

View File

@@ -1,10 +1,10 @@
-package rag_new
+package rag

 import (
-	"gf-lt/models"
-	"gf-lt/storage"
 	"encoding/binary"
 	"fmt"
+	"gf-lt/models"
+	"gf-lt/storage"
 	"log/slog"
 	"sort"
 	"strings"
@@ -23,7 +23,7 @@ type VectorStorage struct {
 func NewVectorStorage(logger *slog.Logger, store storage.FullRepo) *VectorStorage {
 	return &VectorStorage{
 		logger: logger,
 		sqlxDB: store.DB(), // Use the new DB() method
 		store:  store,
 	}
 }
@@ -53,7 +53,7 @@ func (vs *VectorStorage) CreateTables() error {
 		`CREATE INDEX IF NOT EXISTS idx_embeddings_5120_filename ON embeddings_5120(filename)`,
 		`CREATE INDEX IF NOT EXISTS idx_embeddings_384_slug ON embeddings_384(slug)`,
 		`CREATE INDEX IF NOT EXISTS idx_embeddings_5120_slug ON embeddings_5120(slug)`,
 		// Additional indexes that may help with searches
 		`CREATE INDEX IF NOT EXISTS idx_embeddings_384_created_at ON embeddings_384(created_at)`,
 		`CREATE INDEX IF NOT EXISTS idx_embeddings_5120_created_at ON embeddings_5120(created_at)`,
@@ -140,7 +140,7 @@ func (vs *VectorStorage) SearchClosest(query []float32) ([]models.VectorRow, err
 	// For better performance, instead of loading all vectors at once,
 	// we'll implement batching and potentially add L2 distance-based pre-filtering
 	// since cosine similarity is related to L2 distance for normalized vectors
 	querySQL := fmt.Sprintf("SELECT embeddings, slug, raw_text, filename FROM %s", tableName)
 	rows, err := vs.sqlxDB.Query(querySQL)
 	if err != nil {
@@ -153,27 +153,27 @@ func (vs *VectorStorage) SearchClosest(query []float32) ([]models.VectorRow, err
 		vector   models.VectorRow
 		distance float32
 	}

 	var topResults []SearchResult

 	// Process vectors one by one to avoid loading everything into memory
 	for rows.Next() {
 		var (
 			embeddingsBlob          []byte
 			slug, rawText, fileName string
 		)
 		if err := rows.Scan(&embeddingsBlob, &slug, &rawText, &fileName); err != nil {
 			vs.logger.Error("failed to scan row", "error", err)
 			continue
 		}

 		storedEmbeddings := DeserializeVector(embeddingsBlob)

 		// Calculate cosine similarity (returns value between -1 and 1, where 1 is most similar)
 		similarity := cosineSimilarity(query, storedEmbeddings)
 		distance := 1 - similarity // Convert to distance where 0 is most similar

 		result := SearchResult{
 			vector: models.VectorRow{
 				Embeddings: storedEmbeddings,
@@ -183,34 +183,34 @@ func (vs *VectorStorage) SearchClosest(query []float32) ([]models.VectorRow, err
 			},
 			distance: distance,
 		}

 		// Add to top results and maintain only top 3
 		topResults = append(topResults, result)

 		// Sort and keep only top 3
 		sort.Slice(topResults, func(i, j int) bool {
 			return topResults[i].distance < topResults[j].distance
 		})
 		if len(topResults) > 3 {
 			topResults = topResults[:3] // Keep only closest 3
 		}
 	}

 	// Convert back to VectorRow slice
 	var results []models.VectorRow
 	for _, result := range topResults {
 		result.vector.Distance = result.distance
 		results = append(results, result.vector)
 	}

 	return results, nil
 }

 // ListFiles returns a list of all loaded files
 func (vs *VectorStorage) ListFiles() ([]string, error) {
 	var fileLists [][]string

 	// Query both tables and combine results
 	for _, table := range []string{"embeddings_384", "embeddings_5120"} {
 		query := fmt.Sprintf("SELECT DISTINCT filename FROM %s", table)
@@ -219,7 +219,7 @@ func (vs *VectorStorage) ListFiles() ([]string, error) {
 			// Continue if one table doesn't exist
 			continue
 		}
 		var files []string
 		for rows.Next() {
 			var filename string
@@ -229,10 +229,10 @@ func (vs *VectorStorage) ListFiles() ([]string, error) {
 			files = append(files, filename)
 		}
 		rows.Close()
 		fileLists = append(fileLists, files)
 	}

 	// Combine and deduplicate
 	fileSet := make(map[string]bool)
 	var allFiles []string
@@ -244,25 +244,25 @@ func (vs *VectorStorage) ListFiles() ([]string, error) {
 			}
 		}
 	}

 	return allFiles, nil
 }

 // RemoveEmbByFileName removes all embeddings associated with a specific filename
 func (vs *VectorStorage) RemoveEmbByFileName(filename string) error {
 	var errors []string
 	for _, table := range []string{"embeddings_384", "embeddings_5120"} {
 		query := fmt.Sprintf("DELETE FROM %s WHERE filename = ?", table)
 		if _, err := vs.sqlxDB.Exec(query, filename); err != nil {
 			errors = append(errors, err.Error())
 		}
 	}
 	if len(errors) > 0 {
 		return fmt.Errorf("errors occurred: %s", strings.Join(errors, "; "))
 	}
 	return nil
 }
@@ -297,4 +297,5 @@ func sqrt(f float32) float32 {
 		guess = (guess + f/guess) / 2
 	}
 	return guess
 }
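SearchClosest leans on cosineSimilarity, whose body falls outside the shown hunks. A sketch consistent with the hand-rolled Newton's-method sqrt above; note the relationship the earlier comment alludes to: for unit-length vectors, ||a-b||^2 = 2*(1 - cos), so ranking by cosine distance and by L2 distance agree.

package rag

// cosineSimilarity is a sketch of the helper SearchClosest calls above;
// the committed implementation may differ. sqrt is the Newton's-method
// helper shown at the end of this file.
func cosineSimilarity(a, b []float32) float32 {
	if len(a) != len(b) || len(a) == 0 {
		return 0
	}
	var dot, normA, normB float32
	for i := range a {
		dot += a[i] * b[i]
		normA += a[i] * a[i]
		normB += b[i] * b[i]
	}
	if normA == 0 || normB == 0 {
		return 0
	}
	// cos(theta) = a.b / (|a| * |b|)
	return dot / (sqrt(normA) * sqrt(normB))
}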

View File

@@ -5,8 +5,6 @@ import (
 	"fmt"
 	"io/fs"
 	"strings"
-
-	_ "github.com/asg017/sqlite-vec-go-bindings/ncruces"
 )

 //go:embed migrations/*
@@ -53,8 +51,8 @@ func (p *ProviderSQL) executeMigration(migrationsDir fs.FS, fileName string) err
 }

 func (p *ProviderSQL) executeSQL(sqlContent []byte) error {
-	// Connect to the database (example using a simple connection)
-	err := p.s3Conn.Exec(string(sqlContent))
+	// Execute the migration content using standard database connection
+	_, err := p.db.Exec(string(sqlContent))
 	if err != nil {
 		return fmt.Errorf("failed to execute SQL: %w", err)
 	}
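For context, a minimal sketch of the same pattern in isolation: reading an embedded migration and executing it through the standard database/sql connection. The file layout and migration name here are assumptions, and whether one Exec call may contain several semicolon-separated statements depends on the driver:

package main

import (
	"database/sql"
	"embed"
	"fmt"

	_ "github.com/glebarez/go-sqlite" // same pure-Go driver the storage package uses
)

//go:embed migrations/*
var migrations embed.FS

func runMigration(db *sql.DB, name string) error {
	content, err := migrations.ReadFile("migrations/" + name)
	if err != nil {
		return fmt.Errorf("failed to read migration: %w", err)
	}
	// Hand the raw SQL to the driver, mirroring executeSQL above.
	if _, err := db.Exec(string(content)); err != nil {
		return fmt.Errorf("failed to execute SQL: %w", err)
	}
	return nil
}

func main() {
	db, err := sql.Open("sqlite", "gf.db")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	if err := runMigration(db, "001_init.up.sql"); err != nil { // hypothetical file name
		panic(err)
	}
}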

View File

@@ -6,7 +6,6 @@ import (
 	_ "github.com/glebarez/go-sqlite"
 	"github.com/jmoiron/sqlx"
-	"github.com/ncruces/go-sqlite3"
 )

 type FullRepo interface {
@@ -28,7 +27,6 @@ type ChatHistory interface {
 type ProviderSQL struct {
 	db     *sqlx.DB
-	s3Conn *sqlite3.Conn
 	logger *slog.Logger
 }
@@ -97,7 +95,7 @@ func (p ProviderSQL) ChatGetMaxID() (uint32, error) {
 	return id, err
 }

-// opens two connections
+// opens database connection
 func NewProviderSQL(dbPath string, logger *slog.Logger) FullRepo {
 	db, err := sqlx.Open("sqlite", dbPath)
 	if err != nil {
@@ -105,11 +103,7 @@ func NewProviderSQL(dbPath string, logger *slog.Logger) FullRepo {
 		return nil
 	}
 	p := ProviderSQL{db: db, logger: logger}
-	p.s3Conn, err = sqlite3.Open(dbPath)
-	if err != nil {
-		logger.Error("failed to open vecdb connection", "error", err)
-		return nil
-	}
 	p.Migrate()
 	return p
 }

View File

@@ -66,35 +66,13 @@ func (p ProviderSQL) WriteVector(row *models.VectorRow) error {
 	if err != nil {
 		return err
 	}
-	stmt, _, err := p.s3Conn.Prepare(
-		fmt.Sprintf("INSERT INTO %s(embedding, slug, raw_text, filename) VALUES (?, ?, ?, ?)", tableName))
-	if err != nil {
-		p.logger.Error("failed to prep a stmt", "error", err)
-		return err
-	}
-	defer stmt.Close()
 	serializedEmbeddings := SerializeVector(row.Embeddings)
-	if err := stmt.BindBlob(1, serializedEmbeddings); err != nil {
-		p.logger.Error("failed to bind", "error", err)
-		return err
-	}
-	if err := stmt.BindText(2, row.Slug); err != nil {
-		p.logger.Error("failed to bind", "error", err)
-		return err
-	}
-	if err := stmt.BindText(3, row.RawText); err != nil {
-		p.logger.Error("failed to bind", "error", err)
-		return err
-	}
-	if err := stmt.BindText(4, row.FileName); err != nil {
-		p.logger.Error("failed to bind", "error", err)
-		return err
-	}
-	err = stmt.Exec()
-	if err != nil {
-		return err
-	}
-	return nil
+	query := fmt.Sprintf("INSERT INTO %s(embedding, slug, raw_text, filename) VALUES (?, ?, ?, ?)", tableName)
+	_, err = p.db.Exec(query, serializedEmbeddings, row.Slug, row.RawText, row.FileName)
+	return err
 }

 func decodeUnsafe(bs []byte) []float32 {
@@ -110,30 +88,30 @@ func (p ProviderSQL) SearchClosest(q []float32) ([]models.VectorRow, error) {
 func (p ProviderSQL) ListFiles() ([]string, error) {
 	q := fmt.Sprintf("SELECT filename FROM %s GROUP BY filename", vecTableName384)
-	stmt, _, err := p.s3Conn.Prepare(q)
+	rows, err := p.db.Query(q)
 	if err != nil {
 		return nil, err
 	}
-	defer stmt.Close()
+	defer rows.Close()
 	resp := []string{}
-	for stmt.Step() {
-		resp = append(resp, stmt.ColumnText(0))
+	for rows.Next() {
+		var filename string
+		if err := rows.Scan(&filename); err != nil {
+			return nil, err
+		}
+		resp = append(resp, filename)
 	}
-	if err := stmt.Err(); err != nil {
+	if err := rows.Err(); err != nil {
 		return nil, err
 	}
 	return resp, nil
 }

 func (p ProviderSQL) RemoveEmbByFileName(filename string) error {
 	q := fmt.Sprintf("DELETE FROM %s WHERE filename = ?", vecTableName384)
-	stmt, _, err := p.s3Conn.Prepare(q)
-	if err != nil {
-		return err
-	}
-	defer stmt.Close()
-	if err := stmt.BindText(1, filename); err != nil {
-		return err
-	}
-	return stmt.Exec()
+	_, err := p.db.Exec(q, filename)
+	return err
 }
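WriteVector stores each embedding as a single blob via SerializeVector, and the read paths decode it with DeserializeVector/decodeUnsafe; none of those bodies appear in this diff. A plausible encoding sketch, assuming a packed little-endian float32 layout:

package storage

import (
	"encoding/binary"
	"math"
)

// SerializeVector packs each float32 as 4 little-endian bytes.
// Sketch only; the committed implementation may differ.
func SerializeVector(v []float32) []byte {
	buf := make([]byte, 4*len(v))
	for i, f := range v {
		binary.LittleEndian.PutUint32(buf[i*4:], math.Float32bits(f))
	}
	return buf
}

// DeserializeVector reverses SerializeVector.
func DeserializeVector(b []byte) []float32 {
	v := make([]float32, len(b)/4)
	for i := range v {
		v[i] = math.Float32frombits(binary.LittleEndian.Uint32(b[i*4:]))
	}
	return v
}

Under this layout a 384-dimension row occupies 1536 bytes per embedding blob, which is what the embeddings_384 table would hold per row.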