Enha: llama.cpp on non-localhost

Grail Finder
2026-03-07 18:42:12 +03:00
parent c8f00198d6
commit bf655a1087
3 changed files with 25 additions and 28 deletions

37 bot.go

@@ -16,7 +16,6 @@ import (
 	"log/slog"
 	"net"
 	"net/http"
-	"net/url"
 	"os"
 	"regexp"
 	"slices"
@@ -253,12 +252,7 @@ func createClient(connectTimeout time.Duration) *http.Client {
 }
 
 func warmUpModel() {
-	u, err := url.Parse(cfg.CurrentAPI)
-	if err != nil {
-		return
-	}
-	host := u.Hostname()
-	if host != "localhost" && host != "127.0.0.1" && host != "::1" {
+	if !isLocalLlamacpp() {
 		return
 	}
 	// Check if model is already loaded
@@ -1404,20 +1398,21 @@ func updateModelLists() {
 		time.Sleep(time.Millisecond * 100)
 	}
 	// set already loaded model in llama.cpp
-	if strings.Contains(cfg.CurrentAPI, "localhost") || strings.Contains(cfg.CurrentAPI, "127.0.0.1") {
-		localModelsMu.Lock()
-		defer localModelsMu.Unlock()
-		for i := range LocalModels {
-			if strings.Contains(LocalModels[i], models.LoadedMark) {
-				m := strings.TrimPrefix(LocalModels[i], models.LoadedMark)
-				cfg.CurrentModel = m
-				chatBody.Model = m
-				cachedModelColor = "green"
-				updateStatusLine()
-				updateToolCapabilities()
-				app.Draw()
-				return
-			}
+	if !isLocalLlamacpp() {
+		return
+	}
+	localModelsMu.Lock()
+	defer localModelsMu.Unlock()
+	for i := range LocalModels {
+		if strings.Contains(LocalModels[i], models.LoadedMark) {
+			m := strings.TrimPrefix(LocalModels[i], models.LoadedMark)
+			cfg.CurrentModel = m
+			chatBody.Model = m
+			cachedModelColor = "green"
+			updateStatusLine()
+			updateToolCapabilities()
+			app.Draw()
+			return
 		}
 	}
 }
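Taken together, the two bot.go hunks replace ad-hoc substring checks against literal localhost spellings with the shared isLocalLlamacpp() guard, so a llama.cpp server reachable over the network is no longer skipped. A minimal standalone sketch of the difference between the old and new gates (the LAN address is hypothetical, not from the commit):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical endpoint: llama.cpp served from another machine on the LAN.
	api := "http://192.168.1.5:8080/v1/chat/completions"

	// Old gate in updateModelLists: only literal localhost spellings passed,
	// so a remote llama.cpp instance never had its loaded model synced.
	oldGate := strings.Contains(api, "localhost") || strings.Contains(api, "127.0.0.1")

	// New gate: anything that is not a known cloud provider is assumed to be
	// llama.cpp, wherever it is hosted.
	newGate := !(strings.Contains(api, "openrouter") || strings.Contains(api, "deepseek"))

	fmt.Println(oldGate, newGate) // false true
}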


@@ -5,7 +5,6 @@ import (
 	"gf-lt/models"
 	"gf-lt/pngmeta"
 	"image"
-	"net/url"
 	"os"
 	"os/exec"
 	"path"
@@ -323,12 +322,10 @@ func strInSlice(s string, sl []string) bool {
 
 // isLocalLlamacpp checks if the current API is a local llama.cpp instance.
 func isLocalLlamacpp() bool {
-	u, err := url.Parse(cfg.CurrentAPI)
-	if err != nil {
+	if strings.Contains(cfg.CurrentAPI, "openrouter") || strings.Contains(cfg.CurrentAPI, "deepseek") {
 		return false
 	}
-	host := u.Hostname()
-	return host == "localhost" || host == "127.0.0.1" || host == "::1"
+	return true
 }
 
 // getModelColor returns the cached color tag for the model name.
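The rewritten helper inverts the old allowlist: instead of parsing the URL and matching local hostnames, it denylists the known cloud providers and treats every other endpoint as llama.cpp. A table-driven sketch of the resulting behavior (standalone re-implementation for illustration; the LAN host is hypothetical):

package main

import (
	"fmt"
	"strings"
)

// Mirrors the new heuristic: an endpoint counts as llama.cpp unless it
// names a known cloud provider.
func isLocalLlamacpp(api string) bool {
	if strings.Contains(api, "openrouter") || strings.Contains(api, "deepseek") {
		return false
	}
	return true
}

func main() {
	for _, api := range []string{
		"http://localhost:8080/completion",              // true: local llama.cpp
		"http://192.168.1.5:8080/completion",            // true: remote llama.cpp (hypothetical)
		"https://openrouter.ai/api/v1/chat/completions", // false: cloud provider
	} {
		fmt.Printf("%-48s %v\n", api, isLocalLlamacpp(api))
	}
}

This is the point of the commit: a llama.cpp server on a non-localhost address now passes the check. The trade-off is that the function name drifts from its semantics, and any provider other than openrouter or deepseek would have to be added to the denylist by hand.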

9 llm.go

@@ -62,11 +62,11 @@ type ChunkParser interface {
 func choseChunkParser() {
 	chunkParser = LCPCompletion{}
 	switch cfg.CurrentAPI {
-	case "http://localhost:8080/completion":
+	case "http://localhost:8080/completion", "http://127.0.0.1:8080/completion":
 		chunkParser = LCPCompletion{}
 		logger.Debug("chosen lcpcompletion", "link", cfg.CurrentAPI)
 		return
-	case "http://localhost:8080/v1/chat/completions":
+	case "http://localhost:8080/v1/chat/completions", "http://127.0.0.1:8080/v1/chat/completions":
 		chunkParser = LCPChat{}
 		logger.Debug("chosen lcpchat", "link", cfg.CurrentAPI)
 		return
@@ -87,6 +87,11 @@ func choseChunkParser() {
 		logger.Debug("chosen openrouterchat", "link", cfg.CurrentAPI)
 		return
 	default:
+		logger.Warn("unexpected case, assuming llama.cpp on non default address", "link", cfg.CurrentAPI)
+		if strings.Contains(cfg.CurrentAPI, "chat") {
+			chunkParser = LCPChat{}
+			return
+		}
 		chunkParser = LCPCompletion{}
 	}
 }
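The new default branch gives unrecognized addresses a best-effort parser instead of unconditionally falling back to plain completion. A standalone sketch of the routing rule, with string names standing in for the real chunkParser assignments:

package main

import (
	"fmt"
	"strings"
)

// parserFor mirrors the fallback rule: a "chat" segment anywhere in the URL
// selects the chat-completions parser, anything else the /completion parser.
func parserFor(api string) string {
	if strings.Contains(api, "chat") {
		return "LCPChat"
	}
	return "LCPCompletion"
}

func main() {
	fmt.Println(parserFor("http://192.168.1.5:8080/v1/chat/completions")) // LCPChat
	fmt.Println(parserFor("http://192.168.1.5:8080/completion"))          // LCPCompletion
}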