Enhance: OpenRouter free models

This commit is contained in:
Grail Finder
2025-09-05 15:10:40 +03:00
parent 53dc5a5e8d
commit 0276000bfa
4 changed files with 195 additions and 35 deletions

View File

@@ -4,8 +4,8 @@ import (
"bytes" "bytes"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt"
"flag" "flag"
"fmt"
"io" "io"
"log/slog" "log/slog"
"net/http" "net/http"
@@ -243,7 +243,7 @@ func callLLM(prompt string, apiURL string) ([]byte, error) {
} }
} }
maxRetries := 6 maxRetries := 3
baseDelay := 2 * time.Second baseDelay := 2 * time.Second
for attempt := 0; attempt < maxRetries; attempt++ { for attempt := 0; attempt < maxRetries; attempt++ {

View File

@@ -74,6 +74,49 @@ type DSResp struct {
Object string `json:"object"` Object string `json:"object"`
} }
// OpenRouter chat completions response (supports tool calls)
//
// ORChatRespChoice is a single choice from the OpenRouter chat
// completions API. The inline Message may carry tool calls in
// addition to plain text content.
type ORChatRespChoice struct {
	// Index is the position of this choice within the response.
	Index int `json:"index"`
	// Message is the assistant reply for this choice.
	Message struct {
		Role    string `json:"role"`
		Content string `json:"content"`
		// ToolCalls is populated when the model requests tool use;
		// omitted from JSON when empty.
		ToolCalls []ToolCall `json:"tool_calls,omitempty"`
	} `json:"message"`
	// FinishReason reports why generation stopped for this choice.
	FinishReason string `json:"finish_reason"`
}
// ORChatResp is the top-level OpenRouter chat completions response
// envelope: response identity, the generated choices, and token usage.
type ORChatResp struct {
	// ID is the provider-assigned identifier of this response.
	ID string `json:"id"`
	// Choices holds the generated completions (typically one).
	Choices []ORChatRespChoice `json:"choices"`
	// Created is the creation timestamp as reported by the API
	// (unit not specified here; presumably unix seconds — TODO confirm).
	Created int `json:"created"`
	// Model is the identifier of the model that produced the response.
	Model string `json:"model"`
	// Object is the API object type string.
	Object string `json:"object"`
	// Usage reports token accounting for the request/response pair.
	Usage struct {
		PromptTokens     int `json:"prompt_tokens"`
		CompletionTokens int `json:"completion_tokens"`
		TotalTokens      int `json:"total_tokens"`
	} `json:"usage"`
}
// OpenRouter completions response (text only)
//
// ORCompletionResp is the OpenRouter legacy text-completions response:
// choices carry plain text only, with no tool-call support.
type ORCompletionResp struct {
	// ID is the provider-assigned identifier of this response.
	ID string `json:"id"`
	// Object is the API object type string.
	Object string `json:"object"`
	// Created is the creation timestamp as reported by the API
	// (unit not specified here; presumably unix seconds — TODO confirm).
	Created int `json:"created"`
	// Model is the identifier of the model that produced the response.
	Model string `json:"model"`
	// Choices holds the generated text completions.
	Choices []struct {
		// Text is the generated completion text.
		Text string `json:"text"`
		// Index is the position of this choice within the response.
		Index int `json:"index"`
		// Logprobs is left untyped; its shape is provider-dependent.
		Logprobs any `json:"logprobs"`
		// FinishReason reports why generation stopped for this choice.
		FinishReason string `json:"finish_reason"`
	} `json:"choices"`
	// Usage reports token accounting for the request/response pair.
	Usage struct {
		PromptTokens     int `json:"prompt_tokens"`
		CompletionTokens int `json:"completion_tokens"`
		TotalTokens      int `json:"total_tokens"`
	} `json:"usage"`
}
type LLMResp struct { type LLMResp struct {
Index int `json:"index"` Index int `json:"index"`
Content string `json:"content"` Content string `json:"content"`

86
or.go Normal file
View File

@@ -0,0 +1,86 @@
package main
import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)
var (
	// ormodelsLink is the OpenRouter endpoint that lists the model catalog.
	ormodelsLink = "https://openrouter.ai/api/v1/models"
	// ORFreeModels is a hardcoded fallback list of OpenRouter model IDs
	// that are free to use (the ":free" variants).
	ORFreeModels = []string{
		"google/gemini-2.0-flash-exp:free",
		"deepseek/deepseek-chat-v3-0324:free",
		"mistralai/mistral-small-3.2-24b-instruct:free",
		"qwen/qwen3-14b:free",
		"google/gemma-3-27b-it:free",
		"meta-llama/llama-3.3-70b-instruct:free",
	}
)
// ORModel describes one entry of the OpenRouter model catalog as
// returned by the /api/v1/models endpoint.
type ORModel struct {
	// ID is the model identifier used in API requests.
	ID string `json:"id"`
	// CanonicalSlug is the canonical model slug.
	CanonicalSlug string `json:"canonical_slug"`
	// HuggingFaceID is the corresponding Hugging Face model ID, if any.
	HuggingFaceID string `json:"hugging_face_id"`
	// Name is the human-readable model name.
	Name string `json:"name"`
	// Created is the creation timestamp as reported by the API.
	Created int `json:"created"`
	// Description is the provider-supplied model description.
	Description string `json:"description"`
	// ContextLength is the maximum context window in tokens.
	ContextLength int `json:"context_length"`
	// Architecture describes the model's modalities and tokenizer.
	Architecture struct {
		Modality         string   `json:"modality"`
		InputModalities  []string `json:"input_modalities"`
		OutputModalities []string `json:"output_modalities"`
		Tokenizer        string   `json:"tokenizer"`
		InstructType     any      `json:"instruct_type"`
	} `json:"architecture"`
	// Pricing lists per-unit prices as decimal strings; "0" means free.
	Pricing struct {
		Prompt            string `json:"prompt"`
		Completion        string `json:"completion"`
		Request           string `json:"request"`
		Image             string `json:"image"`
		Audio             string `json:"audio"`
		WebSearch         string `json:"web_search"`
		InternalReasoning string `json:"internal_reasoning"`
	} `json:"pricing,omitempty"`
	// TopProvider describes limits of the model's primary provider.
	TopProvider struct {
		ContextLength       int  `json:"context_length"`
		MaxCompletionTokens int  `json:"max_completion_tokens"`
		IsModerated         bool `json:"is_moderated"`
	} `json:"top_provider"`
	// PerRequestLimits is left untyped; its shape is provider-dependent.
	PerRequestLimits any `json:"per_request_limits"`
	// SupportedParameters lists request parameters the model accepts.
	SupportedParameters []string `json:"supported_parameters"`
}

// ORModels is the envelope of the model catalog response.
// https://openrouter.ai/api/v1/models
type ORModels struct {
	Data []ORModel `json:"data"`
}

// ListFree returns the IDs of every model whose prompt, completion and
// request prices are all the string "0", i.e. the free-to-use models.
func (m *ORModels) ListFree() []string {
	free := []string{}
	for _, model := range m.Data {
		pricing := model.Pricing
		isFree := pricing.Prompt == "0" &&
			pricing.Completion == "0" &&
			pricing.Request == "0"
		if isFree {
			free = append(free, model.ID)
		}
	}
	return free
}
// ListORModels fetches the OpenRouter model catalog and returns the IDs
// of the models that are free to use (see ORModels.ListFree).
//
// Returns an error when the request fails, the server responds with a
// non-200 status, or the body cannot be decoded.
func ListORModels() ([]string, error) {
	// http.Get uses http.DefaultClient, which has no timeout, so a
	// stalled server would hang the caller forever; bound the request.
	client := &http.Client{Timeout: 15 * time.Second}
	resp, err := client.Get(ormodelsLink)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		err := fmt.Errorf("failed to fetch or models; status: %s", resp.Status)
		return nil, err
	}
	data := &ORModels{}
	if err := json.NewDecoder(resp.Body).Decode(data); err != nil {
		return nil, err
	}
	freeModels := data.ListFree()
	return freeModels, nil
}

View File

@@ -152,68 +152,99 @@ func (p *lcpRespParser) MakePayload(prompt string) io.Reader {
type openRouterParser struct { type openRouterParser struct {
log *slog.Logger log *slog.Logger
modelIndex uint32 modelIndex uint32
useChatAPI bool
supportsTools bool
} }
func NewOpenRouterParser(log *slog.Logger) *openRouterParser { func NewOpenRouterParser(log *slog.Logger) *openRouterParser {
return &openRouterParser{ return &openRouterParser{
log: log, log: log,
modelIndex: 0, modelIndex: 0,
useChatAPI: false, // Default to completion API which is more widely supported
supportsTools: false, // Don't assume tool support
} }
} }
func (p *openRouterParser) ParseBytes(body []byte) (string, error) { func (p *openRouterParser) ParseBytes(body []byte) (string, error) {
// parsing logic here // If using chat API, parse as chat completion response (supports tool calls)
resp := models.DSResp{} if p.useChatAPI {
if err := json.Unmarshal(body, &resp); err != nil { resp := models.ORChatResp{}
p.log.Error("failed to unmarshal", "error", err) if err := json.Unmarshal(body, &resp); err != nil {
return "", err p.log.Error("failed to unmarshal openrouter chat response", "error", err)
} return "", err
if len(resp.Choices) == 0 { }
p.log.Error("empty choices", "resp", resp) if len(resp.Choices) == 0 {
err := errors.New("empty choices in resp") p.log.Error("empty choices in openrouter chat response", "resp", resp)
return "", err err := errors.New("empty choices in openrouter chat response")
} return "", err
}
// Check if the response contains tool calls choice := resp.Choices[0]
choice := resp.Choices[0]
// Handle response with message field (OpenAI format) // Check if the response contains tool calls
if choice.Message.Role != "" {
if len(choice.Message.ToolCalls) > 0 { if len(choice.Message.ToolCalls) > 0 {
// Handle tool call response // Handle tool call response
toolCall := choice.Message.ToolCalls[0] toolCall := choice.Message.ToolCalls[0]
// Return a special marker indicating tool usage // Return a special marker indicating tool usage
return fmt.Sprintf("[TOOL_CALL:%s]", toolCall.Function.Name), nil return fmt.Sprintf("[TOOL_CALL:%s]", toolCall.Function.Name), nil
} }
// Regular text response // Regular text response
return choice.Message.Content, nil return choice.Message.Content, nil
} }
// Handle response with text field (legacy format) // If using completion API, parse as text completion response (no tool calls)
return choice.Text, nil resp := models.ORCompletionResp{}
if err := json.Unmarshal(body, &resp); err != nil {
p.log.Error("failed to unmarshal openrouter completion response", "error", err)
return "", err
}
if len(resp.Choices) == 0 {
p.log.Error("empty choices in openrouter completion response", "resp", resp)
err := errors.New("empty choices in openrouter completion response")
return "", err
}
// Return the text content
return resp.Choices[0].Text, nil
} }
func (p *openRouterParser) MakePayload(prompt string) io.Reader { func (p *openRouterParser) MakePayload(prompt string) io.Reader {
// Models to rotate through if p.useChatAPI {
// TODO: to config // Use chat completions API with messages format (supports tool calls)
model := "deepseek/deepseek-r1:free" payload := struct {
// Get next model index using atomic addition for thread safety Model string `json:"model"`
p.modelIndex++ Messages []models.RoleMsg `json:"messages"`
}{
Model: "openai/gpt-4o-mini",
Messages: []models.RoleMsg{
{Role: "user", Content: prompt},
},
}
b, err := json.Marshal(payload)
if err != nil {
p.log.Error("failed to marshal openrouter chat payload", "error", err)
return nil
}
p.log.Debug("made openrouter chat payload", "payload", string(b))
return bytes.NewReader(b)
}
// Use completions API with prompt format (no tool calls)
payload := struct { payload := struct {
Model string `json:"model"` Model string `json:"model"`
Prompt string `json:"prompt"` Prompt string `json:"prompt"`
Tools []models.Tool `json:"tools,omitempty"`
}{ }{
Model: model, Model: "openai/gpt-4o-mini",
Prompt: prompt, Prompt: prompt,
Tools: baseTools, // Include the tools in the request
} }
b, err := json.Marshal(payload) b, err := json.Marshal(payload)
if err != nil { if err != nil {
p.log.Error("failed to marshal openrouter payload", "error", err) p.log.Error("failed to marshal openrouter completion payload", "error", err)
return nil return nil
} }
p.log.Debug("made openrouter payload", "model", model, "payload", string(b)) p.log.Debug("made openrouter completion payload", "payload", string(b))
return bytes.NewReader(b) return bytes.NewReader(b)
} }