package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log/slog"

	"grailbench/config"
	"grailbench/models"
)
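
// RespParser abstracts one model backend: MakePayload builds the JSON request
// body for a prompt, and ParseBytes extracts the completion text (or a
// [TOOL_CALL:<name>] marker) from the raw response body.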
type RespParser interface {
	ParseBytes(body []byte) (string, error)
	MakePayload(prompt string) io.Reader
}
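
// A caller typically pairs the two methods around one HTTP round trip. A
// minimal sketch, assuming a plain JSON POST endpoint (apiURL, the client,
// and the error handling are illustrative assumptions, not part of this file):
//
//	resp, err := http.Post(apiURL, "application/json", parser.MakePayload(prompt))
//	if err != nil {
//		return "", err
//	}
//	defer resp.Body.Close()
//	raw, err := io.ReadAll(resp.Body)
//	if err != nil {
//		return "", err
//	}
//	return parser.ParseBytes(raw)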

// deepSeekParser is the DeepSeek implementation of RespParser.
type deepSeekParser struct {
	log *slog.Logger
	cfg *config.Config
}

// NewDeepSeekParser wires a logger and config into a DeepSeek parser.
func NewDeepSeekParser(log *slog.Logger, cfg *config.Config) *deepSeekParser {
	return &deepSeekParser{log: log, cfg: cfg}
}

func (p *deepSeekParser) ParseBytes(body []byte) (string, error) {
	dsResp := models.DSResp{}
	if err := json.Unmarshal(body, &dsResp); err != nil {
		p.log.Error("failed to unmarshal", "error", err)
		return "", err
	}
	if len(dsResp.Choices) == 0 {
		p.log.Error("empty choices", "dsResp", dsResp)
		return "", errors.New("empty choices in dsResp")
	}
	choice := dsResp.Choices[0]

	// Chat-style response: a populated message field (OpenAI format).
	if choice.Message.Role != "" {
		// Tool-call response: surface the first call as a special marker
		// for the caller to dispatch on.
		if len(choice.Message.ToolCalls) > 0 {
			toolCall := choice.Message.ToolCalls[0]
			return fmt.Sprintf("[TOOL_CALL:%s]", toolCall.Function.Name), nil
		}
		// Regular text response.
		return choice.Message.Content, nil
	}

	// Legacy completion format: plain text field.
	return choice.Text, nil
}
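
// For reference, trimmed bodies and what ParseBytes returns for them. The
// field names assume models.DSResp mirrors the OpenAI-style schema used
// above (Choices -> Message -> ToolCalls/Content, plus a legacy Text):
//
//	{"choices":[{"message":{"role":"assistant","content":"4"}}]}  -> "4", nil
//	{"choices":[{"text":"4"}]}                                    -> "4", nil
//	{"choices":[]}                                                -> "", error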

// MakePayload builds a DeepSeek completions request body for prompt.
func (p *deepSeekParser) MakePayload(prompt string) io.Reader {
	payload := struct {
		Model            string        `json:"model"`
		Prompt           string        `json:"prompt"`
		Echo             bool          `json:"echo"`
		FrequencyPenalty float64       `json:"frequency_penalty"`
		Logprobs         int           `json:"logprobs"`
		MaxTokens        int           `json:"max_tokens"`
		PresencePenalty  float64       `json:"presence_penalty"`
		Stop             interface{}   `json:"stop"`
		Stream           bool          `json:"stream"`
		StreamOptions    interface{}   `json:"stream_options"`
		Suffix           interface{}   `json:"suffix"`
		Temperature      float64       `json:"temperature"`
		NProbs           int           `json:"n_probs"`
		TopP             float64       `json:"top_p"`
		Tools            []models.Tool `json:"tools,omitempty"`
	}{
		Model:            "deepseek-chat",
		Prompt:           prompt,
		Echo:             false,
		FrequencyPenalty: 0,
		Logprobs:         0,
		MaxTokens:        1024,
		PresencePenalty:  0,
		Stop:             nil,
		Stream:           false,
		StreamOptions:    nil,
		Suffix:           nil,
		Temperature:      1,
		NProbs:           10,
		TopP:             1,
		Tools:            baseTools, // include the shared tool definitions in the request
	}
	b, err := json.Marshal(payload)
	if err != nil {
		// The interface has no error return, so signal failure with a nil reader.
		p.log.Error("failed to marshal deepseek payload", "error", err)
		return nil
	}
	return bytes.NewReader(b)
}
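
// Marshalled, the request body looks like (abridged; tools is dropped via
// omitempty when baseTools is empty):
//
//	{"model":"deepseek-chat","prompt":"...","max_tokens":1024,
//	 "temperature":1,"n_probs":10,"top_p":1,"tools":[...]}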

// lcpRespParser is the llama.cpp implementation of RespParser.
type lcpRespParser struct {
	log *slog.Logger
	cfg *config.Config
}

// NewLCPRespParser wires a logger and config into a llama.cpp parser.
func NewLCPRespParser(log *slog.Logger, cfg *config.Config) *lcpRespParser {
	return &lcpRespParser{log: log, cfg: cfg}
}

func (p *lcpRespParser) ParseBytes(body []byte) (string, error) {
	// Unlike the chat-style APIs above, llama.cpp returns the generated text
	// in a single Content field, so there is no choices array to walk.
	resp := models.LLMResp{}
	if err := json.Unmarshal(body, &resp); err != nil {
		p.log.Error("failed to unmarshal", "error", err)
		return "", err
	}
	return resp.Content, nil
}
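
// A quick sanity check of the parser (illustrative; assumes the JSON tag on
// models.LLMResp.Content is "content", as in llama.cpp's /completion
// responses, and that cfg is in scope):
//
//	p := NewLCPRespParser(slog.Default(), cfg)
//	out, err := p.ParseBytes([]byte(`{"content":"4"}`))
//	// out == "4", err == nil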

// MakePayload builds a llama.cpp completion request body for prompt.
func (p *lcpRespParser) MakePayload(prompt string) io.Reader {
	payload := struct {
		Model            string        `json:"model"`
		Prompt           string        `json:"prompt"`
		FrequencyPenalty float64       `json:"frequency_penalty"`
		MaxTokens        int           `json:"max_tokens"`
		Stop             []string      `json:"stop"`
		Stream           bool          `json:"stream"`
		Temperature      float64       `json:"temperature"`
		TopP             float64       `json:"top_p"`
		Tools            []models.Tool `json:"tools,omitempty"`
	}{
		Model:            "local-model",
		Prompt:           prompt,
		FrequencyPenalty: 0,
		MaxTokens:        1024,
		Stop:             []string{"Q:\n", "A:\n"},
		Stream:           false,
		Temperature:      0.4,
		TopP:             1,
		Tools:            baseTools, // include tools, though the local model may not support them
	}
	b, err := json.Marshal(payload)
	if err != nil {
		// Should not happen for this struct, but handle it rather than panic.
		p.log.Error("failed to marshal lcp payload", "error", err)
		return nil
	}
	return bytes.NewReader(b)
}
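
// Marshalled, the request looks like (abridged):
//
//	{"model":"local-model","prompt":"...","max_tokens":1024,
//	 "stop":["Q:\n","A:\n"],"temperature":0.4,"top_p":1}
//
// The Q:/A: stop strings assume prompts are framed as question/answer pairs,
// so generation halts before the model begins a new turn.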

// openRouterParser is the OpenRouter implementation of RespParser. It can
// target either the chat completions endpoint or the plain completions
// endpoint, tracked by the flags below.
type openRouterParser struct {
	log           *slog.Logger
	cfg           *config.Config
	modelIndex    uint32
	useChatAPI    bool // chat completions (tool calls) vs. plain completions
	supportsTools bool // whether the selected model accepts tool definitions
}

func NewOpenRouterParser(log *slog.Logger, cfg *config.Config) *openRouterParser {
	return &openRouterParser{
		log:           log,
		cfg:           cfg,
		modelIndex:    0,
		useChatAPI:    false, // default to the completion API, which is more widely supported
		supportsTools: false, // don't assume tool support
	}
}
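
// The flag fields are package-visible, so call sites in this package can opt
// into the chat endpoint for models known to handle tool calls, e.g.
// (illustrative):
//
//	orp := NewOpenRouterParser(log, cfg)
//	orp.useChatAPI = true
//	orp.supportsTools = true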

func (p *openRouterParser) ParseBytes(body []byte) (string, error) {
	// Chat API: parse as a chat completion response, which may carry tool calls.
	if p.useChatAPI {
		resp := models.ORChatResp{}
		if err := json.Unmarshal(body, &resp); err != nil {
			p.log.Error("failed to unmarshal openrouter chat response", "error", err)
			return "", err
		}
		if len(resp.Choices) == 0 {
			p.log.Error("empty choices in openrouter chat response", "resp", resp)
			return "", errors.New("empty choices in openrouter chat response")
		}
		choice := resp.Choices[0]

		// Tool-call response: surface the first call as a special marker.
		if len(choice.Message.ToolCalls) > 0 {
			toolCall := choice.Message.ToolCalls[0]
			return fmt.Sprintf("[TOOL_CALL:%s]", toolCall.Function.Name), nil
		}

		// Regular text response.
		return choice.Message.Content, nil
	}

	// Completion API: parse as a text completion response; no tool calls here.
	resp := models.ORCompletionResp{}
	if err := json.Unmarshal(body, &resp); err != nil {
		p.log.Error("failed to unmarshal openrouter completion response", "error", err)
		return "", err
	}
	if len(resp.Choices) == 0 {
		p.log.Error("empty choices in openrouter completion response", "resp", resp)
		return "", errors.New("empty choices in openrouter completion response")
	}
	return resp.Choices[0].Text, nil
}
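
// For reference, the two body shapes this method distinguishes (abridged;
// assumes models.ORChatResp and models.ORCompletionResp mirror the
// OpenAI-style schema):
//
//	chat:       {"choices":[{"message":{"content":"...","tool_calls":[...]}}]}
//	completion: {"choices":[{"text":"..."}]}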

// MakePayload builds an OpenRouter request body for prompt, using the chat
// or the completion wire format depending on p.useChatAPI.
func (p *openRouterParser) MakePayload(prompt string) io.Reader {
	if p.useChatAPI {
		// Chat completions API: messages format, supports tool calls.
		payload := struct {
			Model    string           `json:"model"`
			Messages []models.RoleMsg `json:"messages"`
		}{
			Model: p.cfg.ModelName,
			Messages: []models.RoleMsg{
				{Role: "user", Content: prompt},
			},
		}
		b, err := json.Marshal(payload)
		if err != nil {
			p.log.Error("failed to marshal openrouter chat payload", "error", err)
			return nil
		}
		p.log.Debug("made openrouter chat payload", "payload", string(b))
		return bytes.NewReader(b)
	}

	// Completions API: prompt format, no tool calls.
	payload := struct {
		Model  string `json:"model"`
		Prompt string `json:"prompt"`
	}{
		Model:  p.cfg.ModelName,
		Prompt: prompt,
	}
	b, err := json.Marshal(payload)
	if err != nil {
		p.log.Error("failed to marshal openrouter completion payload", "error", err)
		return nil
	}
	p.log.Debug("made openrouter completion payload", "payload", string(b))
	return bytes.NewReader(b)
}
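
// Assuming models.RoleMsg uses the conventional role/content tags, the chat
// request marshals to (abridged):
//
//	{"model":"<cfg.ModelName>","messages":[{"role":"user","content":"..."}]}
//
// Note that neither branch sends a tools array even when supportsTools is
// set; that flag is not consulted in this file.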