go-ai/ml/inference.go

// Package ml provides ML inference, scoring, and model management for CoreGo.
//
// It supports multiple inference backends (HTTP, llama-server, Ollama) through
// a common Backend interface, and includes an ethics-aware scoring engine with
// both heuristic and LLM-judge capabilities.
//
// Register as a CoreGo service:
//
//	core.New(
//		core.WithService(ml.NewService),
//	)
package ml

import "context"

// Backend generates text from prompts. Implementations include HTTPBackend
// (OpenAI-compatible API), LlamaBackend (managed llama-server process), and
// OllamaBackend (Ollama native API).
type Backend interface {
	// Generate sends a single user prompt and returns the response.
	Generate(ctx context.Context, prompt string, opts GenOpts) (string, error)

	// Chat sends a multi-turn conversation and returns the response.
	Chat(ctx context.Context, messages []Message, opts GenOpts) (string, error)

	// Name returns the backend identifier (e.g. "http", "llama", "ollama").
	Name() string

	// Available reports whether the backend is ready to accept requests.
	Available() bool
}
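
// A typical call site, sketched as a doc example. Only the Backend interface
// above is guaranteed to exist; the helper below is illustrative and not part
// of this package:
//
//	func summarize(ctx context.Context, b Backend, text string) (string, error) {
//		if !b.Available() {
//			return "", fmt.Errorf("backend %q is not ready", b.Name())
//		}
//		return b.Generate(ctx, "Summarize the following:\n"+text, DefaultGenOpts())
//	}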

// GenOpts configures a generation request.
type GenOpts struct {
	Temperature float64 // sampling temperature
	MaxTokens   int     // maximum number of tokens to generate
	Model       string  // override model for this request
}

// Message is a single chat message.
type Message struct {
	Role    string `json:"role"` // e.g. "system", "user", or "assistant"
	Content string `json:"content"`
}
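
// With the struct tags above, a Message marshals via encoding/json to the
// familiar chat-completion wire shape:
//
//	{"role": "user", "content": "Hello!"}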

// DefaultGenOpts returns sensible defaults for generation: a low temperature
// (0.1) for near-deterministic output, with the remaining fields left at
// their zero values.
func DefaultGenOpts() GenOpts {
	return GenOpts{
		Temperature: 0.1,
	}
}
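
// Callers typically start from the defaults and override fields per request;
// the model name below is purely illustrative:
//
//	opts := DefaultGenOpts()
//	opts.MaxTokens = 512
//	opts.Model = "llama3"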