feat(llama): return Result from Generate/Chat

Delegates to HTTPBackend which already returns Result.

Co-Authored-By: Virgil <virgil@lethean.io>
This commit is contained in:
Snider 2026-02-22 17:37:34 +00:00
parent 8a36bafa83
commit 52f27fc476

View file

@@ -113,17 +113,17 @@ func (b *LlamaBackend) Stop() error {
}
// Generate sends a prompt to the managed llama-server.
func (b *LlamaBackend) Generate(ctx context.Context, prompt string, opts GenOpts) (string, error) {
func (b *LlamaBackend) Generate(ctx context.Context, prompt string, opts GenOpts) (Result, error) {
if !b.Available() {
return "", log.E("ml.LlamaBackend.Generate", "llama-server not available", nil)
return Result{}, log.E("ml.LlamaBackend.Generate", "llama-server not available", nil)
}
return b.http.Generate(ctx, prompt, opts)
}
// Chat sends a conversation to the managed llama-server.
func (b *LlamaBackend) Chat(ctx context.Context, messages []Message, opts GenOpts) (string, error) {
func (b *LlamaBackend) Chat(ctx context.Context, messages []Message, opts GenOpts) (Result, error) {
if !b.Available() {
return "", log.E("ml.LlamaBackend.Chat", "llama-server not available", nil)
return Result{}, log.E("ml.LlamaBackend.Chat", "llama-server not available", nil)
}
return b.http.Chat(ctx, messages, opts)
}