feat(llama): return Result from Generate/Chat
Delegates to HTTPBackend which already returns Result. Co-Authored-By: Virgil <virgil@lethean.io>
This commit is contained in:
parent
8a36bafa83
commit
52f27fc476
1 changed file with 4 additions and 4 deletions
|
|
@@ -113,17 +113,17 @@ func (b *LlamaBackend) Stop() error {
|
|||
}
|
||||
|
||||
// Generate sends a prompt to the managed llama-server.
|
||||
func (b *LlamaBackend) Generate(ctx context.Context, prompt string, opts GenOpts) (string, error) {
|
||||
func (b *LlamaBackend) Generate(ctx context.Context, prompt string, opts GenOpts) (Result, error) {
|
||||
if !b.Available() {
|
||||
return "", log.E("ml.LlamaBackend.Generate", "llama-server not available", nil)
|
||||
return Result{}, log.E("ml.LlamaBackend.Generate", "llama-server not available", nil)
|
||||
}
|
||||
return b.http.Generate(ctx, prompt, opts)
|
||||
}
|
||||
|
||||
// Chat sends a conversation to the managed llama-server.
|
||||
func (b *LlamaBackend) Chat(ctx context.Context, messages []Message, opts GenOpts) (string, error) {
|
||||
func (b *LlamaBackend) Chat(ctx context.Context, messages []Message, opts GenOpts) (Result, error) {
|
||||
if !b.Available() {
|
||||
return "", log.E("ml.LlamaBackend.Chat", "llama-server not available", nil)
|
||||
return Result{}, log.E("ml.LlamaBackend.Chat", "llama-server not available", nil)
|
||||
}
|
||||
return b.http.Chat(ctx, messages, opts)
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue