cli/internal/cmd/ai/ratelimit_dispatch.go
Claude 23b82482f2 refactor: rename module from github.com/host-uk/core to forge.lthn.ai/core/cli
Move module identity to our own Forgejo instance. All import paths
updated across 434 Go files, sub-module go.mod files, and go.work.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-16 05:53:52 +00:00

49 lines
1.3 KiB
Go

package ai
import (
	"context"
	"fmt"

	"forge.lthn.ai/core/cli/pkg/log"
	"forge.lthn.ai/core/cli/pkg/ratelimit"
)
// executeWithRateLimit wraps an agent execution with rate limiting logic.
//
// It estimates input tokens from the prompt length, blocks until the rate
// limiter grants capacity for the given model, invokes runner, records the
// estimated usage, and persists the limiter state. If the rate limiter
// cannot be initialized at all, execution proceeds without limits (best
// effort — a broken limiter must not block agent work).
//
// It returns the runner's (success, exitCode, error). If waiting for
// capacity fails (for example, on context cancellation), it returns
// (false, -1, err) without invoking runner.
func executeWithRateLimit(ctx context.Context, model, prompt string, runner func() (bool, int, error)) (bool, int, error) {
	rl, err := ratelimit.New()
	if err != nil {
		// Best effort: degrade to unlimited execution rather than fail.
		log.Warn("Failed to initialize rate limiter, proceeding without limits", "error", err)
		return runner()
	}
	if err := rl.Load(); err != nil {
		// Missing/corrupt state only makes the limiter more permissive; continue.
		log.Warn("Failed to load rate limit state", "error", err)
	}

	// Estimate tokens from prompt length (1 token ≈ 4 chars); floor at 1 so
	// even an empty prompt reserves a minimal slice of capacity.
	estTokens := len(prompt) / 4
	if estTokens == 0 {
		estTokens = 1
	}

	log.Info("Checking rate limits", "model", model, "est_tokens", estTokens)
	if err := rl.WaitForCapacity(ctx, model, estTokens); err != nil {
		// Wrap so callers see which model stalled; %w keeps errors.Is/As working.
		return false, -1, fmt.Errorf("waiting for rate limit capacity for model %q: %w", model, err)
	}

	success, exitCode, runErr := runner()

	// Record usage with conservative output estimate (actual tokens unknown from
	// shell runner). Usage is recorded even on runner failure, since input
	// tokens were plausibly consumed before the error.
	outputEst := estTokens / 10
	if outputEst < 50 {
		outputEst = 50
	}
	rl.RecordUsage(model, estTokens, outputEst)
	if err := rl.Persist(); err != nil {
		log.Warn("Failed to persist rate limit state", "error", err)
	}
	return success, exitCode, runErr
}