Merge pull request 'fix(bugseti): workspace TTL sweeper and configurable limits (#54)' (#157) from fix/54-workspace-ttl-cleanup into new
commit b779c5ece0
3 changed files with 185 additions and 22 deletions
@@ -17,10 +17,12 @@ import (
 )

 const (
-    // maxWorkspaces is the upper bound on cached workspace entries.
-    maxWorkspaces = 100
-    // workspaceTTL is how long a workspace stays in memory before eviction.
-    workspaceTTL = 24 * time.Hour
+    // defaultMaxWorkspaces is the fallback upper bound when config is unavailable.
+    defaultMaxWorkspaces = 100
+    // defaultWorkspaceTTL is the fallback TTL when config is unavailable.
+    defaultWorkspaceTTL = 24 * time.Hour
+    // sweepInterval is how often the background sweeper runs.
+    sweepInterval = 5 * time.Minute
 )

 // WorkspaceService manages DataNode-backed workspaces for issues.
@@ -28,8 +30,10 @@ const (
 // snapshotted, packaged as a TIM container, or shipped as a crash report.
 type WorkspaceService struct {
     config     *bugseti.ConfigService
-    workspaces map[string]*Workspace // issue ID → workspace
+    workspaces map[string]*Workspace // issue ID -> workspace
     mu         sync.RWMutex
+    done       chan struct{} // signals the background sweeper to stop
+    stopped    chan struct{} // closed when the sweeper goroutine exits
 }

 // Workspace tracks a DataNode-backed workspace for an issue.
@@ -55,10 +59,13 @@ type CrashReport struct {
 }

 // NewWorkspaceService creates a new WorkspaceService.
+// Call Start() to begin the background TTL sweeper.
 func NewWorkspaceService(config *bugseti.ConfigService) *WorkspaceService {
     return &WorkspaceService{
         config:     config,
         workspaces: make(map[string]*Workspace),
+        done:       make(chan struct{}),
+        stopped:    make(chan struct{}),
     }
 }

@@ -67,6 +74,56 @@ func (w *WorkspaceService) ServiceName() string {
     return "WorkspaceService"
 }

+// Start launches the background sweeper goroutine that periodically
+// evicts expired workspaces. This prevents unbounded map growth even
+// when no new Capture calls arrive.
+func (w *WorkspaceService) Start() {
+    go func() {
+        defer close(w.stopped)
+        ticker := time.NewTicker(sweepInterval)
+        defer ticker.Stop()
+
+        for {
+            select {
+            case <-ticker.C:
+                w.mu.Lock()
+                evicted := w.cleanup()
+                w.mu.Unlock()
+                if evicted > 0 {
+                    log.Printf("Workspace sweeper: evicted %d stale entries, %d remaining", evicted, w.ActiveWorkspaces())
+                }
+            case <-w.done:
+                return
+            }
+        }
+    }()
+    log.Printf("Workspace sweeper started (interval=%s, ttl=%s, max=%d)",
+        sweepInterval, w.ttl(), w.maxCap())
+}
+
+// Stop signals the background sweeper to exit and waits for it to finish.
+func (w *WorkspaceService) Stop() {
+    close(w.done)
+    <-w.stopped
+    log.Printf("Workspace sweeper stopped")
+}
+
+// ttl returns the configured workspace TTL, falling back to the default.
+func (w *WorkspaceService) ttl() time.Duration {
+    if w.config != nil {
+        return w.config.GetWorkspaceTTL()
+    }
+    return defaultWorkspaceTTL
+}
+
+// maxCap returns the configured max workspace count, falling back to the default.
+func (w *WorkspaceService) maxCap() int {
+    if w.config != nil {
+        return w.config.GetMaxWorkspaces()
+    }
+    return defaultMaxWorkspaces
+}
+
 // Capture loads a filesystem workspace into a DataNode Medium.
 // Call this after git clone to create the in-memory snapshot.
 func (w *WorkspaceService) Capture(issue *bugseti.Issue, diskPath string) error {
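Reviewer note: a minimal sketch of the intended lifecycle. Only NewWorkspaceService, Start, and Stop above come from this change; the surrounding wiring is assumed:

    svc := NewWorkspaceService(bugseti.NewConfigService())
    svc.Start()      // launches the background sweeper
    defer svc.Stop() // blocks until the sweeper goroutine exits

    // ... Capture and Release workspaces as issues come and go ...

Note that Stop is not idempotent: a second call would panic on closing the already-closed done channel, so whoever owns the service must call it exactly once.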
@@ -251,18 +308,23 @@ func (w *WorkspaceService) SaveCrashReport(report *CrashReport) (string, error)

 // cleanup evicts expired workspaces and enforces the max size cap.
 // Must be called with w.mu held for writing.
-func (w *WorkspaceService) cleanup() {
+// Returns the number of evicted entries.
+func (w *WorkspaceService) cleanup() int {
     now := time.Now()
+    ttl := w.ttl()
+    cap := w.maxCap()
+    evicted := 0

     // First pass: evict entries older than TTL.
     for id, ws := range w.workspaces {
-        if now.Sub(ws.CreatedAt) > workspaceTTL {
+        if now.Sub(ws.CreatedAt) > ttl {
             delete(w.workspaces, id)
+            evicted++
         }
     }

     // Second pass: if still over cap, evict oldest entries.
-    if len(w.workspaces) > maxWorkspaces {
+    if len(w.workspaces) > cap {
         type entry struct {
             id        string
             createdAt time.Time
@@ -274,11 +336,14 @@ func (w *WorkspaceService) cleanup() {
         sort.Slice(entries, func(i, j int) bool {
             return entries[i].createdAt.Before(entries[j].createdAt)
         })
-        evict := len(w.workspaces) - maxWorkspaces
-        for i := 0; i < evict; i++ {
+        toEvict := len(w.workspaces) - cap
+        for i := 0; i < toEvict; i++ {
             delete(w.workspaces, entries[i].id)
+            evicted++
         }
     }
+
+    return evicted
 }

 // Release removes a workspace from memory.
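Reviewer note: the second pass is a plain sort-and-trim. A self-contained sketch of the same pattern with hypothetical data (not the committed code):

    package main

    import (
        "fmt"
        "sort"
        "time"
    )

    func main() {
        type entry struct {
            id        string
            createdAt time.Time
        }
        now := time.Now()
        entries := []entry{
            {"ws-b", now.Add(-3 * time.Hour)},
            {"ws-a", now.Add(-5 * time.Hour)},
            {"ws-c", now.Add(-1 * time.Hour)},
        }
        keep := 2 // stands in for maxCap()

        // Oldest first, then drop entries until only `keep` remain.
        sort.Slice(entries, func(i, j int) bool {
            return entries[i].createdAt.Before(entries[j].createdAt)
        })
        for i := 0; i < len(entries)-keep; i++ {
            fmt.Println("evict", entries[i].id) // prints: evict ws-a
        }
    }

This costs O(n log n) per sweep, which is fine at a cap of a few hundred entries; a heap or LRU list would be worth revisiting only if the cap grows large.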
@@ -33,9 +33,11 @@ func TestCleanup_TTL(t *testing.T) {
 func TestCleanup_MaxSize(t *testing.T) {
     svc := NewWorkspaceService(bugseti.NewConfigService())

+    maxCap := svc.maxCap()
+
     // Fill beyond the cap with fresh entries.
     svc.mu.Lock()
-    for i := 0; i < maxWorkspaces+20; i++ {
+    for i := 0; i < maxCap+20; i++ {
         svc.workspaces[fmt.Sprintf("ws-%d", i)] = &Workspace{
             CreatedAt: time.Now().Add(-time.Duration(i) * time.Minute),
         }
@@ -43,30 +45,28 @@ func TestCleanup_MaxSize(t *testing.T) {
     svc.cleanup()
     svc.mu.Unlock()

-    if got := svc.ActiveWorkspaces(); got != maxWorkspaces {
-        t.Errorf("expected %d workspaces after cap cleanup, got %d", maxWorkspaces, got)
+    if got := svc.ActiveWorkspaces(); got != maxCap {
+        t.Errorf("expected %d workspaces after cap cleanup, got %d", maxCap, got)
     }
 }

 func TestCleanup_EvictsOldestWhenOverCap(t *testing.T) {
     svc := NewWorkspaceService(bugseti.NewConfigService())

-    // Create maxWorkspaces+1 entries; the newest should survive.
+    maxCap := svc.maxCap()
+
+    // Create maxCap+1 entries; the newest should survive.
     svc.mu.Lock()
-    for i := 0; i <= maxWorkspaces; i++ {
+    for i := 0; i <= maxCap; i++ {
         svc.workspaces[fmt.Sprintf("ws-%d", i)] = &Workspace{
-            CreatedAt: time.Now().Add(-time.Duration(maxWorkspaces-i) * time.Minute),
+            CreatedAt: time.Now().Add(-time.Duration(maxCap-i) * time.Minute),
         }
     }
     svc.cleanup()
     svc.mu.Unlock()

-    // The newest entry (ws-<maxWorkspaces>) should still exist.
-    newest := fmt.Sprintf("ws-%d", maxWorkspaces)
-    if m := svc.GetMedium(newest); m != nil {
-        // GetMedium returns nil for entries with nil Medium, which is expected here.
-        // We just want to verify the key still exists.
-    }
+    // The newest entry (ws-<maxCap>) should still exist.
+    newest := fmt.Sprintf("ws-%d", maxCap)
+
+    svc.mu.RLock()
+    _, exists := svc.workspaces[newest]
@@ -83,3 +83,69 @@ func TestCleanup_EvictsOldestWhenOverCap(t *testing.T) {
         t.Error("expected oldest workspace to be evicted")
     }
 }
+
+func TestCleanup_ReturnsEvictedCount(t *testing.T) {
+    svc := NewWorkspaceService(bugseti.NewConfigService())
+
+    svc.mu.Lock()
+    for i := 0; i < 3; i++ {
+        svc.workspaces[fmt.Sprintf("old-%d", i)] = &Workspace{
+            CreatedAt: time.Now().Add(-25 * time.Hour),
+        }
+    }
+    svc.workspaces["fresh"] = &Workspace{
+        CreatedAt: time.Now(),
+    }
+    evicted := svc.cleanup()
+    svc.mu.Unlock()
+
+    if evicted != 3 {
+        t.Errorf("expected 3 evicted entries, got %d", evicted)
+    }
+}
+
+func TestStartStop(t *testing.T) {
+    svc := NewWorkspaceService(bugseti.NewConfigService())
+    svc.Start()
+
+    // Add a stale entry while the sweeper is running.
+    svc.mu.Lock()
+    svc.workspaces["stale"] = &Workspace{
+        CreatedAt: time.Now().Add(-25 * time.Hour),
+    }
+    svc.mu.Unlock()
+
+    // Stop should return without hanging.
+    svc.Stop()
+}
+
+func TestConfigurableTTL(t *testing.T) {
+    cfg := bugseti.NewConfigService()
+    svc := NewWorkspaceService(cfg)
+
+    // Default TTL should be 24h (1440 minutes).
+    if got := svc.ttl(); got != 24*time.Hour {
+        t.Errorf("expected default TTL of 24h, got %s", got)
+    }
+
+    // Default max cap should be 100.
+    if got := svc.maxCap(); got != 100 {
+        t.Errorf("expected default max cap of 100, got %d", got)
+    }
+}
+
+func TestNilConfigFallback(t *testing.T) {
+    svc := &WorkspaceService{
+        config:     nil,
+        workspaces: make(map[string]*Workspace),
+        done:       make(chan struct{}),
+        stopped:    make(chan struct{}),
+    }
+
+    if got := svc.ttl(); got != defaultWorkspaceTTL {
+        t.Errorf("expected fallback TTL %s, got %s", defaultWorkspaceTTL, got)
+    }
+    if got := svc.maxCap(); got != defaultMaxWorkspaces {
+        t.Errorf("expected fallback max cap %d, got %d", defaultMaxWorkspaces, got)
+    }
+}
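Reviewer note: one gap worth a follow-up test is both cleanup passes firing in a single call. A sketch, reusing only helpers visible in this diff (the test name is hypothetical):

    func TestCleanup_TTLAndCapTogether(t *testing.T) {
        svc := NewWorkspaceService(bugseti.NewConfigService())
        maxCap := svc.maxCap()

        svc.mu.Lock()
        // One TTL-expired entry plus maxCap+5 fresh ones.
        svc.workspaces["expired"] = &Workspace{
            CreatedAt: time.Now().Add(-25 * time.Hour),
        }
        for i := 0; i < maxCap+5; i++ {
            svc.workspaces[fmt.Sprintf("ws-%d", i)] = &Workspace{
                CreatedAt: time.Now().Add(-time.Duration(i) * time.Minute),
            }
        }
        evicted := svc.cleanup()
        svc.mu.Unlock()

        // 1 from the TTL pass plus 5 from the cap pass.
        if evicted != 6 {
            t.Errorf("expected 6 evicted entries, got %d", evicted)
        }
        if got := svc.ActiveWorkspaces(); got != maxCap {
            t.Errorf("expected %d workspaces after cleanup, got %d", maxCap, got)
        }
    }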
@@ -52,6 +52,10 @@ type Config struct {
     MaxConcurrentIssues int  `json:"maxConcurrentIssues"`
     AutoSeedContext     bool `json:"autoSeedContext"`

+    // Workspace cache
+    MaxWorkspaces       int `json:"maxWorkspaces"`       // Upper bound on cached workspace entries (0 = default 100)
+    WorkspaceTTLMinutes int `json:"workspaceTtlMinutes"` // TTL for workspace entries in minutes (0 = default 1440 = 24h)
+
     // Updates
     UpdateChannel string `json:"updateChannel"` // stable, beta, nightly
     AutoUpdate    bool   `json:"autoUpdate"`    // Automatically install updates
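Assuming the config is persisted as JSON of this struct (which the json tags suggest), an override of both new fields would look like the fragment below; the values are purely illustrative:

    {
      "maxWorkspaces": 250,
      "workspaceTtlMinutes": 60
    }

Zero or omitted fields fall back to 100 entries and 1440 minutes, as the comments above document.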
@@ -99,6 +103,8 @@ func NewConfigService() *ConfigService {
         AutoSeedContext:     true,
         DataDir:             bugsetiDir,
         MarketplaceMCPRoot:  "",
+        MaxWorkspaces:       100,
+        WorkspaceTTLMinutes: 1440, // 24 hours
         UpdateChannel:       "stable",
         AutoUpdate:          false,
         UpdateCheckInterval: 6, // Check every 6 hours
@@ -169,6 +175,12 @@ func (c *ConfigService) mergeDefaults(config *Config) {
     if config.DataDir == "" {
         config.DataDir = c.config.DataDir
     }
+    if config.MaxWorkspaces == 0 {
+        config.MaxWorkspaces = 100
+    }
+    if config.WorkspaceTTLMinutes == 0 {
+        config.WorkspaceTTLMinutes = 1440
+    }
     if config.UpdateChannel == "" {
         config.UpdateChannel = "stable"
     }
@@ -406,6 +418,26 @@ func (c *ConfigService) SetAutoSeedEnabled(enabled bool) error {
     return c.saveUnsafe()
 }

+// GetMaxWorkspaces returns the maximum number of cached workspaces.
+func (c *ConfigService) GetMaxWorkspaces() int {
+    c.mu.RLock()
+    defer c.mu.RUnlock()
+    if c.config.MaxWorkspaces <= 0 {
+        return 100
+    }
+    return c.config.MaxWorkspaces
+}
+
+// GetWorkspaceTTL returns the workspace TTL as a time.Duration.
+func (c *ConfigService) GetWorkspaceTTL() time.Duration {
+    c.mu.RLock()
+    defer c.mu.RUnlock()
+    if c.config.WorkspaceTTLMinutes <= 0 {
+        return 24 * time.Hour
+    }
+    return time.Duration(c.config.WorkspaceTTLMinutes) * time.Minute
+}
+
 // UpdateSettings holds update-related configuration.
 type UpdateSettings struct {
     Channel string `json:"channel"`
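Reviewer note: the accessors guard "<= 0" while mergeDefaults only rewrites exact zeros, so a negative value that survives the merge is still clamped at read time. A minimal usage sketch, assuming a fresh config with the defaults shown above:

    cfg := bugseti.NewConfigService()
    fmt.Println(cfg.GetMaxWorkspaces()) // 100
    fmt.Println(cfg.GetWorkspaceTTL())  // 24h0m0s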