ax(mining): rename mu to mutex in SupervisedTask and TaskSupervisor
AX Principle 1: predictable names over short names. The field `mu` is an abbreviation that requires context to understand; `mutex` is self-documenting.

Co-Authored-By: Charon <charon@lethean.io>
This commit is contained in:
parent
1c1b2cadf1
commit
d65a1995be
1 changed file with 26 additions and 26 deletions
|
|
@ -21,7 +21,7 @@ type SupervisedTask struct {
|
|||
running bool
|
||||
lastStartTime time.Time
|
||||
cancel context.CancelFunc
|
||||
mu sync.Mutex
|
||||
mutex sync.Mutex
|
||||
}
|
||||
|
||||
// supervisor := NewTaskSupervisor()
|
||||
|
|
@ -32,7 +32,7 @@ type TaskSupervisor struct {
|
|||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
waitGroup sync.WaitGroup
|
||||
mu sync.RWMutex
|
||||
mutex sync.RWMutex
|
||||
started bool
|
||||
}
|
||||
|
||||
|
|
@ -51,8 +51,8 @@ func NewTaskSupervisor() *TaskSupervisor {
|
|||
// supervisor.RegisterTask("stats-collector", collectStats, 5*time.Second, -1)
|
||||
// supervisor.RegisterTask("cleanup", runCleanup, 30*time.Second, 10)
|
||||
func (s *TaskSupervisor) RegisterTask(name string, task TaskFunc, restartDelay time.Duration, maxRestarts int) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
|
||||
s.tasks[name] = &SupervisedTask{
|
||||
name: name,
|
||||
|
|
@ -64,26 +64,26 @@ func (s *TaskSupervisor) RegisterTask(name string, task TaskFunc, restartDelay t
|
|||
|
||||
// supervisor.Start() // begins all registered tasks; no-op if already started
|
||||
func (s *TaskSupervisor) Start() {
|
||||
s.mu.Lock()
|
||||
s.mutex.Lock()
|
||||
if s.started {
|
||||
s.mu.Unlock()
|
||||
s.mutex.Unlock()
|
||||
return
|
||||
}
|
||||
s.started = true
|
||||
s.mu.Unlock()
|
||||
s.mutex.Unlock()
|
||||
|
||||
s.mu.RLock()
|
||||
s.mutex.RLock()
|
||||
for name, task := range s.tasks {
|
||||
s.startTask(name, task)
|
||||
}
|
||||
s.mu.RUnlock()
|
||||
s.mutex.RUnlock()
|
||||
}
|
||||
|
||||
// s.startTask("stats-collector", s.tasks["stats-collector"])
|
||||
func (s *TaskSupervisor) startTask(name string, st *SupervisedTask) {
|
||||
st.mu.Lock()
|
||||
st.mutex.Lock()
|
||||
if st.running {
|
||||
st.mu.Unlock()
|
||||
st.mutex.Unlock()
|
||||
return
|
||||
}
|
||||
st.running = true
|
||||
|
|
@ -91,7 +91,7 @@ func (s *TaskSupervisor) startTask(name string, st *SupervisedTask) {
|
|||
|
||||
taskCtx, taskCancel := context.WithCancel(s.ctx)
|
||||
st.cancel = taskCancel
|
||||
st.mu.Unlock()
|
||||
st.mutex.Unlock()
|
||||
|
||||
s.waitGroup.Add(1)
|
||||
go func() {
|
||||
|
|
@ -118,11 +118,11 @@ func (s *TaskSupervisor) startTask(name string, st *SupervisedTask) {
|
|||
}()
|
||||
|
||||
// Check if we should restart
|
||||
st.mu.Lock()
|
||||
st.mutex.Lock()
|
||||
st.restartCount++
|
||||
shouldRestart := st.restartCount <= st.maxRestarts || st.maxRestarts < 0
|
||||
restartDelay := st.restartDelay
|
||||
st.mu.Unlock()
|
||||
st.mutex.Unlock()
|
||||
|
||||
if !shouldRestart {
|
||||
logging.Warn("supervised task reached max restarts", logging.Fields{
|
||||
|
|
@ -152,48 +152,48 @@ func (s *TaskSupervisor) Stop() {
|
|||
s.cancel()
|
||||
s.waitGroup.Wait()
|
||||
|
||||
s.mu.Lock()
|
||||
s.mutex.Lock()
|
||||
s.started = false
|
||||
for _, task := range s.tasks {
|
||||
task.mu.Lock()
|
||||
task.mutex.Lock()
|
||||
task.running = false
|
||||
task.mu.Unlock()
|
||||
task.mutex.Unlock()
|
||||
}
|
||||
s.mu.Unlock()
|
||||
s.mutex.Unlock()
|
||||
|
||||
logging.Info("task supervisor stopped")
|
||||
}
|
||||
|
||||
// running, restarts, ok := supervisor.GetTaskStatus("stats-collector")
|
||||
func (s *TaskSupervisor) GetTaskStatus(name string) (running bool, restartCount int, found bool) {
|
||||
s.mu.RLock()
|
||||
s.mutex.RLock()
|
||||
task, ok := s.tasks[name]
|
||||
s.mu.RUnlock()
|
||||
s.mutex.RUnlock()
|
||||
|
||||
if !ok {
|
||||
return false, 0, false
|
||||
}
|
||||
|
||||
task.mu.Lock()
|
||||
defer task.mu.Unlock()
|
||||
task.mutex.Lock()
|
||||
defer task.mutex.Unlock()
|
||||
return task.running, task.restartCount, true
|
||||
}
|
||||
|
||||
// for name, status := range supervisor.GetAllTaskStatuses() { log(name, status.Running) }
|
||||
func (s *TaskSupervisor) GetAllTaskStatuses() map[string]TaskStatus {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
s.mutex.RLock()
|
||||
defer s.mutex.RUnlock()
|
||||
|
||||
statuses := make(map[string]TaskStatus, len(s.tasks))
|
||||
for name, task := range s.tasks {
|
||||
task.mu.Lock()
|
||||
task.mutex.Lock()
|
||||
statuses[name] = TaskStatus{
|
||||
Name: name,
|
||||
Running: task.running,
|
||||
RestartCount: task.restartCount,
|
||||
LastStart: task.lastStartTime,
|
||||
}
|
||||
task.mu.Unlock()
|
||||
task.mutex.Unlock()
|
||||
}
|
||||
return statuses
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue