-
Notifications
You must be signed in to change notification settings - Fork 23
/
Copy pathchan_queue.go
80 lines (67 loc) · 1.59 KB
/
chan_queue.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
package queue
import (
"sync"
"sync/atomic"
)
// NewQueue creates a queue with the given job-buffer capacity and the
// given number of worker goroutines.
func NewQueue(maxCapacity, maxThread int) *Queue {
	q := &Queue{
		maxWorkers: maxThread,
		wg:         new(sync.WaitGroup),
		jobQueue:   make(chan Jober, maxCapacity),
		workerPool: make(chan chan Jober, maxThread),
		workers:    make([]*worker, maxThread),
	}
	return q
}
// Queue a task queue for mitigating server pressure in high concurrency situations
// and improving task processing
type Queue struct {
	maxWorkers int            // number of worker goroutines started by Run
	jobQueue   chan Jober     // buffered channel of pending jobs; Push sends, dispatcher receives
	workerPool chan chan Jober // pool of idle workers' job channels; dispatcher pulls one per job
	workers    []*worker      // the workers created in Run, kept so Terminate can Stop them
	running    uint32         // 1 while running, 0 otherwise; accessed only via sync/atomic
	wg         *sync.WaitGroup // incremented per Push; Terminate waits on it before tearing down
}
// Run starts the queue: it creates and starts maxWorkers workers and a
// dispatcher goroutine that hands queued jobs to idle workers. Calling
// Run on an already-running queue is a no-op.
func (q *Queue) Run() {
	// CompareAndSwap makes the check-and-set a single atomic step; the
	// previous Load-then-Store pair allowed two concurrent callers to
	// both pass the check and start the workers/dispatcher twice.
	if !atomic.CompareAndSwapUint32(&q.running, 0, 1) {
		return
	}

	for i := 0; i < q.maxWorkers; i++ {
		q.workers[i] = newWorker(q.workerPool, q.wg)
		q.workers[i].Start()
	}

	go q.dispatcher()
}
// dispatcher forwards each pending job to the next idle worker. It
// returns when jobQueue is closed and drained (see Terminate).
func (q *Queue) dispatcher() {
	for {
		job, ok := <-q.jobQueue
		if !ok {
			return
		}
		idle := <-q.workerPool
		idle <- job
	}
}
// Terminate stops the queue from accepting new tasks, waits for all
// already-pushed tasks to finish, and releases the workers and channels.
// Calling Terminate on a queue that is not running is a no-op.
func (q *Queue) Terminate() {
	// Atomically flip running 1 -> 0; the previous Load-then-Store pair
	// let two concurrent Terminate calls both proceed and double-close
	// jobQueue/workerPool, which panics.
	if !atomic.CompareAndSwapUint32(&q.running, 1, 0) {
		return
	}

	// wg is incremented once per Push, so this waits until every
	// accepted job has been processed before tearing anything down.
	q.wg.Wait()

	close(q.jobQueue) // dispatcher's receive loop exits once drained
	for i := 0; i < q.maxWorkers; i++ {
		q.workers[i].Stop()
	}
	close(q.workerPool)
}
// Push submits an executable task to the queue. Tasks pushed while the
// queue is not running are silently dropped.
//
// NOTE(review): the running check and the channel send are not one
// atomic step, so a Push racing with Terminate could send on a closed
// channel and panic — confirm callers stop pushing before Terminate.
func (q *Queue) Push(job Jober) {
	if atomic.LoadUint32(&q.running) == 1 {
		q.wg.Add(1)
		q.jobQueue <- job
	}
}
// GetJobCount returns the number of jobs currently buffered in the
// queue, i.e. pushed but not yet handed to a worker by the dispatcher.
func (q *Queue) GetJobCount() int {
	return len(q.jobQueue)
}