-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathpool.go
151 lines (133 loc) · 2.53 KB
/
pool.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
package gtcp
import (
"net"
"sync"
"sync/atomic"
)
// Package-level pool state.
var (
pool = new(Pool) // shared pool container; replaced wholesale by DropPool
poolMu sync.RWMutex // guards replacement of pool and (re)creation of its channels
isPoolOpen uint32 // 1 while the pool is open; always read/written via sync/atomic
)
// Pool Container
// Contain TCPCtrl pool, Actor pool and TCPConn pool.
// Each channel acts as a fixed-capacity free list; capacities are set by
// OpenPool and the channels stay nil until the pool is opened.
type Pool struct {
ctrls chan *TCPCtrl // recycled connection controllers
actors chan Actor // recycled actors
conns chan *TCPConn // recycled TCP connection wrappers
}
// GetCtrlFromPool fetches an idle TCPCtrl from the pool, installs the given
// actor on it, and returns it with ok=true. The receive is non-blocking:
// when the pool is closed or empty it returns (nil, false).
func GetCtrlFromPool(actor Actor) (tcpCtrl *TCPCtrl, ok bool) {
	// Snapshot the open flag and the channel under the read lock so we never
	// dereference a *Pool that DropPool is concurrently replacing (the
	// original read pool.ctrls unsynchronized, a data race under -race).
	poolMu.RLock()
	open := atomic.LoadUint32(&isPoolOpen) != 0
	ctrls := pool.ctrls
	poolMu.RUnlock()
	if !open {
		return
	}
	select {
	case tcpCtrl = <-ctrls:
		tcpCtrl.InstallActor(actor)
		return tcpCtrl, true
	default:
	}
	return
}
// GetActorFromPool fetches an idle Actor from the pool with ok=true.
// The receive is non-blocking: when the pool is closed or empty it
// returns (nil, false).
func GetActorFromPool() (actor Actor, ok bool) {
	// Snapshot under the read lock so a concurrent DropPool cannot swap the
	// *Pool out from under us (fixes an unsynchronized read of pool.actors).
	poolMu.RLock()
	open := atomic.LoadUint32(&isPoolOpen) != 0
	actors := pool.actors
	poolMu.RUnlock()
	if !open {
		return
	}
	select {
	case actor = <-actors:
		return actor, true
	default:
	}
	return
}
// GetConnFromPool fetches an idle TCPConn from either the shared Pool or the
// standalone conn pool (connP), rebinds it to the given net connection, and
// returns it with ok=true. Non-blocking: returns (nil, false) when neither
// pool is open or both are empty.
func GetConnFromPool(conn *net.TCPConn) (tcpConn *TCPConn, ok bool) {
	// Snapshot pool.conns under the read lock so a concurrent DropPool cannot
	// replace the *Pool mid-read (fixes an unsynchronized access). A nil
	// channel is never ready in a select, so a closed pool's case simply
	// falls through to default.
	poolMu.RLock()
	open := atomic.LoadUint32(&isPoolOpen) != 0
	conns := pool.conns
	poolMu.RUnlock()
	if !open && !IsConnPoolOpen() {
		return
	}
	select {
	case tcpConn = <-conns:
		tcpConn.ReInstallNetConn(conn)
		ok = true
	case tcpConn = <-connP:
		tcpConn.ReInstallNetConn(conn)
		ok = true
	default:
	}
	return
}
// SendCtrlToPool returns an idle TCPCtrl to the pool. The send is
// non-blocking: when the pool is closed or already full the ctrl is
// silently dropped and left to the garbage collector.
func SendCtrlToPool(ctrl *TCPCtrl) {
	// Snapshot under the read lock so a concurrent DropPool cannot replace
	// the *Pool between the open check and the send (fixes a data race).
	poolMu.RLock()
	open := atomic.LoadUint32(&isPoolOpen) != 0
	ctrls := pool.ctrls
	poolMu.RUnlock()
	if !open {
		return
	}
	select {
	case ctrls <- ctrl:
	default:
	}
}
// SendActorToPool returns an idle Actor to the pool. The send is
// non-blocking: when the pool is closed or already full the actor is
// silently dropped and left to the garbage collector.
func SendActorToPool(actor Actor) {
	// Snapshot under the read lock so a concurrent DropPool cannot replace
	// the *Pool between the open check and the send (fixes a data race).
	poolMu.RLock()
	open := atomic.LoadUint32(&isPoolOpen) != 0
	actors := pool.actors
	poolMu.RUnlock()
	if !open {
		return
	}
	select {
	case actors <- actor:
	default:
	}
}
// SendConnToPool returns a finished TCPConn to a pool, but only once the
// connection's context is done (i.e. the connection has shut down).
// Everything is non-blocking: if the context is still live, or both pools
// are closed or full, the conn is dropped to the garbage collector.
func SendConnToPool(conn *TCPConn) {
	// Snapshot pool.conns under the read lock so a concurrent DropPool
	// cannot replace the *Pool mid-send (fixes an unsynchronized access).
	poolMu.RLock()
	open := atomic.LoadUint32(&isPoolOpen) != 0
	conns := pool.conns
	poolMu.RUnlock()
	if !open && !IsConnPoolOpen() {
		return
	}
	select {
	case <-conn.Context.Done():
		// Park the conn in whichever pool has room; select picks randomly
		// when both are ready, and a nil channel (closed pool) never fires.
		select {
		case conns <- conn:
		case connP <- conn:
		default:
		}
	default:
	}
}
// Get TCPCtrl pool instance.
// Returns the receive side of the TCPCtrl recycling channel.
// NOTE(review): the "Get" prefix is non-idiomatic Go, but kept for callers.
func (p *Pool) GetCtrls() <-chan *TCPCtrl {
return p.ctrls
}
// Get Actor pool instance.
// Returns the receive side of the Actor recycling channel.
// NOTE(review): the "Get" prefix is non-idiomatic Go, but kept for callers.
func (p *Pool) GetActors() <-chan Actor {
return p.actors
}
// Get TCPConn pool instance.
// Returns the receive side of the TCPConn recycling channel.
// NOTE(review): the "Get" prefix is non-idiomatic Go, but kept for callers.
func (p *Pool) GetConns() <-chan *TCPConn {
return p.conns
}
// Get pool container.
// Returns the package-level Pool. The pointer itself is replaced by
// DropPool, so long-held copies can go stale — prefer the package-level
// accessor functions.
func GetPool() *Pool {
return pool
}
// receive size and open pool
func OpenPool(size uint) {
if !IsPoolOpen() {
poolMu.Lock()
pool.ctrls = make(chan *TCPCtrl, size)
pool.actors = make(chan Actor, size)
pool.conns = make(chan *TCPConn, size)
poolMu.Unlock()
atomic.StoreUint32(&isPoolOpen, 1)
}
}
// IsPoolOpen reports whether the pool container is currently open.
// The read lock serializes the check against OpenPool/DropPool, and the
// flag itself is read atomically.
func IsPoolOpen() bool {
	poolMu.RLock()
	open := atomic.LoadUint32(&isPoolOpen) != 0
	poolMu.RUnlock()
	return open
}
// reopen container
// ReopenPool discards the current pool contents and opens a fresh pool with
// the given channel capacity. Anything still sitting in the old channels is
// abandoned to the garbage collector.
func ReopenPool(size uint) {
DropPool()
OpenPool(size)
}
// DropPool closes the container: it swaps in an empty Pool (orphaning any
// pooled objects to the garbage collector) and clears the open flag.
func DropPool() {
	poolMu.Lock()
	pool = new(Pool)
	atomic.StoreUint32(&isPoolOpen, 0)
	poolMu.Unlock()
}