Merge pull request #17 from koykov/parallel_fifo
FIFO: implement parallel FIFO engine.
koykov authored Dec 4, 2023
2 parents 030991a + a7efbe9 commit eaac207
Showing 6 changed files with 88 additions and 13 deletions.
3 changes: 3 additions & 0 deletions config.go
@@ -27,6 +27,9 @@ type Config struct {
 	// Queue capacity.
 	// Mandatory param if the QoS config is omitted. If QoS is provided, its summing capacity overwrites this field.
 	Capacity uint64
+	// Streams allows avoiding mutex starvation by sharing items among Streams sub-channels instead of one single
+	// channel.
+	Streams uint32
 	// MaxRetries determines the maximum number of item processing retries.
 	// If MaxRetries is exceeded, the item will be sent to the DLQ (if possible).
 	// The initial attempt is not counted as a retry.
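To see the new field in context beyond the diff, here is a minimal, hypothetical usage sketch. Only the Capacity and Streams fields come from this commit; the import path, the constructor name, and the omitted worker/DLQ wiring are assumptions about the package's existing API.

package main

import "github.com/koykov/queue" // import path assumed

func main() {
	// Hypothetical sketch: a plain FIFO queue with several streams.
	// Per the queue.go change below, Streams > 0 selects the parallel FIFO engine;
	// per pfifo.go, Capacity is split evenly across the streams.
	conf := queue.Config{
		Capacity: 1024, // with 4 streams each sub-channel gets 256 slots
		Streams:  4,    // sub-channels instead of a single channel
		// worker, DLQ and other fields omitted for brevity
	}
	q, err := queue.New(&conf) // constructor name assumed
	if err != nil {
		panic(err)
	}
	_ = q
}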
70 changes: 70 additions & 0 deletions pfifo.go
@@ -0,0 +1,70 @@
package queue

import (
	"math"
	"sync/atomic"
)

// Parallel FIFO engine implementation.
type pfifo struct {
	pool    []chan item
	c, o, m uint64
}

func (e *pfifo) init(config *Config) error {
	var inst uint64
	if inst = uint64(config.Streams); inst == 0 {
		inst = 1
	}
	cap_ := config.Capacity / inst
	for i := uint64(0); i < inst; i++ {
		e.pool = append(e.pool, make(chan item, cap_))
	}
	e.c, e.o, e.m = math.MaxUint64, math.MaxUint64, inst
	return nil
}

func (e *pfifo) enqueue(itm *item, block bool) bool {
	idx := atomic.AddUint64(&e.c, 1) % e.m
	if !block {
		select {
		case e.pool[idx] <- *itm:
			return true
		default:
			return false
		}
	}
	e.pool[idx] <- *itm
	return true
}

func (e *pfifo) dequeue() (item, bool) {
	idx := atomic.AddUint64(&e.o, 1) % e.m
	itm, ok := <-e.pool[idx]
	return itm, ok
}

func (e *pfifo) dequeueSQ(_ uint32) (item, bool) {
	return e.dequeue()
}

func (e *pfifo) size() (r int) {
	for i := uint64(0); i < e.m; i++ {
		r += len(e.pool[i])
	}
	return
}

func (e *pfifo) cap() (r int) {
	for i := uint64(0); i < e.m; i++ {
		r += cap(e.pool[i])
	}
	return
}

func (e *pfifo) close(_ bool) error {
	for i := uint64(0); i < e.m; i++ {
		close(e.pool[i])
	}
	return nil
}
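A note on the design, as read from the code above: both counters c and o start at math.MaxUint64 so the very first atomic.AddUint64 wraps around to index 0, and every enqueue/dequeue then rotates round-robin over the stream pool. Because producers and consumers pick their stream independently, strict FIFO ordering holds per stream rather than globally; that is the trade-off for spreading contention across Streams channels. The snippet below is a standalone sketch (not part of the repository) that only demonstrates the wrap-around round-robin indexing.

package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// Standalone sketch of the round-robin index scheme used by pfifo:
// the counter starts at math.MaxUint64 so the first atomic.AddUint64
// overflows to 0, then advances the index modulo the number of streams.
func main() {
	const streams = 4
	ctr := uint64(math.MaxUint64)
	for i := 0; i < 6; i++ {
		idx := atomic.AddUint64(&ctr, 1) % streams
		fmt.Println(idx) // prints 0, 1, 2, 3, 0, 1
	}
}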
6 changes: 3 additions & 3 deletions pq.go
@@ -336,15 +336,15 @@ type egress struct {
 }
 
 func (e *egress) init(conf *qos.EgressConfig) error {
-	for i := uint32(0); i < conf.Instances; i++ {
+	for i := uint32(0); i < conf.Streams; i++ {
 		e.pool = append(e.pool, make(chan item, conf.Capacity))
 		name := qos.Egress
-		if conf.Instances > 1 {
+		if conf.Streams > 1 {
 			name = fmt.Sprintf("%s%d", qos.Egress, i)
 		}
 		e.name = append(e.name, name)
 	}
-	e.c, e.o, e.m = math.MaxUint64, math.MaxUint64, uint64(conf.Instances)
+	e.c, e.o, e.m = math.MaxUint64, math.MaxUint64, uint64(conf.Streams)
 	return nil
 }
 
18 changes: 9 additions & 9 deletions qos/config.go
@@ -22,7 +22,7 @@ const (
 )
 const (
 	defaultEgressCapacity      = uint64(64)
-	defaultEgressInstances     = uint32(1)
+	defaultEgressStreams       = uint32(1)
 	defaultEgressWorkers       = uint32(1)
 	defaultEgressIdleThreshold = uint32(1000)
 	defaultEgressIdleTimeout   = time.Millisecond
@@ -46,8 +46,8 @@ type EgressConfig struct {
 	// If this param is omitted, defaultEgressCapacity (64) is used instead.
 	Capacity uint64
 	// Number of separate egress sub-queues.
-	// If this param is omitted, defaultEgressInstances (1) is used instead.
-	Instances uint32
+	// If this param is omitted, defaultEgressStreams (1) is used instead.
+	Streams uint32
 	// Count of transit workers between sub-queues and the egress sub-queue.
 	// If this param is omitted, defaultEgressWorkers (1) is used instead.
 	// Use with caution!
@@ -65,8 +65,8 @@ func New(algo Algo, eval PriorityEvaluator) *Config {
 	q := Config{
 		Algo: algo,
 		Egress: EgressConfig{
-			Capacity:  defaultEgressCapacity,
-			Instances: defaultEgressInstances,
+			Capacity: defaultEgressCapacity,
+			Streams:  defaultEgressStreams,
 		},
 		Evaluator: eval,
 	}
@@ -88,8 +88,8 @@ func (q *Config) SetEgressCapacity(cap uint64) *Config {
 	return q
 }
 
-func (q *Config) SetEgressInstances(inst uint32) *Config {
-	q.Egress.Instances = inst
+func (q *Config) SetEgressStreams(streams uint32) *Config {
+	q.Egress.Streams = streams
 	return q
 }
 
@@ -127,8 +127,8 @@ func (q *Config) Validate() error {
 	if q.Egress.Capacity == 0 {
 		q.Egress.Capacity = defaultEgressCapacity
 	}
-	if q.Egress.Instances == 0 {
-		q.Egress.Instances = defaultEgressInstances
+	if q.Egress.Streams == 0 {
+		q.Egress.Streams = defaultEgressStreams
 	}
 	if q.Egress.Workers == 0 {
 		q.Egress.Workers = defaultEgressWorkers
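Beyond the diff itself, a brief sketch of how the renamed builder reads after this change; only the setters visible in the hunks above are exercised, while the import path and the surrounding wiring are assumptions.

package example

import "github.com/koykov/queue/qos" // import path assumed

// buildQoS is a hypothetical helper; algo and eval stand in for whatever
// Algo value and PriorityEvaluator implementation the caller already uses.
func buildQoS(algo qos.Algo, eval qos.PriorityEvaluator) *qos.Config {
	return qos.New(algo, eval).
		SetEgressCapacity(128).
		SetEgressStreams(4) // renamed from SetEgressInstances in this commit
}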
2 changes: 1 addition & 1 deletion qos/readme.ru.md
@@ -39,7 +39,7 @@ conf := Config{
 `egress` is a special sub-queue into which items are moved according to the prioritization algorithm. It is configured
 by filling in the `EgressConfig` sub-structure with the following parameters:
 * `Capacity` - capacity, a mandatory parameter.
-* `Instances` - the number of egress queues for cases when egress becomes a bottleneck.
+* `Streams` - the number of egress queues for cases when egress becomes a bottleneck.
 * `Workers` - the number of workers that move items into egress from the other sub-queues.
 * `IdleThreshold` - how many read attempts may be made on empty sub-queues.
 * `IdleTimeout` - how long to wait after `IdleThreshold` is exceeded before making another read attempt.
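As a companion to the parameter list above, a hedged EgressConfig sketch: the field names come from qos/config.go and the readme in this diff, the IdleThreshold/IdleTimeout types are inferred from the defaults in the same file, and the concrete values and import path are illustrative assumptions.

package example

import (
	"time"

	"github.com/koykov/queue/qos" // import path assumed
)

// Illustrative values only; field names follow the diff above.
var egress = qos.EgressConfig{
	Capacity:      128,              // egress capacity (mandatory)
	Streams:       4,                // renamed from Instances in this commit
	Workers:       2,                // transit workers feeding egress
	IdleThreshold: 1000,             // allowed reads from empty sub-queues
	IdleTimeout:   time.Millisecond, // wait before retrying once IdleThreshold is exceeded
}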
2 changes: 2 additions & 0 deletions queue.go
@@ -176,6 +176,8 @@ func (q *Queue) init() {
 		}
 		c.Capacity = c.QoS.SummingCapacity()
 		q.engine = &pq{}
+	case c.Streams > 0:
+		q.engine = &pfifo{}
 	default:
 		q.engine = &fifo{}
 	}
