@@ -7,12 +7,16 @@ package queue
 import (
 	"context"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"code.gitea.io/gitea/modules/log"
 )
 
-// WorkerPool takes
+// WorkerPool represents a dynamically growable worker pool for a
+// provided handler function. It has an internal channel which it
+// uses to detect blocking, and it will grow and shrink in response
+// to demand as per configuration.
 type WorkerPool struct {
 	lock               sync.Mutex
 	baseCtx            context.Context
@@ -27,10 +31,42 @@ type WorkerPool struct {
 	blockTimeout       time.Duration
 	boostTimeout       time.Duration
 	boostWorkers       int
+	numInQueue         int64
 }
 
+// WorkerPoolConfiguration is the basic configuration for a WorkerPool
+type WorkerPoolConfiguration struct {
+	QueueLength  int
+	BatchLength  int
+	BlockTimeout time.Duration
+	BoostTimeout time.Duration
+	BoostWorkers int
+	MaxWorkers   int
+}
+
+// NewWorkerPool creates a new worker pool
+func NewWorkerPool(handle HandlerFunc, config WorkerPoolConfiguration) *WorkerPool {
+	ctx, cancel := context.WithCancel(context.Background())
+
+	dataChan := make(chan Data, config.QueueLength)
+	pool := &WorkerPool{
+		baseCtx:            ctx,
+		cancel:             cancel,
+		batchLength:        config.BatchLength,
+		dataChan:           dataChan,
+		handle:             handle,
+		blockTimeout:       config.BlockTimeout,
+		boostTimeout:       config.BoostTimeout,
+		boostWorkers:       config.BoostWorkers,
+		maxNumberOfWorkers: config.MaxWorkers,
+	}
+
+	return pool
+}
+
 // Push pushes the data to the internal channel
 func (p *WorkerPool) Push(data Data) {
+	atomic.AddInt64(&p.numInQueue, 1)
 	p.lock.Lock()
 	if p.blockTimeout > 0 && p.boostTimeout > 0 && (p.numberOfWorkers <= p.maxNumberOfWorkers || p.maxNumberOfWorkers < 0) {
 		p.lock.Unlock()
@@ -80,7 +116,7 @@ func (p *WorkerPool) pushBoost(data Data) {
 				log.Warn("WorkerPool: %d (for %s) Channel blocked for %v - adding %d temporary workers for %s, block timeout now %v", p.qid, mq.Name, ourTimeout, boost, p.boostTimeout, p.blockTimeout)
 
 				start := time.Now()
-				pid := mq.RegisterWorkers(boost, start, false, start, cancel)
+				pid := mq.RegisterWorkers(boost, start, false, start, cancel, false)
 				go func() {
 					<-ctx.Done()
 					mq.RemoveWorkers(pid)
@@ -138,8 +174,8 @@ func (p *WorkerPool) BlockTimeout() time.Duration {
 	return p.blockTimeout
 }
 
-// SetSettings sets the setable boost values
-func (p *WorkerPool) SetSettings(maxNumberOfWorkers, boostWorkers int, timeout time.Duration) {
+// SetPoolSettings sets the settable boost values
+func (p *WorkerPool) SetPoolSettings(maxNumberOfWorkers, boostWorkers int, timeout time.Duration) {
 	p.lock.Lock()
 	defer p.lock.Unlock()
 	p.maxNumberOfWorkers = maxNumberOfWorkers
@@ -156,8 +192,7 @@ func (p *WorkerPool) SetMaxNumberOfWorkers(newMax int) {
 	p.maxNumberOfWorkers = newMax
 }
 
-// AddWorkers adds workers to the pool - this allows the number of workers to go above the limit
-func (p *WorkerPool) AddWorkers(number int, timeout time.Duration) context.CancelFunc {
+func (p *WorkerPool) commonRegisterWorkers(number int, timeout time.Duration, isFlusher bool) (context.Context, context.CancelFunc) {
 	var ctx context.Context
 	var cancel context.CancelFunc
 	start := time.Now()
@@ -173,7 +208,7 @@ func (p *WorkerPool) AddWorkers(number int, timeout time.Duration) context.CancelFunc {
 
 	mq := GetManager().GetManagedQueue(p.qid)
 	if mq != nil {
-		pid := mq.RegisterWorkers(number, start, hasTimeout, end, cancel)
+		pid := mq.RegisterWorkers(number, start, hasTimeout, end, cancel, isFlusher)
 		go func() {
 			<-ctx.Done()
 			mq.RemoveWorkers(pid)
@@ -184,6 +219,12 @@ func (p *WorkerPool) AddWorkers(number int, timeout time.Duration) context.CancelFunc {
 		log.Trace("WorkerPool: %d adding %d workers (no group id)", p.qid, number)
 	}
+	return ctx, cancel
+}
+
+// AddWorkers adds workers to the pool - this allows the number of workers to go above the limit
+func (p *WorkerPool) AddWorkers(number int, timeout time.Duration) context.CancelFunc {
+	ctx, cancel := p.commonRegisterWorkers(number, timeout, false)
 	p.addWorkers(ctx, number)
 	return cancel
 }
 
@@ -235,6 +276,7 @@ func (p *WorkerPool) CleanUp(ctx context.Context) {
 	close(p.dataChan)
 	for data := range p.dataChan {
 		p.handle(data)
+		atomic.AddInt64(&p.numInQueue, -1)
 		select {
 		case <-ctx.Done():
 			log.Warn("WorkerPool: %d Cleanup context closed before finishing clean-up", p.qid)
@@ -245,6 +287,37 @@ func (p *WorkerPool) CleanUp(ctx context.Context) {
 	log.Trace("WorkerPool: %d CleanUp Done", p.qid)
 }
 
+// Flush flushes the channel with a timeout - the Flush worker will be registered as a flush worker with the manager
+func (p *WorkerPool) Flush(timeout time.Duration) error {
+	ctx, cancel := p.commonRegisterWorkers(1, timeout, true)
+	defer cancel()
+	return p.FlushWithContext(ctx)
+}
+
+// IsEmpty returns true if the worker queue is empty
+func (p *WorkerPool) IsEmpty() bool {
+	return atomic.LoadInt64(&p.numInQueue) == 0
+}
+
+// FlushWithContext is very similar to CleanUp but it will return as soon as the dataChan is empty
+// NB: The worker will not be registered with the manager.
+func (p *WorkerPool) FlushWithContext(ctx context.Context) error {
+	log.Trace("WorkerPool: %d Flush", p.qid)
+	for {
+		select {
+		case data := <-p.dataChan:
+			p.handle(data)
+			atomic.AddInt64(&p.numInQueue, -1)
+		case <-p.baseCtx.Done():
+			return p.baseCtx.Err()
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+			return nil
+		}
+	}
+}
+
 func (p *WorkerPool) doWork(ctx context.Context) {
 	delay := time.Millisecond * 300
 	var data = make([]Data, 0, p.batchLength)
@@ -254,6 +327,7 @@ func (p *WorkerPool) doWork(ctx context.Context) {
 			if len(data) > 0 {
 				log.Trace("Handling: %d data, %v", len(data), data)
 				p.handle(data...)
+				atomic.AddInt64(&p.numInQueue, -1*int64(len(data)))
 			}
 			log.Trace("Worker shutting down")
 			return
@@ -263,6 +337,7 @@ func (p *WorkerPool) doWork(ctx context.Context) {
 			if len(data) > 0 {
 				log.Trace("Handling: %d data, %v", len(data), data)
 				p.handle(data...)
+				atomic.AddInt64(&p.numInQueue, -1*int64(len(data)))
 			}
 			log.Trace("Worker shutting down")
 			return
@@ -271,6 +346,7 @@ func (p *WorkerPool) doWork(ctx context.Context) {
 			if len(data) >= p.batchLength {
 				log.Trace("Handling: %d data, %v", len(data), data)
 				p.handle(data...)
+				atomic.AddInt64(&p.numInQueue, -1*int64(len(data)))
 				data = make([]Data, 0, p.batchLength)
 			}
 		default:
@@ -286,6 +362,7 @@ func (p *WorkerPool) doWork(ctx context.Context) {
 			if len(data) > 0 {
 				log.Trace("Handling: %d data, %v", len(data), data)
 				p.handle(data...)
+				atomic.AddInt64(&p.numInQueue, -1*int64(len(data)))
 			}
 			log.Trace("Worker shutting down")
 			return
@@ -301,6 +378,7 @@ func (p *WorkerPool) doWork(ctx context.Context) {
 			if len(data) > 0 {
 				log.Trace("Handling: %d data, %v", len(data), data)
 				p.handle(data...)
+				atomic.AddInt64(&p.numInQueue, -1*int64(len(data)))
 			}
 			log.Trace("Worker shutting down")
 			return
@@ -309,6 +387,7 @@ func (p *WorkerPool) doWork(ctx context.Context) {
 			if len(data) >= p.batchLength {
 				log.Trace("Handling: %d data, %v", len(data), data)
 				p.handle(data...)
+				atomic.AddInt64(&p.numInQueue, -1*int64(len(data)))
 				data = make([]Data, 0, p.batchLength)
 			}
 		case <-timer.C:
@@ -316,6 +395,7 @@ func (p *WorkerPool) doWork(ctx context.Context) {
 			if len(data) > 0 {
 				log.Trace("Handling: %d data, %v", len(data), data)
 				p.handle(data...)
+				atomic.AddInt64(&p.numInQueue, -1*int64(len(data)))
 				data = make([]Data, 0, p.batchLength)
 			}
 		}
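
A minimal usage sketch of the API introduced above (illustrative only: the configuration values are arbitrary, and HandlerFunc and Data are the queue package's existing handler and payload types):

	// Sketch, not part of the diff: construct a pool, push work, then flush.
	handler := func(data ...Data) {
		for _, d := range data {
			log.Info("handled: %v", d)
		}
	}

	pool := NewWorkerPool(handler, WorkerPoolConfiguration{
		QueueLength:  20,
		BatchLength:  2,
		BlockTimeout: 1 * time.Second,
		BoostTimeout: 5 * time.Minute,
		BoostWorkers: 5,
		MaxWorkers:   10,
	})

	pool.Push("some work")

	// Flush registers a temporary flush worker with the manager, then drains
	// dataChan in the calling goroutine until it is empty or the timeout
	// expires, so it works even before any pool workers have been started.
	if err := pool.Flush(10 * time.Second); err != nil {
		log.Error("Flush failed: %v", err)
	}
	log.Info("queue empty: %v", pool.IsEmpty())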