mirror of https://github.com/go-gitea/gitea
Use global lock instead of NewExclusivePool to allow distributed lock between multiple Gitea instances (#31813)
Replace #26486, fix #19620.

Co-authored-by: Jason Song <i@wolfogre.com>

pull/31997/head
parent a5818470fe
commit 2da2000413
File diff suppressed because one or more lines are too long
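The removed ExclusivePool (last file below) only serializes goroutines inside a single process, so two Gitea instances working on the same resource cannot see each other's locks; the new [global_lock] section lets a shared service such as Redis provide that coordination. A minimal sketch of the idea, assuming a hypothetical key-scoped Locker interface (the Locker and memoryLocker names are illustrative, not the API introduced by this commit); a "redis" backend would implement the same interface against a shared Redis server so that every instance contends on the same keys:

package main

import (
	"fmt"
	"sync"
)

// Locker is a hypothetical key-scoped lock: Lock blocks until the key is
// free and returns a function that releases it.
type Locker interface {
	Lock(key string) (release func())
}

// memoryLocker coordinates only goroutines within one process, roughly what
// the removed ExclusivePool provided.
type memoryLocker struct {
	mu    sync.Mutex
	locks map[string]*sync.Mutex
}

func (m *memoryLocker) Lock(key string) (release func()) {
	m.mu.Lock()
	l, ok := m.locks[key]
	if !ok {
		l = &sync.Mutex{}
		m.locks[key] = l
	}
	m.mu.Unlock()
	l.Lock() // blocks while another goroutine holds this key
	return l.Unlock
}

func main() {
	var locker Locker = &memoryLocker{locks: map[string]*sync.Mutex{}}
	release := locker.Lock("repo/1")
	fmt.Println("holding the lock for repo/1 (single process only)")
	release()
}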
@@ -0,0 +1,37 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package setting

import (
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/nosql"
)

// GlobalLock represents configuration of global lock
var GlobalLock = struct {
	ServiceType    string
	ServiceConnStr string
}{
	ServiceType: "memory",
}

func loadGlobalLockFrom(rootCfg ConfigProvider) {
	sec := rootCfg.Section("global_lock")
	GlobalLock.ServiceType = sec.Key("SERVICE_TYPE").MustString("memory")
	switch GlobalLock.ServiceType {
	case "memory":
	case "redis":
		connStr := sec.Key("SERVICE_CONN_STR").String()
		if connStr == "" {
			log.Fatal("SERVICE_CONN_STR is empty for redis")
		}
		u := nosql.ToRedisURI(connStr)
		if u == nil {
			log.Fatal("SERVICE_CONN_STR %s is not a valid redis connection string", connStr)
		}
		GlobalLock.ServiceConnStr = connStr
	default:
		log.Fatal("Unknown sync lock service type: %s", GlobalLock.ServiceType)
	}
}
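For reference, the app.ini section parsed by loadGlobalLockFrom above would look like the sketch below; the section and key names come from the code, the Redis connection string is only an example (the same one used in the test file that follows), and SERVICE_TYPE defaults to "memory" when the section is omitted:

[global_lock]
SERVICE_TYPE = redis
SERVICE_CONN_STR = addrs=127.0.0.1:6379 db=0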
@@ -0,0 +1,35 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package setting

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestLoadGlobalLockConfig(t *testing.T) {
	t.Run("DefaultGlobalLockConfig", func(t *testing.T) {
		iniStr := ``
		cfg, err := NewConfigProviderFromData(iniStr)
		assert.NoError(t, err)

		loadGlobalLockFrom(cfg)
		assert.EqualValues(t, "memory", GlobalLock.ServiceType)
	})

	t.Run("RedisGlobalLockConfig", func(t *testing.T) {
		iniStr := `
[global_lock]
SERVICE_TYPE = redis
SERVICE_CONN_STR = addrs=127.0.0.1:6379 db=0
`
		cfg, err := NewConfigProviderFromData(iniStr)
		assert.NoError(t, err)

		loadGlobalLockFrom(cfg)
		assert.EqualValues(t, "redis", GlobalLock.ServiceType)
		assert.EqualValues(t, "addrs=127.0.0.1:6379 db=0", GlobalLock.ServiceConnStr)
	})
}
@@ -1,69 +0,0 @@
// Copyright 2016 The Gogs Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package sync

import (
	"sync"
)

// ExclusivePool is a pool of non-identical instances
// that only one instance with same identity is in the pool at a time.
// In other words, only instances with different identities can be in
// the pool the same time. If another instance with same identity tries
// to get into the pool, it hangs until previous instance left the pool.
//
// This pool is particularly useful for performing tasks on same resource
// on the file system in different goroutines.
type ExclusivePool struct {
	lock sync.Mutex

	// pool maintains locks for each instance in the pool.
	pool map[string]*sync.Mutex

	// count maintains the number of times an instance with same identity checks in
	// to the pool, and should be reduced to 0 (removed from map) by checking out
	// with same number of times.
	// The purpose of count is to delete lock when count down to 0 and recycle memory
	// from map object.
	count map[string]int
}

// NewExclusivePool initializes and returns a new ExclusivePool object.
func NewExclusivePool() *ExclusivePool {
	return &ExclusivePool{
		pool:  make(map[string]*sync.Mutex),
		count: make(map[string]int),
	}
}

// CheckIn checks in an instance to the pool and hangs while instance
// with same identity is using the lock.
func (p *ExclusivePool) CheckIn(identity string) {
	p.lock.Lock()

	lock, has := p.pool[identity]
	if !has {
		lock = &sync.Mutex{}
		p.pool[identity] = lock
	}
	p.count[identity]++

	p.lock.Unlock()
	lock.Lock()
}

// CheckOut checks out an instance from the pool and releases the lock
// to let other instances with same identity to grab the lock.
func (p *ExclusivePool) CheckOut(identity string) {
	p.lock.Lock()
	defer p.lock.Unlock()

	p.pool[identity].Unlock()
	if p.count[identity] == 1 {
		delete(p.pool, identity)
		delete(p.count, identity)
	} else {
		p.count[identity]--
	}
}
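For context, callers of the removed pool followed a check-in/check-out pattern like the sketch below (the import path is assumed from the package shown above, and the repository-path key is illustrative); because the pool state lives in process memory, this pattern cannot serialize work across several Gitea instances, which is the gap the new global lock setting addresses:

package main

import (
	"fmt"

	"code.gitea.io/gitea/modules/sync"
)

func main() {
	pool := sync.NewExclusivePool()

	repoPath := "user/repo.git" // any string identity works
	pool.CheckIn(repoPath)      // blocks while another goroutine holds this identity
	defer pool.CheckOut(repoPath)

	fmt.Println("exclusive access to", repoPath)
}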