Fix typo loopStoped -> loopStopped

This commit is contained in:
ionutboangiu
2021-07-27 17:14:54 +03:00
committed by Dan Christian Bogos
parent 68ee2291b6
commit 0e4ae73958
11 changed files with 24 additions and 24 deletions

View File

@@ -60,7 +60,7 @@ const CGRATES_CFG_JSON = `
"caps": 0, // maximum concurrent request allowed ( 0 to disabled )
"caps_strategy": "*busy", // strategy in case of concurrent requests reached
"caps_stats_interval": "0", // the interval we sample for caps stats ( 0 to disabled )
"shutdown_timeout": "1s" // the duration to wait until all services are stoped
"shutdown_timeout": "1s" // the duration to wait until all services are stopped
},

View File

@@ -273,7 +273,7 @@ func (s *Server) ServeBiRPC(addrJSON, addrGOB string, onConn func(*rpc2.Client),
}
defer lgob.Close()
}
<-s.stopbiRPCServer // wait until server is stoped to close the listener
<-s.stopbiRPCServer // wait until server is stopped to close the listener
return
}
@@ -295,7 +295,7 @@ func (s *Server) acceptBiRPC(srv *rpc2.Server, l net.Listener, codecName string,
return
}
s.stopbiRPCServer <- struct{}{}
utils.Logger.Crit(fmt.Sprintf("Stoped Bi%s server beacause %s", codecName, err))
utils.Logger.Crit(fmt.Sprintf("Stopped Bi%s server because %s", codecName, err))
return // stop if we get Accept error
}
go srv.ServeCodec(newCodec(conn, s.caps, s.anz))

View File

@@ -39,7 +39,7 @@
// "caps": 0, // maximum concurrent request allowed ( 0 to disabled )
// "caps_strategy": "*busy", // strategy in case of concurrent requests reached
// "caps_stats_interval": "0", // the interval we sample for caps stats ( 0 to disabled )
// "shutdown_timeout": "1s" // the duration to wait until all services are stoped
// "shutdown_timeout": "1s" // the duration to wait until all services are stopped
// },

View File

@@ -55,7 +55,7 @@ Test calls
Since the user 1001 is marked as *prepaid* inside the telecom switch, calling between 1001 and 1002 should generate pre-auth and prepaid debits which can be checked with *get_account* command integrated within *cgr-console* tool. Charging will be done based on time of day as described in the tariff plan definition above.
*Note*: An important particularity to note here is the ability of **CGRateS** SessionManager to refund units booked in advance (eg: if debit occurs every 10s and rate increments are set to 1s, the SessionManager will be smart enough to refund pre-booked credits for calls stoped in the middle of debit interval).
*Note*: An important particularity to note here is the ability of **CGRateS** SessionManager to refund units booked in advance (eg: if debit occurs every 10s and rate increments are set to 1s, the SessionManager will be smart enough to refund pre-booked credits for calls stopped in the middle of debit interval).
Check that 1001 balance is properly deducted, during the call, and moreover considering that general balance has priority over the shared one debits for this call should take place at first out of general balance.

View File

@@ -370,7 +370,7 @@ func NewResourceService(dm *DataManager, cgrcfg *config.CGRConfig,
storedResources: make(utils.StringSet),
cgrcfg: cgrcfg,
filterS: filterS,
loopStoped: make(chan struct{}),
loopStopped: make(chan struct{}),
stopBackup: make(chan struct{}),
connMgr: connMgr,
}
@@ -385,14 +385,14 @@ type ResourceService struct {
srMux sync.RWMutex // protects storedResources
cgrcfg *config.CGRConfig
stopBackup chan struct{} // control storing process
loopStoped chan struct{}
loopStopped chan struct{}
connMgr *ConnManager
}
// Reload stops the backupLoop and restarts it
func (rS *ResourceService) Reload() {
close(rS.stopBackup)
<-rS.loopStoped // wait until the loop is done
<-rS.loopStopped // wait until the loop is done
rS.stopBackup = make(chan struct{})
go rS.runBackup()
}
@@ -414,14 +414,14 @@ func (rS *ResourceService) Shutdown() {
func (rS *ResourceService) runBackup() {
storeInterval := rS.cgrcfg.ResourceSCfg().StoreInterval
if storeInterval <= 0 {
rS.loopStoped <- struct{}{}
rS.loopStopped <- struct{}{}
return
}
for {
rS.storeResources()
select {
case <-rS.stopBackup:
rS.loopStoped <- struct{}{}
rS.loopStopped <- struct{}{}
return
case <-time.After(storeInterval):
}

View File

@@ -38,7 +38,7 @@ func NewStatService(dm *DataManager, cgrcfg *config.CGRConfig,
filterS: filterS,
cgrcfg: cgrcfg,
storedStatQueues: make(utils.StringSet),
loopStoped: make(chan struct{}),
loopStopped: make(chan struct{}),
stopBackup: make(chan struct{}),
}
}
@@ -49,7 +49,7 @@ type StatService struct {
connMgr *ConnManager
filterS *FilterS
cgrcfg *config.CGRConfig
loopStoped chan struct{}
loopStopped chan struct{}
stopBackup chan struct{}
storedStatQueues utils.StringSet // keep a record of stats which need saving, map[statsTenantID]bool
ssqMux sync.RWMutex // protects storedStatQueues
@@ -58,7 +58,7 @@ type StatService struct {
// Reload stops the backupLoop and restarts it
func (sS *StatService) Reload() {
close(sS.stopBackup)
<-sS.loopStoped // wait until the loop is done
<-sS.loopStopped // wait until the loop is done
sS.stopBackup = make(chan struct{})
go sS.runBackup()
}
@@ -80,14 +80,14 @@ func (sS *StatService) Shutdown() {
func (sS *StatService) runBackup() {
storeInterval := sS.cgrcfg.StatSCfg().StoreInterval
if storeInterval <= 0 {
sS.loopStoped <- struct{}{}
sS.loopStopped <- struct{}{}
return
}
for {
sS.storeStats()
select {
case <-sS.stopBackup:
sS.loopStoped <- struct{}{}
sS.loopStopped <- struct{}{}
return
case <-time.After(storeInterval):
}

View File

@@ -209,7 +209,7 @@ func NewThresholdService(dm *DataManager, cgrcfg *config.CGRConfig, filterS *Fil
cgrcfg: cgrcfg,
filterS: filterS,
stopBackup: make(chan struct{}),
loopStoped: make(chan struct{}),
loopStopped: make(chan struct{}),
storedTdIDs: make(utils.StringSet),
}
}
@@ -220,7 +220,7 @@ type ThresholdService struct {
cgrcfg *config.CGRConfig
filterS *FilterS
stopBackup chan struct{}
loopStoped chan struct{}
loopStopped chan struct{}
storedTdIDs utils.StringSet // keep a record of stats which need saving, map[statsTenantID]bool
stMux sync.RWMutex // protects storedTdIDs
}
@@ -228,7 +228,7 @@ type ThresholdService struct {
// Reload stops the backupLoop and restarts it
func (tS *ThresholdService) Reload() {
close(tS.stopBackup)
<-tS.loopStoped // wait until the loop is done
<-tS.loopStopped // wait until the loop is done
tS.stopBackup = make(chan struct{})
go tS.runBackup()
}
@@ -250,14 +250,14 @@ func (tS *ThresholdService) Shutdown() {
func (tS *ThresholdService) runBackup() {
storeInterval := tS.cgrcfg.ThresholdSCfg().StoreInterval
if storeInterval <= 0 {
tS.loopStoped <- struct{}{}
tS.loopStopped <- struct{}{}
return
}
for {
tS.storeThresholds()
select {
case <-tS.stopBackup:
tS.loopStoped <- struct{}{}
tS.loopStopped <- struct{}{}
return
case <-time.After(storeInterval):
}

View File

@@ -49,7 +49,7 @@ import (
* - configure the cluster with the following command:
* `./redis-trib.rb create --replicas 1 127.0.0.1:7001 127.0.0.1:7002 127.0.0.1:7003 127.0.0.1:7004 127.0.0.1:7005 127.0.0.1:7006`
*
* To run the tests you need to specify the `redisCluster` flag and have the redis stoped:
* To run the tests you need to specify the `redisCluster` flag and have the redis stopped:
* `go test github.com/cgrates/cgrates/general_tests -tags=integration -dbtype=*mysql -run=TestRedisCluster -redisCluster -v`
*
* The configuration of the cluster is the following:

View File

@@ -67,7 +67,7 @@ var (
// Node2 will be slave of node1 and start at port 16380
// Sentinel1 will be started at port 16381 and will watch Node1
// Sentinel2 will be started at port 16382 and will watch Node1
// Also make sure that redis process is stoped
// Also make sure that redis process is stopped
func TestRedisSentinel(t *testing.T) {
if !*redisSentinel {
return

View File

@@ -160,7 +160,7 @@ func (s *Scheduler) loadTasks() {
utils.SchedulerS, err.Error(), s.cfg.SchedulerCfg().Filters, task.ActionsID, task.AccountID))
}
// we do not push the task back as this may cause an infinite loop
// push it when the function is done and we stoped the for
// push it when the function is done and we stopped the for
// do not use defer here as the functions are executed
// from the last one to the first
unexecutedTasks = append(unexecutedTasks, task)

View File

@@ -97,7 +97,7 @@ func TestKamailioAgentReload(t *testing.T) {
t.Fatalf("\nExpected <%+v>, \nReceived <%+v>", nil, err)
}
time.Sleep(10 * time.Millisecond) //need to switch to goroutine
// the engine should be stoped as we could not connect to kamailio
// the engine should be stopped as we could not connect to kamailio
shdChan.CloseOnce()
time.Sleep(10 * time.Millisecond)