mirror of https://github.com/cgrates/cgrates.git (synced 2026-02-11 18:16:24 +05:00)

Test in services

committed by Dan Christian Bogos
parent 372662bc5a
commit a0f4bcdc65
@@ -29,7 +29,7 @@ func TestActionSCfgLoadFromJSONCfg(t *testing.T) {
jsonCfg := &ActionSJsonCfg{
Enabled: utils.BoolPointer(true),
Indexed_selects: utils.BoolPointer(false),
Tenants: &[]string{"cgrates.org", "cgrates.com"},
Tenants: &[]string{"itsyscom.com"},
String_indexed_fields: &[]string{"*req.index1"},
Prefix_indexed_fields: &[]string{"*req.index1", "*req.index2"},
Suffix_indexed_fields: &[]string{"*req.index1"},
@@ -38,7 +38,7 @@ func TestActionSCfgLoadFromJSONCfg(t *testing.T) {
expected := &ActionSCfg{
Enabled: true,
IndexedSelects: false,
Tenants: &[]string{"cgrates.org", "cgrates.com"},
Tenants: &[]string{"itsyscom.com"},
StringIndexedFields: &[]string{"*req.index1"},
PrefixIndexedFields: &[]string{"*req.index1", "*req.index2"},
SuffixIndexedFields: &[]string{"*req.index1"},
@@ -56,7 +56,7 @@ func TestActionSCfgAsMapInterface(t *testing.T) {
cfgJSONStr := `{
"actions": {
"enabled": true,
"tenants": ["cgrates.org", "cgrates.com"],
"tenants": ["itsyscom.com"],
"indexed_selects": false,
"string_indexed_fields": ["*req.index1"],
"prefix_indexed_fields": ["*req.index1","*req.index2"],
@@ -67,7 +67,7 @@ func TestActionSCfgAsMapInterface(t *testing.T) {

eMap := map[string]interface{}{
utils.EnabledCfg: true,
utils.Tenants: []string{"cgrates.org", "cgrates.com"},
utils.Tenants: []string{"itsyscom.com"},
utils.IndexedSelectsCfg: false,
utils.StringIndexedFieldsCfg: []string{"*req.index1"},
utils.PrefixIndexedFieldsCfg: []string{"*req.index1", "*req.index2"},
@@ -84,7 +84,7 @@ func TestActionSCfgAsMapInterface(t *testing.T) {
func TestActionSCfgClone(t *testing.T) {
ban := &ActionSCfg{
Enabled: true,
Tenants: &[]string{"cgrates.org", "cgrates.com"},
Tenants: &[]string{"itsyscom.com"},
IndexedSelects: false,
StringIndexedFields: &[]string{"*req.index1"},
PrefixIndexedFields: &[]string{"*req.index1", "*req.index2"},
@@ -95,7 +95,7 @@ func TestActionSCfgClone(t *testing.T) {
if !reflect.DeepEqual(ban, rcv) {
t.Errorf("\nExpected: %+v\nReceived: %+v", utils.ToJSON(ban), utils.ToJSON(rcv))
}
if (*rcv.Tenants)[0] = ""; (*ban.Tenants)[0] != "cgrates.org" {
if (*rcv.Tenants)[0] = ""; (*ban.Tenants)[0] != "itsyscom.com" {
t.Errorf("Expected clone to not modify the cloned")
}
if (*rcv.StringIndexedFields)[0] = ""; (*ban.StringIndexedFields)[0] != "*req.index1" {
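The clone assertions above blank out the first element of the clone's pointer-to-slice fields and then check that the original ActionSCfg still holds its own value. A minimal, self-contained sketch of that isolation pattern follows (the Profile type and its Clone method are hypothetical stand-ins, not the actual CGRateS structs):

package main

import "fmt"

// Profile is a hypothetical stand-in for a config struct that keeps
// its tenants behind a pointer to a slice, like ActionSCfg.Tenants.
type Profile struct {
    Tenants *[]string
}

// Clone copies the slice contents, so the clone owns a separate backing array.
func (p *Profile) Clone() *Profile {
    if p.Tenants == nil {
        return &Profile{}
    }
    tnts := make([]string, len(*p.Tenants))
    copy(tnts, *p.Tenants)
    return &Profile{Tenants: &tnts}
}

func main() {
    orig := &Profile{Tenants: &[]string{"itsyscom.com"}}
    clone := orig.Clone()
    (*clone.Tenants)[0] = ""        // mutate the clone only
    fmt.Println((*orig.Tenants)[0]) // still "itsyscom.com" with a deep clone
}

If Clone only copied the pointer, the mutation would reach the original and the check above would fail.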
@@ -15,10 +15,18 @@ GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
*/

package services

/*
import (
"reflect"
"sync"
"testing"

"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
)

//TestDataDBCoverage for cover testing
func TestDataDBCoverage(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
@@ -161,4 +169,3 @@ func TestDataDBCoverage(t *testing.T) {
}

}
*/
@@ -18,6 +18,18 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
package services

/*
import (
"sync"
"testing"

"github.com/cgrates/cgrates/agents"

"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
"github.com/cgrates/rpcclient"
)

//TestDiameterAgentCoverage for cover testing
func TestDiameterAgentCoverage(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
@@ -17,57 +17,56 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
*/
package services

/*
import (
"reflect"
"sync"
"testing"

"github.com/cgrates/cgrates/ers"

"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
)

//TestEventReaderSCoverage for cover testing
func TestEventReaderSCoverage(t *testing.T) {
cfg := config.NewDefaultCGRConfig()

utils.Logger, _ = utils.Newlogger(utils.MetaSysLog, cfg.GeneralCfg().NodeID)
utils.Logger.SetLogLevel(7)
cfg.SessionSCfg().Enabled = true
filterSChan := make(chan *engine.FilterS, 1)
filterSChan <- nil
shdChan := utils.NewSyncedChan()
shdWg := new(sync.WaitGroup)
server := cores.NewServer(nil)
srvMngr := servmanager.NewServiceManager(cfg, shdChan, shdWg)
srvDep := map[string]*sync.WaitGroup{utils.DataDB: new(sync.WaitGroup)}
anz := NewAnalyzerService(cfg, server, filterSChan, shdChan, make(chan rpcclient.ClientConnector, 1), srvDep)
db := NewDataDBService(cfg, nil, srvDep)
sS := NewSessionService(cfg, db, server, make(chan rpcclient.ClientConnector, 1), shdChan, nil, nil, anz, srvDep)
attrS := NewEventReaderService(cfg, filterSChan, shdChan, nil, srvDep)
engine.NewConnManager(cfg, nil)
srvMngr.AddServices(attrS, sS,
NewLoaderService(cfg, db, filterSChan, server, make(chan rpcclient.ClientConnector, 1), nil, anz, srvDep), db)
if err := srvMngr.StartServices(); err != nil {
t.Error(err)
}
if attrS.IsRunning() {
srv := NewEventReaderService(cfg, filterSChan, shdChan, nil, srvDep)

if srv.IsRunning() {
t.Errorf("Expected service to be down")
}

var reply string
if err := cfg.V1ReloadConfig(&config.ReloadArgs{
Path: path.Join("/usr", "share", "cgrates", "conf", "samples", "ers_reload", "internal"),
Section: config.ERsJson,
}, &reply); err != nil {
t.Error(err)
} else if reply != utils.OK {
t.Errorf("Expecting OK ,received %s", reply)
}
time.Sleep(10 * time.Millisecond) //need to switch to gorutine
if !attrS.IsRunning() {
t.Errorf("Expected service to be running")
}

cfg.ERsCfg().Enabled = false
cfg.GetReloadChan(config.ERsJson) <- struct{}{}
time.Sleep(10 * time.Millisecond)
if attrS.IsRunning() {
t.Errorf("Expected service to be down")
}
shdChan.CloseOnce()
time.Sleep(10 * time.Millisecond)

srv2 := EventReaderService{
RWMutex: sync.RWMutex{},
cfg: cfg,
filterSChan: filterSChan,
shdChan: shdChan,
ers: &ers.ERService{},
rldChan: make(chan struct{}, 1),
stopChan: make(chan struct{}, 1),
connMgr: nil,
srvDep: srvDep,
}
if !srv2.IsRunning() {
t.Errorf("Expected service to be running")
}
serviceName := srv2.ServiceName()
if !reflect.DeepEqual(serviceName, utils.ERs) {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.ERs, serviceName)
}
shouldRun := srv2.ShouldRun()
if !reflect.DeepEqual(shouldRun, false) {
t.Errorf("\nExpecting <false>,\n Received <%+v>", shouldRun)
}
srv2.Shutdown()
if srv.IsRunning() {
t.Errorf("Expected service to be down")
}
}
*/
@@ -17,56 +17,52 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
*/
package services

/*
import (
"reflect"
"sync"
"testing"

"github.com/cgrates/cgrates/agents"
"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
"github.com/cgrates/rpcclient"
)

//TestFreeSwitchAgentCoverage for cover testing
func TestFreeSwitchAgentCoverage(t *testing.T) {
cfg := config.NewDefaultCGRConfig()

cfg.SessionSCfg().Enabled = true
cfg.SessionSCfg().ListenBijson = ""
utils.Logger, _ = utils.Newlogger(utils.MetaSysLog, cfg.GeneralCfg().NodeID)
utils.Logger.SetLogLevel(7)
filterSChan := make(chan *engine.FilterS, 1)
filterSChan <- nil
shdChan := utils.NewSyncedChan()
shdWg := new(sync.WaitGroup)
chS := engine.NewCacheS(cfg, nil, nil)
cacheSChan := make(chan rpcclient.ClientConnector, 1)
cacheSChan <- chS

server := cores.NewServer(nil)
srvMngr := servmanager.NewServiceManager(cfg, shdChan, shdWg)
srvDep := map[string]*sync.WaitGroup{utils.DataDB: new(sync.WaitGroup)}
db := NewDataDBService(cfg, nil, srvDep)
anz := NewAnalyzerService(cfg, server, filterSChan, shdChan, make(chan rpcclient.ClientConnector, 1), srvDep)
sS := NewSessionService(cfg, db, server, make(chan rpcclient.ClientConnector, 1),
shdChan, nil, nil, anz, srvDep)

srv := NewFreeswitchAgent(cfg, shdChan, nil, srvDep)
engine.NewConnManager(cfg, nil)
srvMngr.AddServices(srv, sS,
NewLoaderService(cfg, db, filterSChan, server, make(chan rpcclient.ClientConnector, 1), nil, anz, srvDep), db)
if err := srvMngr.StartServices(); err != nil {
t.Fatal(err)
}

if srv.IsRunning() {
t.Errorf("Expected service to be down")
}
var reply string
if err := cfg.V1ReloadConfig(&config.ReloadArgs{
Path: path.Join("/usr", "share", "cgrates", "tutorial_tests", "fs_evsock", "cgrates", "etc", "cgrates"),
Section: config.FreeSWITCHAgentJSN,
}, &reply); err != nil {
t.Fatal(err)
} else if reply != utils.OK {
t.Errorf("Expecting OK ,received %s", reply)
srv2 := FreeswitchAgent{
cfg: cfg,
shdChan: shdChan,
fS: &agents.FSsessions{},
connMgr: nil,
srvDep: srvDep,
}
time.Sleep(10 * time.Millisecond) //need to switch to gorutine
// the engine should be stoped as we could not connect to freeswich
if srv.IsRunning() {
t.Errorf("Expected service to be down")
if !srv2.IsRunning() {
t.Errorf("Expected service to be running")
}
serviceName := srv2.ServiceName()
if !reflect.DeepEqual(serviceName, utils.FreeSWITCHAgent) {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.FreeSWITCHAgent, serviceName)
}
shouldRun := srv2.ShouldRun()
if !reflect.DeepEqual(shouldRun, false) {
t.Errorf("\nExpecting <false>,\n Received <%+v>", shouldRun)
}
shdChan.CloseOnce()
runtime.Gosched()
time.Sleep(10 * time.Millisecond)
}
*/
@@ -42,6 +42,7 @@ func TestGlobalVarS(t *testing.T) {
t.Errorf("This service needs to be running")
}
cfg.HTTPCfg().ClientOpts[utils.HTTPClientDialTimeoutCfg] = "30as"

if err := exp.Shutdown(); err != nil {
t.Fatal(err)
}
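Setting the HTTP client dial-timeout option to "30as" presumably drives the error branch of the duration parsing that happens when the option is applied (an assumption; the consuming code is outside this hunk). The failure itself is simply Go's duration parser rejecting the unit, as this standalone sketch shows:

package main

import (
    "fmt"
    "time"
)

func main() {
    // "as" is not a recognised duration unit (ns, us, ms, s, m, h are),
    // so ParseDuration returns an error instead of a value.
    if _, err := time.ParseDuration("30as"); err != nil {
        fmt.Println("parse error:", err)
    }
}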
@@ -17,57 +17,56 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
*/
package services

import (
"reflect"
"sync"
"testing"

"github.com/cgrates/cgrates/agents"
"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
"github.com/cgrates/rpcclient"
)

//TestKamailioAgentCoverage for cover testing
/*

func TestKamailioAgentCoverage(t *testing.T) {
cfg := config.NewDefaultCGRConfig()

cfg.SessionSCfg().Enabled = true
cfg.SessionSCfg().ListenBijson = ""
utils.Logger, _ = utils.Newlogger(utils.MetaSysLog, cfg.GeneralCfg().NodeID)
utils.Logger.SetLogLevel(7)
filterSChan := make(chan *engine.FilterS, 1)
filterSChan <- nil
shdChan := utils.NewSyncedChan()
shdWg := new(sync.WaitGroup)
chS := engine.NewCacheS(cfg, nil, nil)

cacheSChan := make(chan rpcclient.ClientConnector, 1)
cacheSChan <- chS

server := cores.NewServer(nil)
srvMngr := servmanager.NewServiceManager(cfg, shdChan, shdWg)
srvDep := map[string]*sync.WaitGroup{utils.DataDB: new(sync.WaitGroup)}
db := NewDataDBService(cfg, nil, srvDep)
anz := NewAnalyzerService(cfg, server, filterSChan, shdChan, make(chan rpcclient.ClientConnector, 1), srvDep)
sS := NewSessionService(cfg, db, server, make(chan rpcclient.ClientConnector, 1),
shdChan, nil, nil, anz, srvDep)
srv := NewKamailioAgent(cfg, shdChan, nil, srvDep)
engine.NewConnManager(cfg, nil)
srvMngr.AddServices(srv, sS,
NewLoaderService(cfg, db, filterSChan, server, make(chan rpcclient.ClientConnector, 1), nil, anz, srvDep), db)
if err := srvMngr.StartServices(); err != nil {
t.Fatal(err)
}
if srv.IsRunning() {
t.Errorf("Expected service to be down")
}
var reply string
if err := cfg.V1ReloadConfig(&config.ReloadArgs{
Path: path.Join("/usr", "share", "cgrates", "tutorial_tests", "kamevapi", "cgrates", "etc", "cgrates"),
Section: config.KamailioAgentJSN,
}, &reply); err != nil {
t.Fatal(err)
} else if reply != utils.OK {
t.Errorf("Expecting OK ,received %s", reply)
srv2 := KamailioAgent{
cfg: cfg,
shdChan: shdChan,
kam: &agents.KamailioAgent{},
connMgr: nil,
srvDep: srvDep,
}
runtime.Gosched()
time.Sleep(10 * time.Millisecond) //need to switch to gorutine
// the engine should be stoped as we could not connect to kamailio
if srv.IsRunning() {
if !srv2.IsRunning() {
t.Errorf("Expected service to be down")
}
serviceName := srv2.ServiceName()
if !reflect.DeepEqual(serviceName, utils.KamailioAgent) {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.KamailioAgent, serviceName)
}
shouldRun := srv2.ShouldRun()
if !reflect.DeepEqual(shouldRun, false) {
t.Errorf("\nExpecting <false>,\n Received <%+v>", shouldRun)
}
srv2.Shutdown()
if srv2.IsRunning() {
t.Errorf("Expected service to be down")
}
shdChan.CloseOnce()
time.Sleep(10 * time.Millisecond)
}
*/
@@ -22,10 +22,11 @@ import (
"sync"
"testing"

"github.com/cgrates/cgrates/loaders"

"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/cores"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/loaders"
"github.com/cgrates/cgrates/utils"
"github.com/cgrates/rpcclient"
)
@@ -52,16 +53,41 @@ func TestLoaderSCoverage(t *testing.T) {
if srv.IsRunning() {
t.Errorf("Expected service to be down")
}
srv.ldrs = loaders.NewLoaderService(&engine.DataManager{}, []*config.LoaderSCfg{{
ID: "test_id",
Enabled: true,
}},
"test", &engine.FilterS{}, &engine.ConnManager{})
srv.ldrs = loaders.NewLoaderService(&engine.DataManager{},
[]*config.LoaderSCfg{{
ID: "test_id",
Enabled: true,
Tenant: nil,
DryRun: false,
RunDelay: 0,
LockFileName: "",
CacheSConns: nil,
FieldSeparator: "",
TpInDir: "",
TpOutDir: "",
Data: nil,
}}, "",
&engine.FilterS{}, nil)
if !srv.IsRunning() {
t.Errorf("Expected service to be running")
}
serviceName := srv.ServiceName()
if !reflect.DeepEqual(serviceName, utils.LoaderS) {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.LoaderS, serviceName)
}
shouldRun := srv.ShouldRun()
if !reflect.DeepEqual(shouldRun, false) {
t.Errorf("\nExpecting <false>,\n Received <%+v>", shouldRun)
}
if !reflect.DeepEqual(srv.GetLoaderS(), srv.ldrs) {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", srv.ldrs, srv.GetLoaderS())
}
srv.stopChan = make(chan struct{}, 1)
chS := engine.NewCacheS(cfg, nil, nil)
srv.connChan <- chS
srv.Shutdown()
if srv.IsRunning() {
t.Errorf("Expected service to be down")
}

}
@@ -17,71 +17,55 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
*/
package services

/*
import (
"reflect"
"sync"
"testing"

"github.com/cgrates/cgrates/agents"
"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
"github.com/cgrates/rpcclient"
)

//TestLoaderSCoverage for cover testing
func TestRadiusAgentCoverage(t *testing.T) {
cfg := config.NewDefaultCGRConfig()

cfg.SessionSCfg().Enabled = true
utils.Logger, _ = utils.Newlogger(utils.MetaSysLog, cfg.GeneralCfg().NodeID)
utils.Logger.SetLogLevel(7)
filterSChan := make(chan *engine.FilterS, 1)
filterSChan <- nil
shdChan := utils.NewSyncedChan()
shdWg := new(sync.WaitGroup)
chS := engine.NewCacheS(cfg, nil, nil)

cacheSChan := make(chan rpcclient.ClientConnector, 1)
cacheSChan <- chS

server := cores.NewServer(nil)
srvMngr := servmanager.NewServiceManager(cfg, shdChan, shdWg)
srvDep := map[string]*sync.WaitGroup{utils.DataDB: new(sync.WaitGroup)}
db := NewDataDBService(cfg, nil, srvDep)
anz := NewAnalyzerService(cfg, server, filterSChan, shdChan, make(chan rpcclient.ClientConnector, 1), srvDep)
sS := NewSessionService(cfg, db, server, make(chan rpcclient.ClientConnector, 1),
shdChan, nil, nil, anz, srvDep)
srv := NewRadiusAgent(cfg, filterSChan, shdChan, nil, srvDep)
engine.NewConnManager(cfg, nil)
srvMngr.AddServices(srv, sS,
NewLoaderService(cfg, db, filterSChan, server, make(chan rpcclient.ClientConnector, 1), nil, anz, srvDep), db)
if err := srvMngr.StartServices(); err != nil {
t.Fatal(err)
}
if srv.IsRunning() {
t.Errorf("Expected service to be down")
}
var reply string
if err := cfg.V1ReloadConfig(&config.ReloadArgs{
Path: path.Join("/usr", "share", "cgrates", "conf", "samples", "radagent_mysql"),
Section: config.RA_JSN,
}, &reply); err != nil {
t.Fatal(err)
} else if reply != utils.OK {
t.Errorf("Expecting OK ,received %s", reply)
srv2 := RadiusAgent{
cfg: cfg,
filterSChan: filterSChan,
shdChan: shdChan,
rad: &agents.RadiusAgent{},
connMgr: nil,
srvDep: srvDep,
}
if !srv2.IsRunning() {
t.Errorf("Expected service to be running")
}
serviceName := srv2.ServiceName()
if !reflect.DeepEqual(serviceName, utils.RadiusAgent) {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.RadiusAgent, serviceName)
}
shouldRun := srv2.ShouldRun()
if !reflect.DeepEqual(shouldRun, false) {
t.Errorf("\nExpecting <false>,\n Received <%+v>", shouldRun)
}
srv2.stopChan = make(chan struct{}, 1)
srv2.Shutdown()
if srv.IsRunning() {
t.Errorf("Expected service to be down")
}

time.Sleep(10 * time.Millisecond) //need to switch to gorutine
if !srv.IsRunning() {
t.Errorf("Expected service to be running")
}

err := srv.Start()
if err == nil || err != utils.ErrServiceAlreadyRunning {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.ErrServiceAlreadyRunning, err)
}
err = srv.Reload()
if err != nil {
t.Errorf("\nExpecting <nil>,\n Received <%+v>", err)
}
cfg.RadiusAgentCfg().Enabled = false
cfg.GetReloadChan(config.RA_JSN) <- struct{}{}
time.Sleep(10 * time.Millisecond)
if srv.IsRunning() {
t.Errorf("Expected service to be down")
}
shdChan.CloseOnce()
time.Sleep(10 * time.Millisecond)

}
*/
@@ -17,103 +17,72 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
*/
package services

/*
import (
"reflect"
"sync"
"testing"

v1 "github.com/cgrates/cgrates/apier/v1"
"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/cores"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
"github.com/cgrates/rpcclient"
)

//TestRalsCoverage for cover testing
func TestRalsCoverage(t *testing.T) {
cfg := config.NewDefaultCGRConfig()

utils.Logger, _ = utils.Newlogger(utils.MetaSysLog, cfg.GeneralCfg().NodeID)
utils.Logger.SetLogLevel(7)
filterSChan := make(chan *engine.FilterS, 1)
filterSChan <- nil
shdChan := utils.NewSyncedChan()
shdWg := new(sync.WaitGroup)
chS := engine.NewCacheS(cfg, nil, nil)
close(chS.GetPrecacheChannel(utils.CacheThresholdProfiles))
close(chS.GetPrecacheChannel(utils.CacheThresholds))
close(chS.GetPrecacheChannel(utils.CacheThresholdFilterIndexes))

close(chS.GetPrecacheChannel(utils.CacheDestinations))
close(chS.GetPrecacheChannel(utils.CacheReverseDestinations))
close(chS.GetPrecacheChannel(utils.CacheRatingPlans))
close(chS.GetPrecacheChannel(utils.CacheRatingProfiles))
close(chS.GetPrecacheChannel(utils.CacheActions))
close(chS.GetPrecacheChannel(utils.CacheActionPlans))
close(chS.GetPrecacheChannel(utils.CacheAccountActionPlans))
close(chS.GetPrecacheChannel(utils.CacheActionTriggers))
close(chS.GetPrecacheChannel(utils.CacheSharedGroups))
close(chS.GetPrecacheChannel(utils.CacheTimings))

cfg.ThresholdSCfg().Enabled = true
server := cores.NewServer(nil)
srvMngr := servmanager.NewServiceManager(cfg, shdChan, shdWg)
srvDep := map[string]*sync.WaitGroup{utils.DataDB: new(sync.WaitGroup)}
db := NewDataDBService(cfg, nil, srvDep)
cfg.StorDbCfg().Type = utils.INTERNAL
anz := NewAnalyzerService(cfg, server, filterSChan, shdChan, make(chan rpcclient.ClientConnector, 1), srvDep)
stordb := NewStorDBService(cfg, srvDep)
schS := NewSchedulerService(cfg, db, chS, filterSChan, server, make(chan rpcclient.ClientConnector, 1), nil, anz, srvDep)
tS := NewThresholdService(cfg, db, chS, filterSChan, server, make(chan rpcclient.ClientConnector, 1), anz, srvDep)
ralS := NewRalService(cfg, chS, server,
make(chan rpcclient.ClientConnector, 1),
make(chan rpcclient.ClientConnector, 1),
shdChan, nil, anz, srvDep)
srvMngr.AddServices(ralS, schS, tS,
NewLoaderService(cfg, db, filterSChan, server, make(chan rpcclient.ClientConnector, 1), nil, anz, srvDep), db, stordb)
if err := srvMngr.StartServices(); err != nil {
t.Error(err)
}
if ralS.IsRunning() {
t.Errorf("Expected service to be down")
}
if db.IsRunning() {
t.Errorf("Expected service to be down")
ralS2 := RalService{
responder: &ResponderService{
cfg: cfg,
server: server,
shdChan: shdChan,
resp: &engine.Responder{},
connChan: make(chan rpcclient.ClientConnector, 1),
anz: anz,
srvDep: srvDep,
},
cfg: cfg,
cacheS: chS,
server: server,
rals: &v1.RALsV1{},
connChan: make(chan rpcclient.ClientConnector, 1),
}
if stordb.IsRunning() {
t.Errorf("Expected service to be down")
}
var reply string
if err := cfg.V1ReloadConfig(&config.ReloadArgs{
Path: path.Join("/usr", "share", "cgrates", "conf", "samples", "tutmongo"),
Section: config.RALS_JSN,
}, &reply); err != nil {
t.Error(err)
} else if reply != utils.OK {
t.Errorf("Expecting OK ,received %s", reply)
}
time.Sleep(10 * time.Millisecond) //need to switch to gorutine
if !ralS.IsRunning() {
ralS2.responder.connChan <- chS
if !ralS2.IsRunning() {
t.Errorf("Expected service to be running")
}

if resp := ralS.GetResponder(); !resp.IsRunning() {
t.Errorf("Expected service to be running")
serviceName := ralS2.ServiceName()
if !reflect.DeepEqual(serviceName, utils.RALService) {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.RALService, serviceName)
}

if !db.IsRunning() {
t.Errorf("Expected service to be running")
shouldRun := ralS2.ShouldRun()
if !reflect.DeepEqual(shouldRun, false) {
t.Errorf("\nExpecting <false>,\n Received <%+v>", shouldRun)
}
if !stordb.IsRunning() {
t.Errorf("Expected service to be running")
if !reflect.DeepEqual(ralS2.GetResponder(), ralS2.responder) {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", ralS2.responder, ralS2.GetResponder())
}
err := ralS.Start()
if err == nil || err != utils.ErrServiceAlreadyRunning {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.ErrServiceAlreadyRunning, err)
}
err = ralS.Reload()
if err != nil {
t.Errorf("\nExpecting <nil>,\n Received <%+v>", err)
}
cfg.RalsCfg().Enabled = false
cfg.GetReloadChan(config.RALS_JSN) <- struct{}{}
time.Sleep(10 * time.Millisecond)
ralS2.connChan <- chS
ralS2.Shutdown()
if ralS.IsRunning() {
t.Errorf("Expected service to be down")
}
if resp := ralS.GetResponder(); resp.IsRunning() {
t.Errorf("Expected service to be down")
}
shdChan.CloseOnce()
time.Sleep(10 * time.Millisecond)
}
*/
@@ -17,63 +17,62 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
*/
package services

/*
import (
"reflect"
"sync"
"testing"

"github.com/cgrates/cgrates/rates"

"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/cores"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
"github.com/cgrates/rpcclient"
)

//TestRateSCoverage for cover testing
func TestRateSCoverage(t *testing.T) {
cfg := config.NewDefaultCGRConfig()

utils.Logger, _ = utils.Newlogger(utils.MetaSysLog, cfg.GeneralCfg().NodeID)
utils.Logger.SetLogLevel(7)
filterSChan := make(chan *engine.FilterS, 1)
filterSChan <- nil
shdChan := utils.NewSyncedChan()
shdWg := new(sync.WaitGroup)
server := cores.NewServer(nil)
srvMngr := servmanager.NewServiceManager(cfg, shdChan, shdWg)
srvDep := map[string]*sync.WaitGroup{utils.DataDB: new(sync.WaitGroup)}
db := NewDataDBService(cfg, nil, srvDep)
chS := engine.NewCacheS(cfg, nil, nil)
close(chS.GetPrecacheChannel(utils.CacheRateProfiles))
close(chS.GetPrecacheChannel(utils.CacheRateProfilesFilterIndexes))
close(chS.GetPrecacheChannel(utils.CacheRateFilterIndexes))
anz := NewAnalyzerService(cfg, server, filterSChan, shdChan, make(chan rpcclient.ClientConnector, 1), srvDep)
rS := NewRateService(cfg, chS, filterSChan, db, server, make(chan rpcclient.ClientConnector, 1), anz, srvDep)
srvMngr.AddServices(rS,
NewLoaderService(cfg, db, filterSChan, server, make(chan rpcclient.ClientConnector, 1), nil, anz, srvDep), db)
if err := srvMngr.StartServices(); err != nil {
t.Error(err)
}

if rS.IsRunning() {
t.Errorf("Expected service to be down")
}
var reply string
if err := cfg.V1ReloadConfig(&config.ReloadArgs{
Path: path.Join("/usr", "share", "cgrates", "conf", "samples", "rates"),
Section: config.RateSJson,
}, &reply); err != nil {
t.Error(err)
} else if reply != utils.OK {
t.Errorf("Expecting OK ,received %s", reply)
rS2 := RateService{
cfg: cfg,
filterSChan: filterSChan,
dmS: db,
cacheS: chS,
server: server,
stopChan: make(chan struct{}),
intConnChan: make(chan rpcclient.ClientConnector, 1),
anz: anz,
srvDep: srvDep,
rateS: &rates.RateS{},
}
time.Sleep(10 * time.Millisecond) //need to switch to gorutine
if !rS.IsRunning() {
if !rS2.IsRunning() {
t.Errorf("Expected service to be running")
}
err := rS.Start()
if err == nil || err != utils.ErrServiceAlreadyRunning {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.ErrServiceAlreadyRunning, err)
serviceName := rS2.ServiceName()
if !reflect.DeepEqual(serviceName, utils.RateS) {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.RateS, serviceName)
}
err = rS.Reload()
if err != nil {
t.Errorf("\nExpecting <nil>,\n Received <%+v>", err)
shouldRun := rS2.ShouldRun()
if !reflect.DeepEqual(shouldRun, false) {
t.Errorf("\nExpecting <false>,\n Received <%+v>", shouldRun)
}
cfg.RateSCfg().Enabled = false
cfg.GetReloadChan(config.RateSJson) <- struct{}{}
time.Sleep(10 * time.Millisecond)
rS2.intConnChan <- chS
rS2.Shutdown()
if rS.IsRunning() {
t.Errorf("Expected service to be down")
}
shdChan.CloseOnce()
time.Sleep(10 * time.Millisecond)
}
*/
@@ -17,76 +17,56 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
*/
package services

/*
import (
"reflect"
"sync"
"testing"

"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/cores"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
"github.com/cgrates/rpcclient"
)

//TestResourceSCoverage for cover testing
func TestResourceSCoverage(t *testing.T) {
// utils.Logger.SetLogLevel(7)
cfg := config.NewDefaultCGRConfig()

utils.Logger, _ = utils.Newlogger(utils.MetaSysLog, cfg.GeneralCfg().NodeID)
utils.Logger.SetLogLevel(7)
cfg.ThresholdSCfg().Enabled = true
filterSChan := make(chan *engine.FilterS, 1)
filterSChan <- nil
shdChan := utils.NewSyncedChan()
shdWg := new(sync.WaitGroup)
chS := engine.NewCacheS(cfg, nil, nil)
close(chS.GetPrecacheChannel(utils.CacheThresholdProfiles))
close(chS.GetPrecacheChannel(utils.CacheThresholds))
close(chS.GetPrecacheChannel(utils.CacheThresholdFilterIndexes))
close(chS.GetPrecacheChannel(utils.CacheResourceProfiles))
close(chS.GetPrecacheChannel(utils.CacheResources))
close(chS.GetPrecacheChannel(utils.CacheResourceFilterIndexes))
server := cores.NewServer(nil)
srvMngr := servmanager.NewServiceManager(cfg, shdChan, shdWg)
srvDep := map[string]*sync.WaitGroup{utils.DataDB: new(sync.WaitGroup)}
anz := NewAnalyzerService(cfg, server, filterSChan, shdChan, make(chan rpcclient.ClientConnector, 1), srvDep)
db := NewDataDBService(cfg, nil, srvDep)
tS := NewThresholdService(cfg, db, chS, filterSChan, server, make(chan rpcclient.ClientConnector, 1), anz, srvDep)
reS := NewResourceService(cfg, db, chS, filterSChan, server, make(chan rpcclient.ClientConnector, 1), nil, anz, srvDep)
engine.NewConnManager(cfg, nil)
srvMngr.AddServices(tS, reS,
NewLoaderService(cfg, db, filterSChan, server, make(chan rpcclient.ClientConnector, 1), nil, anz, srvDep), db)
if err := srvMngr.StartServices(); err != nil {
t.Error(err)
}

if reS.IsRunning() {
t.Errorf("Expected service to be down")
}
if db.IsRunning() {
t.Errorf("Expected service to be down")
reS2 := ResourceService{
cfg: cfg,
dm: db,
cacheS: chS,
filterSChan: filterSChan,
server: server,
connChan: make(chan rpcclient.ClientConnector, 1),
connMgr: nil,
anz: anz,
srvDep: srvDep,
reS: &engine.ResourceService{},
}
var reply string
if err := cfg.V1ReloadConfig(&config.ReloadArgs{
Path: path.Join("/usr", "share", "cgrates", "conf", "samples", "tutmongo"),
Section: config.RESOURCES_JSON,
}, &reply); err != nil {
t.Error(err)
} else if reply != utils.OK {
t.Errorf("Expecting OK ,received %s", reply)
}
time.Sleep(10 * time.Millisecond) //need to switch to gorutine
if !reS.IsRunning() {
if !reS2.IsRunning() {
t.Errorf("Expected service to be running")
}
if !db.IsRunning() {
t.Errorf("Expected service to be running")
serviceName := reS2.ServiceName()
if !reflect.DeepEqual(serviceName, utils.ResourceS) {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.ResourceS, serviceName)
}
err := reS.Start()
if err == nil || err != utils.ErrServiceAlreadyRunning {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.ErrServiceAlreadyRunning, err)
shouldRun := reS2.ShouldRun()
if !reflect.DeepEqual(shouldRun, false) {
t.Errorf("\nExpecting <false>,\n Received <%+v>", shouldRun)
}
err = reS.Reload()
if err != nil {
t.Errorf("\nExpecting <nil>,\n Received <%+v>", err)
}
cfg.ResourceSCfg().Enabled = false
cfg.GetReloadChan(config.RESOURCES_JSON) <- struct{}{}
time.Sleep(10 * time.Millisecond)
if reS.IsRunning() {
t.Errorf("Expected service to be down")
}
shdChan.CloseOnce()
time.Sleep(10 * time.Millisecond)
}
*/
@@ -17,74 +17,62 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
*/
package services

/*
import (
"reflect"
"sync"
"testing"

"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/cores"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
"github.com/cgrates/rpcclient"
)

//TestSupplierSCoverage for cover testing
func TestSupplierSCoverage(t *testing.T) {
cfg := config.NewDefaultCGRConfig()

utils.Logger, _ = utils.Newlogger(utils.MetaSysLog, cfg.GeneralCfg().NodeID)
utils.Logger.SetLogLevel(7)
cfg.StatSCfg().Enabled = true
filterSChan := make(chan *engine.FilterS, 1)
filterSChan <- nil
shdChan := utils.NewSyncedChan()
shdWg := new(sync.WaitGroup)
chS := engine.NewCacheS(cfg, nil, nil)
close(chS.GetPrecacheChannel(utils.CacheRouteProfiles))
close(chS.GetPrecacheChannel(utils.CacheRouteFilterIndexes))
close(chS.GetPrecacheChannel(utils.CacheStatQueueProfiles))
close(chS.GetPrecacheChannel(utils.CacheStatQueues))
close(chS.GetPrecacheChannel(utils.CacheStatFilterIndexes))
server := cores.NewServer(nil)
srvMngr := servmanager.NewServiceManager(cfg, shdChan, shdWg)
srvDep := map[string]*sync.WaitGroup{utils.DataDB: new(sync.WaitGroup)}
db := NewDataDBService(cfg, nil, srvDep)
anz := NewAnalyzerService(cfg, server, filterSChan, shdChan, make(chan rpcclient.ClientConnector, 1), srvDep)
sts := NewStatService(cfg, db, chS, filterSChan, server, make(chan rpcclient.ClientConnector, 1), nil, anz, srvDep)
supS := NewRouteService(cfg, db, chS, filterSChan, server, make(chan rpcclient.ClientConnector, 1), nil, anz, srvDep)
engine.NewConnManager(cfg, nil)
srvMngr.AddServices(supS, sts,
NewLoaderService(cfg, db, filterSChan, server, make(chan rpcclient.ClientConnector, 1), nil, anz, srvDep), db)
if err := srvMngr.StartServices(); err != nil {
t.Error(err)
}

if supS.IsRunning() {
t.Errorf("Expected service to be down")
}
if db.IsRunning() {
t.Errorf("Expected service to be down")
supS2 := &RouteService{
cfg: cfg,
dm: db,
cacheS: chS,
filterSChan: filterSChan,
server: server,
connMgr: nil,
routeS: &engine.RouteService{},
rpc: nil,
connChan: make(chan rpcclient.ClientConnector, 1),
anz: anz,
srvDep: srvDep,
}
var reply string
if err := cfg.V1ReloadConfig(&config.ReloadArgs{
Path: path.Join("/usr", "share", "cgrates", "conf", "samples", "tutmongonew"),
Section: config.RouteSJson,
}, &reply); err != nil {
t.Error(err)
} else if reply != utils.OK {
t.Errorf("Expecting OK ,received %s", reply)
}
time.Sleep(10 * time.Millisecond) //need to switch to gorutine
if !supS.IsRunning() {
if !supS2.IsRunning() {
t.Errorf("Expected service to be running")
}
if !db.IsRunning() {
t.Errorf("Expected service to be running")
serviceName := supS2.ServiceName()
if !reflect.DeepEqual(serviceName, utils.RouteS) {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.RouteS, serviceName)
}
err := supS.Start()
if err == nil || err != utils.ErrServiceAlreadyRunning {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.ErrServiceAlreadyRunning, err)
shouldRun := supS2.ShouldRun()
if !reflect.DeepEqual(shouldRun, false) {
t.Errorf("\nExpecting <false>,\n Received <%+v>", shouldRun)
}
err = supS.Reload()
if err != nil {
t.Errorf("\nExpecting <nil>,\n Received <%+v>", err)
}
cfg.RouteSCfg().Enabled = false
cfg.GetReloadChan(config.RouteSJson) <- struct{}{}
time.Sleep(10 * time.Millisecond)
supS2.connChan <- chS
supS2.Shutdown()
if supS.IsRunning() {
t.Errorf("Expected service to be down")
}
shdChan.CloseOnce()
time.Sleep(10 * time.Millisecond)
}
*/
@@ -17,72 +17,50 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
*/
package services

/*
import (
"reflect"
"sync"
"testing"

"github.com/cgrates/cgrates/scheduler"

"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/cores"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
"github.com/cgrates/rpcclient"
)

//TestSchedulerSCoverage for cover testing
func TestSchedulerSCoverage(t *testing.T) {
cfg := config.NewDefaultCGRConfig()

utils.Logger, _ = utils.Newlogger(utils.MetaSysLog, cfg.GeneralCfg().NodeID)
utils.Logger.SetLogLevel(7)
shdChan := utils.NewSyncedChan()
shdWg := new(sync.WaitGroup)
chS := engine.NewCacheS(cfg, nil, nil)
filterSChan := make(chan *engine.FilterS, 1)
filterSChan <- nil
close(chS.GetPrecacheChannel(utils.CacheActionPlans))
server := cores.NewServer(nil)
srvMngr := servmanager.NewServiceManager(cfg, shdChan, shdWg)
srvDep := map[string]*sync.WaitGroup{utils.DataDB: new(sync.WaitGroup)}
db := NewDataDBService(cfg, nil, srvDep)
anz := NewAnalyzerService(cfg, server, filterSChan, shdChan, make(chan rpcclient.ClientConnector, 1), srvDep)
schS := NewSchedulerService(cfg, db, chS, filterSChan, server, make(chan rpcclient.ClientConnector, 1), nil, anz, srvDep)
engine.NewConnManager(cfg, nil)
srvMngr.AddServices(schS,
NewLoaderService(cfg, db, filterSChan, server, make(chan rpcclient.ClientConnector, 1), nil, anz, srvDep), db)
if err := srvMngr.StartServices(); err != nil {
t.Error(err)
}

if schS.IsRunning() {
t.Errorf("Expected service to be down")
}
if db.IsRunning() {
t.Errorf("Expected service to be down")
}
var reply string
if err := cfg.V1ReloadConfig(&config.ReloadArgs{
Path: path.Join("/usr", "share", "cgrates", "conf", "samples", "tutmongonew"),
Section: config.SCHEDULER_JSN,
}, &reply); err != nil {
t.Error(err)
} else if reply != utils.OK {
t.Errorf("Expecting OK ,received %s", reply)
}
time.Sleep(10 * time.Millisecond) //need to switch to gorutine
schS.schS = &scheduler.Scheduler{}
if !schS.IsRunning() {
t.Errorf("Expected service to be running")
}
if !db.IsRunning() {
t.Errorf("Expected service to be running")
}
err := schS.Start()
if err == nil || err != utils.ErrServiceAlreadyRunning {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.ErrServiceAlreadyRunning, err)
}
err = schS.Reload()
if err != nil {
t.Errorf("\nExpecting <nil>,\n Received <%+v>", err)
}
getScheduler := schS.GetScheduler()
if !reflect.DeepEqual(schS.schS, getScheduler) {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.ToJSON(schS.schS), utils.ToJSON(getScheduler))
}
cfg.SchedulerCfg().Enabled = false
cfg.GetReloadChan(config.SCHEDULER_JSN) <- struct{}{}
time.Sleep(10 * time.Millisecond)
if schS.IsRunning() {
t.Errorf("Expected service to be down")
}
shdChan.CloseOnce()
time.Sleep(10 * time.Millisecond)
serviceName := schS.ServiceName()
if !reflect.DeepEqual(serviceName, utils.SchedulerS) {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.SchedulerS, serviceName)
}
shouldRun := schS.ShouldRun()
if !reflect.DeepEqual(shouldRun, false) {
t.Errorf("\nExpecting <false>,\n Received <%+v>", shouldRun)
}
if !reflect.DeepEqual(schS.GetScheduler(), schS.schS) {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", schS.schS, schS.GetScheduler())
}
}
*/
@@ -17,106 +17,65 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
*/
package services

/*
import (
"reflect"
"sync"
"testing"

"github.com/cgrates/cgrates/sessions"

"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/cores"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
"github.com/cgrates/rpcclient"
)

//TestSessionSCoverage for cover testing
func TestSessionSCoverage(t *testing.T) {
cfg := config.NewDefaultCGRConfig()

cfg.ChargerSCfg().Enabled = true
cfg.RalsCfg().Enabled = true
cfg.CdrsCfg().Enabled = true
utils.Logger, _ = utils.Newlogger(utils.MetaSysLog, cfg.GeneralCfg().NodeID)
utils.Logger.SetLogLevel(7)
filterSChan := make(chan *engine.FilterS, 1)
filterSChan <- nil
shdChan := utils.NewSyncedChan()
shdWg := new(sync.WaitGroup)
chS := engine.NewCacheS(cfg, nil, nil)

close(chS.GetPrecacheChannel(utils.CacheChargerProfiles))
close(chS.GetPrecacheChannel(utils.CacheChargerFilterIndexes))

close(chS.GetPrecacheChannel(utils.CacheDestinations))
close(chS.GetPrecacheChannel(utils.CacheReverseDestinations))
close(chS.GetPrecacheChannel(utils.CacheRatingPlans))
close(chS.GetPrecacheChannel(utils.CacheRatingProfiles))
close(chS.GetPrecacheChannel(utils.CacheActions))
close(chS.GetPrecacheChannel(utils.CacheActionPlans))
close(chS.GetPrecacheChannel(utils.CacheAccountActionPlans))
close(chS.GetPrecacheChannel(utils.CacheActionTriggers))
close(chS.GetPrecacheChannel(utils.CacheSharedGroups))
close(chS.GetPrecacheChannel(utils.CacheTimings))

internalChan := make(chan rpcclient.ClientConnector, 1)
internalChan <- nil
cacheSChan := make(chan rpcclient.ClientConnector, 1)
cacheSChan <- chS

server := cores.NewServer(nil)
srvMngr := servmanager.NewServiceManager(cfg, shdChan, shdWg)
srvDep := map[string]*sync.WaitGroup{utils.DataDB: new(sync.WaitGroup)}
db := NewDataDBService(cfg, nil, srvDep)
cfg.StorDbCfg().Type = utils.INTERNAL
anz := NewAnalyzerService(cfg, server, filterSChan, shdChan, make(chan rpcclient.ClientConnector, 1), srvDep)
stordb := NewStorDBService(cfg, srvDep)
chrS := NewChargerService(cfg, db, chS, filterSChan, server, make(chan rpcclient.ClientConnector, 1), nil, anz, srvDep)
schS := NewSchedulerService(cfg, db, chS, filterSChan, server, make(chan rpcclient.ClientConnector, 1), nil, anz, srvDep)
ralS := NewRalService(cfg, chS, server,
make(chan rpcclient.ClientConnector, 1), make(chan rpcclient.ClientConnector, 1),
shdChan, nil, anz, srvDep)
cdrS := NewCDRServer(cfg, db, stordb, filterSChan, server,
make(chan rpcclient.ClientConnector, 1),
nil, anz, srvDep)
srv := NewSessionService(cfg, db, server, make(chan rpcclient.ClientConnector, 1), shdChan, nil, nil, anz, srvDep)
engine.NewConnManager(cfg, nil)
srvMngr.AddServices(srv, chrS, schS, ralS, cdrS,
NewLoaderService(cfg, db, filterSChan, server, make(chan rpcclient.ClientConnector, 1), nil, anz, srvDep), db, stordb)
if err := srvMngr.StartServices(); err != nil {
t.Error(err)
}
if srv.IsRunning() {
t.Errorf("Expected service to be down")
}
if db.IsRunning() {
t.Errorf("Expected service to be down")
srv2 := SessionService{
cfg: cfg,
dm: db,
server: server,
shdChan: shdChan,
connChan: make(chan rpcclient.ClientConnector, 1),
connMgr: nil,
caps: nil,
anz: anz,
srvDep: srvDep,
sm: &sessions.SessionS{},
}
if stordb.IsRunning() {
t.Errorf("Expected service to be down")
if !srv2.IsRunning() {
t.Errorf("Expected service to be running")
}
var reply string
if err := cfg.V1ReloadConfig(&config.ReloadArgs{
Path: path.Join("/usr", "share", "cgrates", "conf", "samples", "tutmongonew"),
Section: config.SessionSJson,
}, &reply); err != nil {
t.Error(err)
} else if reply != utils.OK {
t.Errorf("Expecting OK ,received %s", reply)
serviceName := srv2.ServiceName()
if !reflect.DeepEqual(serviceName, utils.SessionS) {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.SessionS, serviceName)
}
shouldRun := srv2.ShouldRun()
if !reflect.DeepEqual(shouldRun, false) {
t.Errorf("\nExpecting <false>,\n Received <%+v>", shouldRun)
}

time.Sleep(10 * time.Millisecond) //need to switch to gorutine
if !srv.IsRunning() {
t.Errorf("Expected service to be running")
}
if !db.IsRunning() {
t.Errorf("Expected service to be running")
}

err := srv.Start()
if err == nil || err != utils.ErrServiceAlreadyRunning {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.ErrServiceAlreadyRunning, err)
}
err = srv.Reload()
if err != nil {
t.Errorf("\nExpecting <nil>,\n Received <%+v>", err)
}
cfg.SessionSCfg().Enabled = false
cfg.GetReloadChan(config.SessionSJson) <- struct{}{}
time.Sleep(10 * time.Millisecond)
if srv.IsRunning() {
t.Errorf("Expected service to be down")
}
shdChan.CloseOnce()
time.Sleep(10 * time.Millisecond)

}
*/
@@ -17,71 +17,51 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
*/
package services

/*
import (
"reflect"
"sync"
"testing"

"github.com/cgrates/cgrates/agents"
"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
"github.com/cgrates/rpcclient"
)

//TestSIPAgentCoverage for cover testing
func TestSIPAgentCoverage(t *testing.T) {
cfg := config.NewDefaultCGRConfig()

cfg.SessionSCfg().Enabled = true
utils.Logger, _ = utils.Newlogger(utils.MetaSysLog, cfg.GeneralCfg().NodeID)
utils.Logger.SetLogLevel(7)
filterSChan := make(chan *engine.FilterS, 1)
filterSChan <- nil
shdChan := utils.NewSyncedChan()
shdWg := new(sync.WaitGroup)
chS := engine.NewCacheS(cfg, nil, nil)

cacheSChan := make(chan rpcclient.ClientConnector, 1)
cacheSChan <- chS

server := cores.NewServer(nil)
srvMngr := servmanager.NewServiceManager(cfg, shdChan, shdWg)
srvDep := map[string]*sync.WaitGroup{utils.DataDB: new(sync.WaitGroup)}
db := NewDataDBService(cfg, nil, srvDep)
anz := NewAnalyzerService(cfg, server, filterSChan, shdChan, make(chan rpcclient.ClientConnector, 1), srvDep)
sS := NewSessionService(cfg, db, server, make(chan rpcclient.ClientConnector, 1),
shdChan, nil, nil, anz, srvDep)
srv := NewSIPAgent(cfg, filterSChan, shdChan, nil, srvDep)
engine.NewConnManager(cfg, nil)
srvMngr.AddServices(srv, sS,
NewLoaderService(cfg, db, filterSChan, server, make(chan rpcclient.ClientConnector, 1), nil, anz, srvDep), db)
if err := srvMngr.StartServices(); err != nil {
t.Fatal(err)
}
if srv.IsRunning() {
t.Errorf("Expected service to be down")
}
var reply string
if err := cfg.V1ReloadConfig(&config.ReloadArgs{
Path: path.Join("/usr", "share", "cgrates", "conf", "samples", "sipagent_mysql"),
Section: config.SIPAgentJson,
}, &reply); err != nil {
t.Fatal(err)
} else if reply != utils.OK {
t.Errorf("Expecting OK ,received %s", reply)
srv2 := SIPAgent{
cfg: cfg,
filterSChan: filterSChan,
shdChan: shdChan,
sip: &agents.SIPAgent{},
connMgr: nil,
srvDep: srvDep,
}
if !srv2.IsRunning() {
t.Errorf("Expected service to be running")
}
serviceName := srv2.ServiceName()
if !reflect.DeepEqual(serviceName, utils.SIPAgent) {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.SIPAgent, serviceName)
}
shouldRun := srv2.ShouldRun()
if !reflect.DeepEqual(shouldRun, false) {
t.Errorf("\nExpecting <false>,\n Received <%+v>", shouldRun)
}

time.Sleep(10 * time.Millisecond) //need to switch to gorutine
if !srv.IsRunning() {
t.Errorf("Expected service to be running")
}

srvStart := srv.Start()
if srvStart != utils.ErrServiceAlreadyRunning {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.ErrServiceAlreadyRunning, srvStart)
}
err := srv.Reload()
if err != nil {
t.Errorf("\nExpecting <err>,\n Received <%+v>", err)
}
cfg.SIPAgentCfg().Enabled = false
cfg.GetReloadChan(config.SIPAgentJson) <- struct{}{}
time.Sleep(10 * time.Millisecond)
if srv.IsRunning() {
t.Errorf("Expected service to be down")
}
shdChan.CloseOnce()
time.Sleep(10 * time.Millisecond)

}
*/
@@ -17,76 +17,56 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
*/
package services

/*
import (
"reflect"
"sync"
"testing"

"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/cores"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
"github.com/cgrates/rpcclient"
)

//TestStatSCoverage for cover testing
func TestStatSCoverage(t *testing.T) {
// utils.Logger.SetLogLevel(7)
cfg := config.NewDefaultCGRConfig()

utils.Logger, _ = utils.Newlogger(utils.MetaSysLog, cfg.GeneralCfg().NodeID)
utils.Logger.SetLogLevel(7)
cfg.ThresholdSCfg().Enabled = true
filterSChan := make(chan *engine.FilterS, 1)
filterSChan <- nil
shdChan := utils.NewSyncedChan()
shdWg := new(sync.WaitGroup)
chS := engine.NewCacheS(cfg, nil, nil)
close(chS.GetPrecacheChannel(utils.CacheThresholdProfiles))
close(chS.GetPrecacheChannel(utils.CacheThresholds))
close(chS.GetPrecacheChannel(utils.CacheThresholdFilterIndexes))
close(chS.GetPrecacheChannel(utils.CacheStatQueueProfiles))
close(chS.GetPrecacheChannel(utils.CacheStatQueues))
close(chS.GetPrecacheChannel(utils.CacheStatFilterIndexes))
server := cores.NewServer(nil)
srvMngr := servmanager.NewServiceManager(cfg, shdChan, shdWg)
srvDep := map[string]*sync.WaitGroup{utils.DataDB: new(sync.WaitGroup)}
anz := NewAnalyzerService(cfg, server, filterSChan, shdChan, make(chan rpcclient.ClientConnector, 1), srvDep)
db := NewDataDBService(cfg, nil, srvDep)
tS := NewThresholdService(cfg, db, chS, filterSChan, server, make(chan rpcclient.ClientConnector, 1), anz, srvDep)
sS := NewStatService(cfg, db, chS, filterSChan, server, make(chan rpcclient.ClientConnector, 1), nil, anz, srvDep)
engine.NewConnManager(cfg, nil)
srvMngr.AddServices(tS, sS,
NewLoaderService(cfg, db, filterSChan, server, make(chan rpcclient.ClientConnector, 1), nil, anz, srvDep), db)
if err := srvMngr.StartServices(); err != nil {
t.Error(err)
}
if sS.IsRunning() {
t.Errorf("Expected service to be down")
}
if db.IsRunning() {
t.Errorf("Expected service to be down")
sS2 := StatService{
cfg: cfg,
dm: db,
cacheS: chS,
filterSChan: filterSChan,
server: server,
connMgr: nil,
sts: &engine.StatService{},
connChan: make(chan rpcclient.ClientConnector, 1),
anz: anz,
srvDep: srvDep,
}
var reply string
if err := cfg.V1ReloadConfig(&config.ReloadArgs{
Path: path.Join("/usr", "share", "cgrates", "conf", "samples", "tutmongo"),
Section: config.STATS_JSON,
}, &reply); err != nil {
t.Error(err)
} else if reply != utils.OK {
t.Errorf("Expecting OK ,received %s", reply)
}
time.Sleep(10 * time.Millisecond) //need to switch to gorutine
if !sS.IsRunning() {
if !sS2.IsRunning() {
t.Errorf("Expected service to be running")
}
if !db.IsRunning() {
t.Errorf("Expected service to be running")
serviceName := sS2.ServiceName()
if !reflect.DeepEqual(serviceName, utils.StatS) {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.StatS, serviceName)
}
err := sS.Start()
if err == nil || err != utils.ErrServiceAlreadyRunning {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.ErrServiceAlreadyRunning, err)
shouldRun := sS2.ShouldRun()
if !reflect.DeepEqual(shouldRun, false) {
t.Errorf("\nExpecting <false>,\n Received <%+v>", shouldRun)
}
err = sS.Reload()
if err != nil {
t.Errorf("\nExpecting <nil>,\n Received <%+v>", err)
}
cfg.StatSCfg().Enabled = false
cfg.GetReloadChan(config.STATS_JSON) <- struct{}{}
time.Sleep(10 * time.Millisecond)
if sS.IsRunning() {
t.Errorf("Expected service to be down")
}
shdChan.CloseOnce()
time.Sleep(10 * time.Millisecond)
}
*/
@@ -18,6 +18,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
package services

import (
"reflect"
"sync"
"testing"

@@ -48,4 +49,16 @@ func TestStorDBServiceCoverage(t *testing.T) {
User: "test_user",
Password: "test_pass",
}
serviceName := srv.ServiceName()
if !reflect.DeepEqual(serviceName, utils.StorDB) {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.StorDB, serviceName)
}
shouldRun := srv.ShouldRun()
if !reflect.DeepEqual(shouldRun, false) {
t.Errorf("\nExpecting <false>,\n Received <%+v>", shouldRun)
}
srv.Shutdown()
if srv.IsRunning() {
t.Errorf("Expected service to be down")
}
}
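Every service coverage test in this commit ends with the same lifecycle assertions: ServiceName, ShouldRun, Shutdown and a final IsRunning check. A generic, self-contained sketch of that contract is shown below (the Service interface and dummyService type are illustrative only, not the servmanager API):

package main

import "fmt"

// Service is a hypothetical reduction of the lifecycle contract the
// coverage tests exercise on each services.* type.
type Service interface {
    ServiceName() string
    ShouldRun() bool
    IsRunning() bool
    Shutdown()
}

// dummyService reports itself as running until Shutdown is called.
type dummyService struct {
    name    string
    running bool
}

func (d *dummyService) ServiceName() string { return d.name }
func (d *dummyService) ShouldRun() bool     { return false } // disabled in config
func (d *dummyService) IsRunning() bool     { return d.running }
func (d *dummyService) Shutdown()           { d.running = false }

func main() {
    var s Service = &dummyService{name: "StorDB", running: true}
    fmt.Println(s.ServiceName(), s.ShouldRun(), s.IsRunning()) // StorDB false true
    s.Shutdown()
    fmt.Println(s.IsRunning()) // false, which is what the tests assert after Shutdown
}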
@@ -17,67 +17,54 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
*/
package services

/*
import (
"reflect"
"sync"
"testing"

"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/cores"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
"github.com/cgrates/rpcclient"
)

//TestThresholdSCoverage for cover testing
func TestThresholdSCoverage(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
filterSChan := make(chan *engine.FilterS, 1)
filterSChan <- nil
shdChan := utils.NewSyncedChan()
shdWg := new(sync.WaitGroup)
chS := engine.NewCacheS(cfg, nil, nil)
close(chS.GetPrecacheChannel(utils.CacheThresholdProfiles))
close(chS.GetPrecacheChannel(utils.CacheThresholds))
close(chS.GetPrecacheChannel(utils.CacheThresholdFilterIndexes))
server := cores.NewServer(nil)
srvMngr := servmanager.NewServiceManager(cfg, shdChan, shdWg)
srvDep := map[string]*sync.WaitGroup{utils.DataDB: new(sync.WaitGroup)}
anz := NewAnalyzerService(cfg, server, filterSChan, shdChan, make(chan rpcclient.ClientConnector, 1), srvDep)
db := NewDataDBService(cfg, nil, srvDep)
tS := NewThresholdService(cfg, db, chS, filterSChan, server, make(chan rpcclient.ClientConnector, 1), anz, srvDep)
engine.NewConnManager(cfg, nil)
srvMngr.AddServices(tS,
NewLoaderService(cfg, db, filterSChan, server, make(chan rpcclient.ClientConnector, 1), nil, anz, srvDep), db)
if err := srvMngr.StartServices(); err != nil {
t.Error(err)
}
if tS.IsRunning() {
t.Errorf("Expected service to be down")
}
if db.IsRunning() {
t.Errorf("Expected service to be down")
thrs1, _ := engine.NewThresholdService(&engine.DataManager{}, &config.CGRConfig{}, &engine.FilterS{})
tS2 := &ThresholdService{
cfg: cfg,
dm: db,
cacheS: chS,
filterSChan: filterSChan,
server: server,
thrs: thrs1,
connChan: make(chan rpcclient.ClientConnector, 1),
anz: anz,
srvDep: srvDep,
}
var reply string
if err := cfg.V1ReloadConfig(&config.ReloadArgs{
Path: path.Join("/usr", "share", "cgrates", "conf", "samples", "tutmongo"),
Section: config.THRESHOLDS_JSON,
}, &reply); err != nil {
t.Error(err)
} else if reply != utils.OK {
t.Errorf("Expecting OK ,received %s", reply)
}
time.Sleep(10 * time.Millisecond) //need to switch to gorutine
if !tS.IsRunning() {
if !tS2.IsRunning() {
t.Errorf("Expected service to be running")
}
if !db.IsRunning() {
t.Errorf("Expected service to be running")
serviceName := tS2.ServiceName()
if !reflect.DeepEqual(serviceName, utils.ThresholdS) {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.ThresholdS, serviceName)
}
err := tS.Start()
if err == nil || err != utils.ErrServiceAlreadyRunning {
t.Errorf("\nExpecting <%+v>,\n Received <%+v>", utils.ErrServiceAlreadyRunning, err)
shouldRun := tS2.ShouldRun()
if !reflect.DeepEqual(shouldRun, false) {
t.Errorf("\nExpecting <false>,\n Received <%+v>", shouldRun)
}
err = tS.Reload()
if err != nil {
t.Errorf("\nExpecting <nil>,\n Received <%+v>", err)
}
cfg.ThresholdSCfg().Enabled = false
cfg.GetReloadChan(config.THRESHOLDS_JSON) <- struct{}{}
time.Sleep(10 * time.Millisecond)
if tS.IsRunning() {
t.Errorf("Expected service to be down")
}
shdChan.CloseOnce()
time.Sleep(10 * time.Millisecond)
}
*/