Mirror of https://github.com/cgrates/cgrates.git (synced 2026-02-11 18:16:24 +05:00)

commit 3c920491cd (parent 69a48c2572), committed by Dan Christian Bogos

    Updated tests
@@ -33,7 +33,7 @@ import (
 
 var (
 	// Globals used
-	dataDbCsv *DataManager // Each dataDb will have it's own sources to collect data
+	dataDbCsv DataDB // Each dataDb will have it's own sources to collect data
 	storDb LoadStorage
 	lCfg *config.CGRConfig
 	loader *TpReader
@@ -86,24 +86,15 @@ func testLoaderITInitConfig(t *testing.T) {
 
 func testLoaderITInitDataDB(t *testing.T) {
 	var err error
-	dbConn, err := NewDataDBConn(lCfg.DataDbCfg().Type,
+	dataDbCsv, err := NewDataDBConn(lCfg.DataDbCfg().Type,
 		lCfg.DataDbCfg().Host, lCfg.DataDbCfg().Port, lCfg.DataDbCfg().Name,
 		lCfg.DataDbCfg().User, lCfg.DataDbCfg().Password, lCfg.GeneralCfg().DBDataEncoding,
 		lCfg.DataDbCfg().Opts, lCfg.DataDbCfg().Items)
 	if err != nil {
 		t.Fatal("Error on dataDb connection: ", err.Error())
 	}
-	dataDbCsv = NewDataManager(dbConn, lCfg.CacheCfg(), nil)
-	if lCfg.DataDbCfg().Type == utils.Internal {
-		chIDs := []string{}
-		for dbKey := range utils.CacheInstanceToPrefix { // clear only the DataDB
-			chIDs = append(chIDs, dbKey)
-		}
-		Cache.Clear(chIDs)
-	} else {
-		if err = dbConn.Flush(utils.EmptyString); err != nil {
-			t.Fatal("Error when flushing datadb")
-		}
+	if err = dataDbCsv.Flush(utils.EmptyString); err != nil {
+		t.Fatal("Error when flushing datadb")
 	}
 	cacheChan := make(chan rpcclient.ClientConnector, 1)
 	cacheChan <- Cache
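For context, a minimal sketch (not taken from this commit) of the setup pattern the hunk above moves to: the test global holds the raw DataDB connection returned by NewDataDBConn and is flushed directly through the DataDB interface, with no *DataManager wrapper. It uses only the engine/config/utils identifiers visible in the diff; the helper name initTestDataDB is hypothetical.

package engine

import (
	"testing"

	"github.com/cgrates/cgrates/config"
	"github.com/cgrates/cgrates/utils"
)

// dataDbCsv now holds the raw DataDB connection; no *DataManager wrapper.
var dataDbCsv DataDB

// initTestDataDB is a hypothetical helper mirroring testLoaderITInitDataDB:
// it opens the connection directly and flushes it through the DataDB
// interface, replacing the old internal-vs-external branch.
func initTestDataDB(t *testing.T, lCfg *config.CGRConfig) {
	var err error
	dataDbCsv, err = NewDataDBConn(lCfg.DataDbCfg().Type,
		lCfg.DataDbCfg().Host, lCfg.DataDbCfg().Port, lCfg.DataDbCfg().Name,
		lCfg.DataDbCfg().User, lCfg.DataDbCfg().Password, lCfg.GeneralCfg().DBDataEncoding,
		lCfg.DataDbCfg().Opts, lCfg.DataDbCfg().Items)
	if err != nil {
		t.Fatal("Error on dataDb connection: ", err.Error())
	}
	if err = dataDbCsv.Flush(utils.EmptyString); err != nil {
		t.Fatal("Error when flushing datadb")
	}
}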
@@ -143,7 +134,7 @@ func testLoaderITRemoveLoad(t *testing.T) {
 			t.Error("Failed validating data: ", err.Error())
 		}
 	}*/
-	loader, err = NewTpReader(dataDbCsv.DataDB(), NewFileCSVStorage(utils.CSVSep,
+	loader, err = NewTpReader(dataDbCsv, NewFileCSVStorage(utils.CSVSep,
 		path.Join(*dataDir, "tariffplans", *tpCsvScenario)), "", "",
 		[]string{utils.ConcatenatedKey(utils.MetaInternal, utils.MetaCaches)}, nil, false)
 	if err != nil {
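This same one-line change repeats in the remaining loader hunks below: since dataDbCsv is now a DataDB itself, it is passed straight to NewTpReader instead of going through the old *DataManager's DataDB() accessor. A sketch of the resulting call, built only from the arguments visible in this diff:

// dataDbCsv already satisfies the storage argument NewTpReader expects,
// so the .DataDB() indirection from the previous revision is dropped.
loader, err = NewTpReader(dataDbCsv,
	NewFileCSVStorage(utils.CSVSep, path.Join(*dataDir, "tariffplans", *tpCsvScenario)),
	"", "",
	[]string{utils.ConcatenatedKey(utils.MetaInternal, utils.MetaCaches)}, nil, false)
if err != nil {
	t.Fatal(err)
}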
@@ -222,7 +213,7 @@ func testLoaderITLoadFromCSV(t *testing.T) {
 			t.Error("Failed validating data: ", err.Error())
 		}
 	}*/
-	loader, err = NewTpReader(dataDbCsv.DataDB(), NewFileCSVStorage(utils.CSVSep,
+	loader, err = NewTpReader(dataDbCsv, NewFileCSVStorage(utils.CSVSep,
 		path.Join(*dataDir, "tariffplans", *tpCsvScenario)), "", "",
 		[]string{utils.ConcatenatedKey(utils.MetaInternal, utils.MetaCaches)}, nil, false)
 	if err != nil {
@@ -527,7 +518,7 @@ func testLoaderITImportToStorDb(t *testing.T) {
 
 // Loads data from storDb into dataDb
 func testLoaderITLoadFromStorDb(t *testing.T) {
-	loader, _ := NewTpReader(dataDbCsv.DataDB(), storDb, utils.TestSQL, "", []string{utils.ConcatenatedKey(utils.MetaInternal, utils.MetaCaches)}, nil, false)
+	loader, _ := NewTpReader(dataDbCsv, storDb, utils.TestSQL, "", []string{utils.ConcatenatedKey(utils.MetaInternal, utils.MetaCaches)}, nil, false)
 	if err := loader.LoadDestinations(); err != nil && err.Error() != utils.NotFoundCaps {
 		t.Error("Failed loading destinations: ", err.Error())
 	}
@@ -561,7 +552,7 @@ func testLoaderITLoadFromStorDb(t *testing.T) {
 }
 
 func testLoaderITLoadIndividualProfiles(t *testing.T) {
-	loader, _ := NewTpReader(dataDbCsv.DataDB(), storDb, utils.TestSQL, "", []string{utils.ConcatenatedKey(utils.MetaInternal, utils.MetaCaches)}, nil, false)
+	loader, _ := NewTpReader(dataDbCsv, storDb, utils.TestSQL, "", []string{utils.ConcatenatedKey(utils.MetaInternal, utils.MetaCaches)}, nil, false)
 	// Load ratingPlans. This will also set destination keys
 	if rps, err := storDb.GetTPRatingPlans(utils.TestSQL, "", nil); err != nil {
 		t.Fatal("Could not retrieve rating plans")
@@ -102,78 +102,40 @@ func TestDataDBReload(t *testing.T) {
 		RmtConns: []string{},
 		RplConns: []string{},
 		Items: map[string]*config.ItemOpt{
-			utils.MetaAccounts: {
-				Replicate: false,
-				Remote: false},
-			utils.MetaReverseDestinations: {
-				Replicate: false,
-				Remote: false},
-			utils.MetaDestinations: {
-				Replicate: false,
-				Remote: false},
-			utils.MetaRatingPlans: {
-				Replicate: false,
-				Remote: false},
-			utils.MetaRatingProfiles: {
-				Replicate: false,
-				Remote: false},
-			utils.MetaActions: {
-				Replicate: false,
-				Remote: false},
-			utils.MetaActionPlans: {
-				Replicate: false,
-				Remote: false},
-			utils.MetaAccountActionPlans: {
-				Replicate: false,
-				Remote: false},
-			utils.MetaActionTriggers: {
-				Replicate: false,
-				Remote: false},
-			utils.MetaSharedGroups: {
-				Replicate: false,
-				Remote: false},
-			utils.MetaTimings: {
-				Replicate: false,
-				Remote: false},
-			utils.MetaResourceProfile: {
-				Replicate: false,
-				Remote: false},
-			utils.MetaStatQueues: {
-				Replicate: false,
-				Remote: false},
-			utils.MetaResources: {
-				Replicate: false,
-				Remote: false},
-			utils.MetaStatQueueProfiles: {
-				Replicate: false,
-				Remote: false},
-			utils.MetaThresholds: {
-				Replicate: false,
-				Remote: false},
-			utils.MetaThresholdProfiles: {
-				Replicate: false,
-				Remote: false},
-			utils.MetaFilters: {
-				Replicate: false,
-				Remote: false},
-			utils.MetaRouteProfiles: {
-				Replicate: false,
-				Remote: false},
-			utils.MetaAttributeProfiles: {
-				Replicate: false,
-				Remote: false},
-			utils.MetaDispatcherHosts: {
-				Replicate: false,
-				Remote: false},
-			utils.MetaChargerProfiles: {
-				Replicate: false,
-				Remote: false},
-			utils.MetaDispatcherProfiles: {
-				Replicate: false,
-				Remote: false},
-			utils.MetaLoadIDs: {
-				Replicate: false,
-				Remote: false},
+			utils.MetaAccounts: {Limit: -1},
+			utils.MetaReverseDestinations: {Limit: -1},
+			utils.MetaDestinations: {Limit: -1},
+			utils.MetaRatingPlans: {Limit: -1},
+			utils.MetaRatingProfiles: {Limit: -1},
+			utils.MetaActions: {Limit: -1},
+			utils.MetaActionPlans: {Limit: -1},
+			utils.MetaAccountActionPlans: {Limit: -1},
+			utils.MetaActionTriggers: {Limit: -1},
+			utils.MetaSharedGroups: {Limit: -1},
+			utils.MetaTimings: {Limit: -1},
+			utils.MetaResourceProfile: {Limit: -1},
+			utils.MetaStatQueues: {Limit: -1},
+			utils.MetaResources: {Limit: -1},
+			utils.MetaStatQueueProfiles: {Limit: -1},
+			utils.MetaThresholds: {Limit: -1},
+			utils.MetaThresholdProfiles: {Limit: -1},
+			utils.MetaFilters: {Limit: -1},
+			utils.MetaRouteProfiles: {Limit: -1},
+			utils.MetaAttributeProfiles: {Limit: -1},
+			utils.MetaDispatcherHosts: {Limit: -1},
+			utils.MetaChargerProfiles: {Limit: -1},
+			utils.MetaDispatcherProfiles: {Limit: -1},
+			utils.MetaLoadIDs: {Limit: -1},
+			utils.CacheVersions: {Limit: -1},
+
+			utils.CacheResourceFilterIndexes: {Limit: -1},
+			utils.CacheStatFilterIndexes: {Limit: -1},
+			utils.CacheThresholdFilterIndexes: {Limit: -1},
+			utils.CacheRouteFilterIndexes: {Limit: -1},
+			utils.CacheAttributeFilterIndexes: {Limit: -1},
+			utils.CacheChargerFilterIndexes: {Limit: -1},
+			utils.CacheDispatcherFilterIndexes: {Limit: -1},
+			utils.CacheReverseFilterIndexes: {Limit: -1},
 		},
 	}
 	if !reflect.DeepEqual(oldcfg, db.oldDBCfg) {
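A condensed sketch (not from the commit) of the shape this expected-config literal takes after the change: every cache partition keeps only an unbounded Limit, and the per-item Replicate/Remote flags disappear. Only two entries are spelled out; the rest follow the same pattern.

Items: map[string]*config.ItemOpt{
	utils.MetaAccounts:     {Limit: -1},
	utils.MetaDestinations: {Limit: -1},
	// ...all remaining partitions, plus the CacheVersions and
	// *FilterIndexes entries from the hunk above, use {Limit: -1} too.
},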
@@ -63,6 +63,10 @@ func TestStorDBReload(t *testing.T) {
 	srvMngr.AddServices(cdrS, ralS, schS, chrS,
 		NewLoaderService(cfg, db, filterSChan, server,
 			make(chan rpcclient.ClientConnector, 1), nil, anz, srvDep), db, stordb)
+	if err := engine.InitStorDb(cfg); err != nil {
+		t.Fatal(err)
+	}
+
 	if err := srvMngr.StartServices(); err != nil {
 		t.Error(err)
 	}
@@ -209,6 +213,10 @@ func TestStorDBReloadVersion1(t *testing.T) {
 	srvDep := map[string]*sync.WaitGroup{utils.DataDB: new(sync.WaitGroup)}
 	stordb := NewStorDBService(cfg, srvDep)
 	stordb.oldDBCfg = cfg.StorDbCfg().Clone()
+	if err := engine.InitStorDb(cfg); err != nil {
+		t.Fatal(err)
+	}
+
 	err = stordb.Start()
 	if err != nil {
 		t.Fatal(err)
@@ -288,6 +296,9 @@ func TestStorDBReloadVersion2(t *testing.T) {
 	cfg.StorDbCfg().Password = "CGRateS.org"
 	stordb := NewStorDBService(cfg, srvDep)
 	stordb.oldDBCfg = cfg.StorDbCfg().Clone()
+	if err := engine.InitStorDb(cfg); err != nil {
+		t.Fatal(err)
+	}
 	err = stordb.Start()
 	if err != nil {
 		t.Fatal(err)
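The three StorDB hunks above all add the same step: engine.InitStorDb(cfg) resets the StorDB backend before the service is started, so each reload test begins from a known-clean schema. A sketch of that pattern as a reusable helper; the helper name and the *StorDBService parameter type are assumptions, not part of the commit.

// startFreshStorDB is a hypothetical helper capturing the pattern added by
// these hunks: wipe/prepare the StorDB tables, then start the service.
func startFreshStorDB(t *testing.T, cfg *config.CGRConfig, stordb *StorDBService) {
	t.Helper()
	if err := engine.InitStorDb(cfg); err != nil { // reset StorDB before Start
		t.Fatal(err)
	}
	if err := stordb.Start(); err != nil {
		t.Fatal(err)
	}
}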
@@ -357,7 +368,7 @@ func TestStorDBReloadVersion3(t *testing.T) {
 	utils.Logger.SetLogLevel(7)
 	filterSChan := make(chan *engine.FilterS, 1)
 	filterSChan <- nil
-	shdChan := utils.NewSyncedChan()
+	// shdChan := utils.NewSyncedChan()
 	cfg.ChargerSCfg().Enabled = true
 	srvDep := map[string]*sync.WaitGroup{utils.DataDB: new(sync.WaitGroup)}
 	cfg.StorDbCfg().Password = "CGRateS.org"
@@ -368,6 +379,7 @@ func TestStorDBReloadVersion3(t *testing.T) {
 	if err == nil || err.Error() != "can't conver StorDB of type internal to InternalDB" {
 		t.Fatal(err)
 	}
+	/* the internal now uses its own cache
 	err = stordb.Start()
 	if err == nil {
 		t.Fatal(err)
@@ -380,6 +392,7 @@ func TestStorDBReloadVersion3(t *testing.T) {
 	time.Sleep(10 * time.Millisecond)
 	shdChan.CloseOnce()
 	time.Sleep(10 * time.Millisecond)
+	*/
 }
 
 func TestStorDBReloadNewStorDBConnError(t *testing.T) {