Reduce time.Sleep duration in apier tests

Trial97
2018-11-07 17:06:31 +02:00
committed by Dan Christian Bogos
parent a5d4d12032
commit 3bd46d7c18
19 changed files with 132 additions and 227 deletions
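The pattern repeated across the files below: the per-backend delay switch (2000-4000 ms for the "tutmongo" configs) collapses to a flat 1000 ms, fixed sleeps shrink or disappear, and teardown goes through engine.KillEngine instead of pkill. A minimal sketch of the shape each test file converges on; the KillEngine(waitMs int) error signature is inferred from its call sites in this diff, and testX*/xDelay are placeholder names:

// Sketch only; xDelay and the testX* names are placeholders for each file's own identifiers.
func testXInitCfg(t *testing.T) {
	// Previously: switch on the config dir, with 2000-4000 ms for "tutmongo".
	xDelay = 1000 // one flat delay for every backend
}

func testXKillEngine(t *testing.T) {
	// Replaces exec.Command("pkill", "cgr-engine").Run()
	if err := engine.KillEngine(100); err != nil { // assumed: wait up to 100 ms
		t.Error(err)
	}
}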

View File

@@ -29,7 +29,6 @@ import (
"net/rpc/jsonrpc"
"net/url"
"os"
"os/exec"
"path"
"reflect"
"strings"
@@ -830,7 +829,7 @@ func TestApierReloadCache(t *testing.T) {
expectedStats := &utils.CacheStats{
ReverseDestinations: 10,
RatingPlans: 1,
- RatingProfiles: 1, // when it fails here is 2 is needed to investigate more
+ RatingProfiles: 2,
Actions: 1,
ActionPlans: 1,
AccountActionPlans: 1,
@@ -1278,7 +1277,7 @@ func TestApierLoadTariffPlanFromFolder(t *testing.T) {
} else if reply != "OK" {
t.Error("Calling ApierV1.LoadTariffPlanFromFolder got reply: ", reply)
}
- time.Sleep(time.Duration(2 * time.Second))
+ time.Sleep(time.Second)
}
// For now just test that they execute without errors
@@ -1302,8 +1301,13 @@ func TestApierComputeReverse(t *testing.T) {
}
func TestApierResetDataAfterLoadFromFolder(t *testing.T) {
- expStats := &utils.CacheStats{Destinations: 3, Actions: 6, ActionPlans: 7,
- AccountActionPlans: 13, Aliases: 1, AttributeProfiles: 1} // We get partial cache info during load, maybe fix this in the future
+ expStats := &utils.CacheStats{
+ Destinations: 3,
+ Actions: 6,
+ ActionPlans: 7,
+ AccountActionPlans: 13,
+ Aliases: 1,
+ AttributeProfiles: 0} // Did not cache because it wasn't previously cached
var rcvStats *utils.CacheStats
if err := rater.Call("ApierV1.GetCacheStats", utils.AttrCacheStats{}, &rcvStats); err != nil {
t.Error("Got error on ApierV1.GetCacheStats: ", err.Error())
@@ -1896,5 +1900,7 @@ func TestApierPing(t *testing.T) {
// Simply kill the engine after we are done with tests within this file
func TestApierStopEngine(t *testing.T) {
- exec.Command("pkill", "cgr-engine").Run()
+ if err := engine.KillEngine(100); err != nil {
+ t.Error(err)
+ }
}

View File

@@ -94,12 +94,7 @@ func testAttributeSInitCfg(t *testing.T) {
}
alsPrfCfg.DataFolderPath = alsPrfDataDir // Share DataFolderPath through config towards StoreDb for Flush()
config.SetCgrConfig(alsPrfCfg)
- switch alsPrfConfigDIR {
- case "tutmongo": // Mongo needs more time to reset db, need to investigate
- alsPrfDelay = 2000
- default:
- alsPrfDelay = 1000
- }
+ alsPrfDelay = 1000
}
func testAttributeSInitDataDb(t *testing.T) {
@@ -837,7 +832,7 @@ func testAttributeSPing(t *testing.T) {
}
func testAttributeSKillEngine(t *testing.T) {
- if err := engine.KillEngine(alsPrfDelay); err != nil {
+ if err := engine.KillEngine(100); err != nil {
t.Error(err)
}
}

View File

@@ -69,7 +69,7 @@ func testCDReInitCfg(t *testing.T) {
}
cdreCfg.DataFolderPath = alsPrfDataDir // Share DataFolderPath through config towards StoreDb for Flush()
config.SetCgrConfig(cdreCfg)
- cdreDelay = 2000
+ cdreDelay = 1000
}
func testCDReInitDataDb(t *testing.T) {
@@ -140,7 +140,7 @@ func testCDReAddCDRs(t *testing.T) {
t.Error("Unexpected reply received: ", reply)
}
}
- time.Sleep(time.Duration(cdreDelay) * time.Millisecond)
+ time.Sleep(100 * time.Millisecond)
}
func testCDReExportCDRs(t *testing.T) {
@@ -157,7 +157,7 @@ func testCDReExportCDRs(t *testing.T) {
}
func testCDReKillEngine(t *testing.T) {
- if err := engine.KillEngine(cdreDelay); err != nil {
+ if err := engine.KillEngine(100); err != nil {
t.Error(err)
}
}

View File

@@ -146,7 +146,6 @@ func TestCDRStatsitGetMetrics1(t *testing.T) {
// Test stats persistence
func TestCDRStatsitStatsPersistence(t *testing.T) {
- time.Sleep(time.Duration(2) * time.Second) // Allow stats to be updated in dataDb
if _, err := engine.StopStartEngine(cdrstCfgPath, *waitRater); err != nil {
t.Fatal(err)
}

View File

@@ -101,12 +101,7 @@ func testChargerSInitCfg(t *testing.T) {
}
chargerCfg.DataFolderPath = *dataDir
config.SetCgrConfig(chargerCfg)
- switch chargerConfigDIR {
- case "tutmongo":
- chargerDelay = 2000
- default:
- chargerDelay = 1000
- }
+ chargerDelay = 1000
}
func testChargerSInitDataDb(t *testing.T) {
@@ -320,7 +315,7 @@ func testChargerSPing(t *testing.T) {
}
func testChargerSKillEngine(t *testing.T) {
- if err := engine.KillEngine(chargerDelay); err != nil {
+ if err := engine.KillEngine(100); err != nil {
t.Error(err)
}
}

View File

@@ -112,12 +112,7 @@ func testV1FIdxLoadConfig(t *testing.T) {
if tSv1Cfg, err = config.NewCGRConfigFromFolder(tSv1CfgPath); err != nil {
t.Error(err)
}
- switch tSv1ConfDIR {
- case "tutmongo": // Mongo needs more time to reset db, need to investigate
- thdsDelay = 4000
- default:
- thdsDelay = 1000
- }
+ thdsDelay = 1000
}
func testV1FIdxdxInitDataDb(t *testing.T) {

View File

@@ -99,12 +99,7 @@ func testV1FIdxCaLoadConfig(t *testing.T) {
if tSv1Cfg, err = config.NewCGRConfigFromFolder(tSv1CfgPath); err != nil {
t.Error(err)
}
- switch tSv1ConfDIR {
- case "tutmongo": // Mongo needs more time to reset db, need to investigate
- thdsDelay = 4000
- default:
- thdsDelay = 1000
- }
+ thdsDelay = 1000
}
func testV1FIdxCaInitDataDb(t *testing.T) {

View File

@@ -82,12 +82,7 @@ func testFilterInitCfg(t *testing.T) {
}
filterCfg.DataFolderPath = filterDataDir // Share DataFolderPath through config towards StoreDb for Flush()
config.SetCgrConfig(filterCfg)
- switch filterConfigDIR {
- case "tutmongo": // Mongo needs more time to reset db, need to investigate
- filterDelay = 2000
- default:
- filterDelay = 1000
- }
+ filterDelay = 1000
}
// Wipe out the cdr database
@@ -211,7 +206,7 @@ func testFilterGetFilterAfterRemove(t *testing.T) {
}
func testFilterKillEngine(t *testing.T) {
- if err := engine.KillEngine(filterDelay); err != nil {
+ if err := engine.KillEngine(100); err != nil {
t.Error(err)
}
}

View File

@@ -20,7 +20,6 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
package v1
- /* Need to investigate why add *default key Item1 in mongo
import (
"net/rpc"
"net/rpc/jsonrpc"
@@ -80,12 +79,7 @@ func testPrecacheInitCfg(t *testing.T) {
}
precacheCfg.DataFolderPath = precacheDataDir // Share DataFolderPath through config towards StoreDb for Flush()
config.SetCgrConfig(precacheCfg)
- switch precacheConfigDIR {
- case "tutmongo": // Mongo needs more time to reset db, need to investigate
- precacheDelay = 2000
- default:
- precacheDelay = 1000
- }
+ precacheDelay = 1000
}
func testPrecacheResetDataDB(t *testing.T) {
@@ -102,7 +96,7 @@ func testPrecacheStartEngine(t *testing.T) {
func testPrecacheRpcConn(t *testing.T) {
var err error
- precacheRPC, err = jsonrpc.Dial("tcp", precacheCfg.RPCJSONListen) // We connect over JSON so we can also troubleshoot if needed
+ precacheRPC, err = jsonrpc.Dial("tcp", precacheCfg.ListenCfg().RPCJSONListen) // We connect over JSON so we can also troubleshoot if needed
if err != nil {
t.Fatal(err)
}
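The dial address now comes from the ListenCfg() accessor rather than a flat field on the config. A self-contained sketch of the assumed accessor shape (illustrative stand-in types, not the real cgrates config structs):

package main

import "fmt"

// ListenCfg stands in for the listen section the accessor exposes.
type ListenCfg struct {
	RPCJSONListen string // JSON-RPC listen address, e.g. ":2012"
}

// CGRConfig stands in for the top-level config holding that section.
type CGRConfig struct {
	listenCfg *ListenCfg
}

// ListenCfg mirrors the precacheCfg.ListenCfg() call used above.
func (cfg *CGRConfig) ListenCfg() *ListenCfg { return cfg.listenCfg }

func main() {
	cfg := &CGRConfig{listenCfg: &ListenCfg{RPCJSONListen: ":2012"}}
	fmt.Println(cfg.ListenCfg().RPCJSONListen) // -> :2012
}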
@@ -123,147 +117,135 @@ func testPrecacheGetCacheStatsBeforeLoad(t *testing.T) {
var reply *map[string]*ltcache.CacheStats
cacheIDs := []string{}
expectedStats := &map[string]*ltcache.CacheStats{
"*default": &ltcache.CacheStats{
"*default": {
Items: 0,
Groups: 0,
},
"account_action_plans": &ltcache.CacheStats{
"account_action_plans": {
Items: 0,
Groups: 0,
},
"action_plans": &ltcache.CacheStats{
"action_plans": {
Items: 0,
Groups: 0,
},
"action_triggers": &ltcache.CacheStats{
"action_triggers": {
Items: 0,
Groups: 0,
},
"actions": &ltcache.CacheStats{
"actions": {
Items: 0,
Groups: 0,
},
"aliases": &ltcache.CacheStats{
"aliases": {
Items: 0,
Groups: 0,
},
"attribute_filter_indexes": &ltcache.CacheStats{
"attribute_filter_indexes": {
Items: 0,
Groups: 0,
},
"attribute_filter_revindexes": &ltcache.CacheStats{
"attribute_profiles": {
Items: 0,
Groups: 0,
},
"attribute_profiles": &ltcache.CacheStats{
"cdr_stats": {
Items: 0,
Groups: 0,
},
"cdr_stats": &ltcache.CacheStats{
"charger_filter_indexes": {
Items: 0,
Groups: 0,
},
"derived_chargers": &ltcache.CacheStats{
"charger_profiles": {
Items: 0,
Groups: 0,
},
"destinations": &ltcache.CacheStats{
"derived_chargers": {
Items: 0,
Groups: 0,
},
"event_resources": &ltcache.CacheStats{
"destinations": {
Items: 0,
Groups: 0,
},
"filters": &ltcache.CacheStats{
"event_resources": {
Items: 0,
Groups: 0,
},
"lcr_rules": &ltcache.CacheStats{
"filters": {
Items: 0,
Groups: 0,
},
"rating_plans": &ltcache.CacheStats{
"lcr_rules": {
Items: 0,
Groups: 0,
},
"rating_profiles": &ltcache.CacheStats{
"rating_plans": {
Items: 0,
Groups: 0,
},
"resource_filter_indexes": &ltcache.CacheStats{
"rating_profiles": {
Items: 0,
Groups: 0,
},
"resource_filter_revindexes": &ltcache.CacheStats{
"resource_filter_indexes": {
Items: 0,
Groups: 0,
},
"resource_profiles": &ltcache.CacheStats{
"resource_profiles": {
Items: 0,
Groups: 0,
},
"resources": &ltcache.CacheStats{
"resources": {
Items: 0,
Groups: 0,
},
"reverse_aliases": &ltcache.CacheStats{
"reverse_aliases": {
Items: 0,
Groups: 0,
},
"reverse_destinations": &ltcache.CacheStats{
"reverse_destinations": {
Items: 0,
Groups: 0,
},
"shared_groups": &ltcache.CacheStats{
"shared_groups": {
Items: 0,
Groups: 0,
},
"stat_filter_indexes": &ltcache.CacheStats{
"stat_filter_indexes": {
Items: 0,
Groups: 0,
},
"stat_filter_revindexes": &ltcache.CacheStats{
"statqueue_profiles": {
Items: 0,
Groups: 0,
},
"statqueue_profiles": &ltcache.CacheStats{
"statqueues": {
Items: 0,
Groups: 0,
},
"statqueues": &ltcache.CacheStats{
"supplier_filter_indexes": {
Items: 0,
Groups: 0,
},
"supplier_filter_indexes": &ltcache.CacheStats{
"supplier_profiles": {
Items: 0,
Groups: 0,
},
"supplier_filter_revindexes": &ltcache.CacheStats{
"threshold_filter_indexes": {
Items: 0,
Groups: 0,
},
"supplier_profiles": &ltcache.CacheStats{
"threshold_profiles": {
Items: 0,
Groups: 0,
},
"threshold_filter_indexes": &ltcache.CacheStats{
"thresholds": {
Items: 0,
Groups: 0,
},
"threshold_filter_revindexes": &ltcache.CacheStats{
Items: 0,
Groups: 0,
},
"threshold_profiles": &ltcache.CacheStats{
Items: 0,
Groups: 0,
},
"thresholds": &ltcache.CacheStats{
Items: 0,
Groups: 0,
},
"timings": &ltcache.CacheStats{
"timings": {
Items: 0,
Groups: 0,
},
@@ -281,16 +263,15 @@ func testPrecacheFromFolder(t *testing.T) {
if err := precacheRPC.Call("ApierV1.LoadTariffPlanFromFolder", attrs, &reply); err != nil {
t.Error(err)
}
- time.Sleep(200 * time.Millisecond)
+ time.Sleep(500 * time.Millisecond)
}
func testPrecacheRestartEngine(t *testing.T) {
- time.Sleep(2 * time.Second)
if _, err := engine.StopStartEngine(precacheCfgPath, precacheDelay); err != nil {
t.Fatal(err)
}
var err error
- precacheRPC, err = jsonrpc.Dial("tcp", precacheCfg.RPCJSONListen) // We connect over JSON so we can also troubleshoot if needed
+ precacheRPC, err = jsonrpc.Dial("tcp", precacheCfg.ListenCfg().RPCJSONListen) // We connect over JSON so we can also troubleshoot if needed
if err != nil {
t.Fatal("Could not connect to rater: ", err.Error())
}
@@ -300,147 +281,135 @@ func testPrecacheGetCacheStatsAfterRestart(t *testing.T) {
var reply *map[string]*ltcache.CacheStats
cacheIDs := []string{}
expectedStats := &map[string]*ltcache.CacheStats{
"*default": &ltcache.CacheStats{
"*default": {
Items: 0,
Groups: 0,
},
"account_action_plans": &ltcache.CacheStats{
Items: 5,
"account_action_plans": {
Items: 5, //5
Groups: 0,
},
"action_plans": &ltcache.CacheStats{
"action_plans": {
Items: 4,
Groups: 0,
},
"action_triggers": &ltcache.CacheStats{
"action_triggers": {
Items: 4, // expected to have 4 items
Groups: 0,
},
"actions": &ltcache.CacheStats{
"actions": {
Items: 9, // expected to have 9 items
Groups: 0,
},
"aliases": &ltcache.CacheStats{
"aliases": {
Items: 1,
Groups: 0,
},
"attribute_filter_indexes": &ltcache.CacheStats{
"attribute_filter_indexes": {
Items: 0,
Groups: 0,
},
"attribute_filter_revindexes": &ltcache.CacheStats{
Items: 0,
Groups: 0,
},
"attribute_profiles": &ltcache.CacheStats{
"attribute_profiles": {
Items: 1,
Groups: 0,
},
"cdr_stats": &ltcache.CacheStats{
"cdr_stats": {
Items: 0,
Groups: 0,
},
"derived_chargers": &ltcache.CacheStats{
"charger_filter_indexes": {
Items: 0,
Groups: 0,
},
"charger_profiles": {
Items: 0,
Groups: 0,
},
"derived_chargers": {
Items: 1, // expected to have 1 item
Groups: 0,
},
"destinations": &ltcache.CacheStats{
Items: 8,
"destinations": {
Items: 5,
Groups: 0,
},
"event_resources": &ltcache.CacheStats{
"event_resources": {
Items: 0,
Groups: 0,
},
"filters": &ltcache.CacheStats{
"filters": {
Items: 16, // expected to have 16 items
Groups: 0,
},
"lcr_rules": &ltcache.CacheStats{
"lcr_rules": {
Items: 5, // expected to have 5 items
Groups: 0,
},
"rating_plans": &ltcache.CacheStats{
"rating_plans": {
Items: 4, // expected to have 4 items
Groups: 0,
},
"rating_profiles": &ltcache.CacheStats{
"rating_profiles": {
Items: 10, // expected to have 10 items
Groups: 0,
},
"resource_filter_indexes": &ltcache.CacheStats{
"resource_filter_indexes": {
Items: 0,
Groups: 0,
},
"resource_filter_revindexes": &ltcache.CacheStats{
Items: 0,
"resource_profiles": {
Items: 3,
Groups: 0,
},
"resource_profiles": &ltcache.CacheStats{
Items: 4,
Groups: 0,
},
"resources": &ltcache.CacheStats{
"resources": {
Items: 3, //expected to have 3 items
Groups: 0,
},
"reverse_aliases": &ltcache.CacheStats{
"reverse_aliases": {
Items: 2,
Groups: 0,
},
"reverse_destinations": &ltcache.CacheStats{
Items: 10,
"reverse_destinations": {
Items: 7,
Groups: 0,
},
"shared_groups": &ltcache.CacheStats{
"shared_groups": {
Items: 1,
Groups: 0,
},
"stat_filter_indexes": &ltcache.CacheStats{
"stat_filter_indexes": {
Items: 0,
Groups: 0,
},
"stat_filter_revindexes": &ltcache.CacheStats{
Items: 0,
"statqueue_profiles": {
Items: 1,
Groups: 0,
},
"statqueue_profiles": &ltcache.CacheStats{
Items: 2,
Groups: 0,
},
"statqueues": &ltcache.CacheStats{
"statqueues": {
Items: 1, // expected to have 1 item
Groups: 0,
},
"supplier_filter_indexes": &ltcache.CacheStats{
"supplier_filter_indexes": {
Items: 0,
Groups: 0,
},
"supplier_filter_revindexes": &ltcache.CacheStats{
Items: 0,
Groups: 0,
},
"supplier_profiles": &ltcache.CacheStats{
"supplier_profiles": {
Items: 3, // expected to have 3 items
Groups: 0,
},
"threshold_filter_indexes": &ltcache.CacheStats{
"threshold_filter_indexes": {
Items: 0,
Groups: 0,
},
"threshold_filter_revindexes": &ltcache.CacheStats{
Items: 0,
"threshold_profiles": {
Items: 7,
Groups: 0,
},
"threshold_profiles": &ltcache.CacheStats{
Items: 9,
Groups: 0,
},
"thresholds": &ltcache.CacheStats{
"thresholds": {
Items: 7, // expected to have 7 items
Groups: 0,
},
"timings": &ltcache.CacheStats{
"timings": {
Items: 0,
Groups: 0,
},
@@ -457,4 +426,3 @@ func testPrecacheKillEngine(t *testing.T) {
t.Error(err)
}
}
- */

View File

@@ -87,12 +87,8 @@ func testV1RsLoadConfig(t *testing.T) {
if rlsV1Cfg, err = config.NewCGRConfigFromFolder(rlsV1CfgPath); err != nil {
t.Error(err)
}
- switch rlsV1ConfDIR {
- case "tutmongo": // Mongo needs more time to reset db, need to investigate
- resDelay = 4000
- default:
- resDelay = 2000
- }
+ resDelay = 1000
}
func testV1RsInitDataDb(t *testing.T) {
@@ -128,7 +124,7 @@ func testV1RsFromFolder(t *testing.T) {
if err := rlsV1Rpc.Call("ApierV1.LoadTariffPlanFromFolder", attrs, &reply); err != nil {
t.Error(err)
}
- time.Sleep(time.Duration(1000) * time.Millisecond)
+ time.Sleep(500 * time.Millisecond)
}
@@ -144,12 +140,10 @@ func testV1RsGetResourcesForEvent(t *testing.T) {
if err := rlsV1Rpc.Call(utils.ResourceSv1GetResourcesForEvent, args, &reply); err == nil || err.Error() != utils.ErrNotFound.Error() {
t.Error(err)
}
- time.Sleep(time.Duration(500) * time.Millisecond)
args.CGREvent.Event = map[string]interface{}{"Destination": "10", "Account": "1001"}
if err := rlsV1Rpc.Call(utils.ResourceSv1GetResourcesForEvent, args, &reply); err != nil {
t.Error(err)
}
- time.Sleep(time.Duration(500) * time.Millisecond)
if reply == nil {
t.Errorf("Expecting reply to not be nil")
// reply should not be nil so exit function
@@ -173,7 +167,6 @@ func testV1RsGetResourcesForEvent(t *testing.T) {
if err := rlsV1Rpc.Call(utils.ResourceSv1GetResourcesForEvent, args, &reply); err != nil {
t.Error(err)
}
- time.Sleep(time.Duration(500) * time.Millisecond)
if len(*reply) != 1 {
t.Errorf("Expecting: %+v, received: %+v", 2, len(*reply))
}
@@ -182,7 +175,6 @@ func testV1RsGetResourcesForEvent(t *testing.T) {
if err := rlsV1Rpc.Call(utils.ResourceSv1GetResourcesForEvent, args, &reply); err != nil {
t.Error(err)
}
- time.Sleep(time.Duration(500) * time.Millisecond)
if len(*reply) != 1 {
t.Errorf("Expecting: %+v, received: %+v", 1, len(*reply))
}
@@ -374,7 +366,7 @@ func testV1RsAllocateResource(t *testing.T) {
t.Error(err)
}
eAllocationMsg = "ResGroup1"
- time.Sleep(time.Duration(1000) * time.Millisecond) // Give time for allocations on first resource to expire
+ time.Sleep(time.Second) // Give time for allocations on first resource to expire
argsRU = utils.ArgRSv1ResourceUsage{
UsageID: "651a8db2-4f67-4cf8-b622-169e8a482e55", // same ID should be accepted by first group since the previous resource should be expired
@@ -560,7 +552,6 @@ func testV1RsDBStore(t *testing.T) {
if err != nil {
t.Fatal("Could not connect to rater: ", err.Error())
}
- time.Sleep(100 * time.Millisecond)
rs = new(engine.Resources)
args = &utils.ArgRSv1ResourceUsage{
CGREvent: utils.CGREvent{
@@ -589,7 +580,6 @@ func testV1RsDBStore(t *testing.T) {
}
}
}
- time.Sleep(time.Duration(1) * time.Second)
}
func testV1RsGetResourceProfileBeforeSet(t *testing.T) {

View File

@@ -78,7 +78,6 @@ func TestSessionSv1ItStartEngine(t *testing.T) {
if _, err := engine.StopStartEngine(sSv1CfgPath2, 100); err != nil {
t.Fatal(err)
}
- time.Sleep(100 * time.Millisecond)
}
func TestSessionSv1ItRpcConn(t *testing.T) {
@@ -109,7 +108,6 @@ func TestSessionSv1ItTPFromFolder(t *testing.T) {
attrs, &loadInst); err != nil {
t.Error(err)
}
- time.Sleep(time.Millisecond) // Give time for scheduler to execute topups
}
func TestSessionSv1ItGetThreshold(t *testing.T) {

View File

@@ -83,7 +83,6 @@ func TestSSv1ItStartEngine(t *testing.T) {
if _, err := engine.StopStartEngine(sSv1CfgPath, 100); err != nil {
t.Fatal(err)
}
- time.Sleep(100 * time.Millisecond)
}
func TestSSv1ItRpcConn(t *testing.T) {
@@ -124,7 +123,6 @@ func TestSSv1ItTPFromFolder(t *testing.T) {
attrs, &loadInst); err != nil {
t.Error(err)
}
- time.Sleep(time.Millisecond) // Give time for scheduler to execute topups
}
func TestSSv1ItAuth(t *testing.T) {

View File

@@ -109,12 +109,7 @@ func testV1STSLoadConfig(t *testing.T) {
if stsV1Cfg, err = config.NewCGRConfigFromFolder(stsV1CfgPath); err != nil {
t.Error(err)
}
- switch stsV1ConfDIR {
- case "tutmongo": // Mongo needs more time to reset db, need to investigate
- statsDelay = 4000
- default:
- statsDelay = 2000
- }
+ statsDelay = 1000
}
func testV1STSInitDataDb(t *testing.T) {
@@ -262,7 +257,7 @@ func testV1STSProcessEvent(t *testing.T) {
}
func testV1STSGetStatsAfterRestart(t *testing.T) {
- time.Sleep(1 * time.Second)
+ time.Sleep(time.Second)
if _, err := engine.StopStartEngine(stsV1CfgPath, statsDelay); err != nil {
t.Fatal(err)
}
@@ -271,7 +266,6 @@ func testV1STSGetStatsAfterRestart(t *testing.T) {
if err != nil {
t.Fatal("Could not connect to rater: ", err.Error())
}
- time.Sleep(1 * time.Second)
//get stats metrics after restart
expectedMetrics2 := map[string]string{
@@ -292,7 +286,6 @@ func testV1STSGetStatsAfterRestart(t *testing.T) {
} else if !reflect.DeepEqual(expectedMetrics2, metrics2) {
t.Errorf("After restat expecting: %+v, received reply: %s", expectedMetrics2, metrics2)
}
- time.Sleep(1 * time.Second)
}
func testV1STSSetStatQueueProfile(t *testing.T) {
@@ -400,7 +393,7 @@ func testV1STSUpdateStatQueueProfile(t *testing.T) {
} else if result != utils.OK {
t.Error("Unexpected reply returned", result)
}
- time.Sleep(time.Duration(1 * time.Second))
+ time.Sleep(time.Second)
var reply *engine.StatQueueProfile
if err := stsV1Rpc.Call("ApierV1.GetStatQueueProfile",
&utils.TenantID{Tenant: "cgrates.org", ID: "TEST_PROFILE1"}, &reply); err != nil {

View File

@@ -80,7 +80,6 @@ func TestSuplSV1ITMySQL(t *testing.T) {
func TestSuplSV1ITMongo(t *testing.T) {
splSv1ConfDIR = "tutmongo"
- time.Sleep(time.Duration(2 * time.Second)) // give time for engine to start
for _, stest := range sTestsSupplierSV1 {
t.Run(splSv1ConfDIR, stest)
}
@@ -92,12 +91,7 @@ func testV1SplSLoadConfig(t *testing.T) {
if splSv1Cfg, err = config.NewCGRConfigFromFolder(splSv1CfgPath); err != nil {
t.Error(err)
}
- switch splSv1ConfDIR {
- case "tutmongo": // Mongo needs more time to reset db, need to investigate
- splsDelay = 4000
- default:
- splsDelay = 1000
- }
+ splsDelay = 1000
}
func testV1SplSInitDataDb(t *testing.T) {
@@ -356,7 +350,7 @@ func testV1SplSGetHighestCostSuppliers(t *testing.T) {
utils.Destination: "1002",
utils.SetupTime: time.Date(2017, 12, 1, 14, 25, 0, 0, time.UTC),
utils.Usage: "1m20s",
"DistincMatch": "*highest_cost",
"DistinctMatch": "*highest_cost",
},
},
}
@@ -397,6 +391,7 @@ func testV1SplSGetHighestCostSuppliers(t *testing.T) {
} else if !reflect.DeepEqual(eSpls, suplsReply) {
t.Errorf("Expecting: %s, received: %s",
utils.ToJSON(eSpls), utils.ToJSON(suplsReply))
+ panic(utils.ToJSON(suplsReply))
}
}
@@ -543,7 +538,7 @@ func testV1SplSPolulateStatsForQOS(t *testing.T) {
} else if !reflect.DeepEqual(reply, expected) {
t.Errorf("Expecting: %+v, received: %+v", expected, reply)
}
time.Sleep(100 * time.Millisecond)
}
func testV1SplSGetQOSSuppliers(t *testing.T) {
@@ -809,6 +804,7 @@ func testV1SplSGetQOSSuppliersFiltred2(t *testing.T) {
} else if !reflect.DeepEqual(eSpls, suplsReply) {
t.Errorf("Expecting: %s, received: %s",
utils.ToJSON(eSpls), utils.ToJSON(suplsReply))
+ panic(utils.ToJSON(suplsReply))
}
}

View File

@@ -178,7 +178,6 @@ func TestTSV1ITMySQL(t *testing.T) {
func TestTSV1ITMongo(t *testing.T) {
tSv1ConfDIR = "tutmongo"
- time.Sleep(time.Duration(2 * time.Second)) // give time for engine to start
for _, stest := range sTestsThresholdSV1 {
t.Run(tSv1ConfDIR, stest)
}
@@ -190,12 +189,7 @@ func testV1TSLoadConfig(t *testing.T) {
if tSv1Cfg, err = config.NewCGRConfigFromFolder(tSv1CfgPath); err != nil {
t.Error(err)
}
- switch tSv1ConfDIR {
- case "tutmongo": // Mongo needs more time to reset db, need to investigate
- thdsDelay = 4000
- default:
- thdsDelay = 2000
- }
+ thdsDelay = 1000
}
func testV1TSInitDataDb(t *testing.T) {
@@ -335,7 +329,6 @@ func testV1TSGetThresholdsAfterRestart(t *testing.T) {
if err != nil {
t.Fatal("Could not connect to rater: ", err.Error())
}
- time.Sleep(time.Duration(1 * time.Second))
var td engine.Threshold
if err := tSv1Rpc.Call(utils.ThresholdSv1GetThreshold,
&utils.TenantID{Tenant: "cgrates.org", ID: "THD_ACNT_BALANCE_1"}, &td); err != nil {
@@ -399,7 +392,7 @@ func testV1TSUpdateThresholdProfile(t *testing.T) {
} else if result != utils.OK {
t.Error("Unexpected reply returned", result)
}
- time.Sleep(time.Duration(100 * time.Millisecond)) // mongo is async
+ time.Sleep(time.Duration(*waitRater) * time.Millisecond) // mongo is async
var reply *engine.ThresholdProfile
if err := tSv1Rpc.Call("ApierV1.GetThresholdProfile",
&utils.TenantID{Tenant: "cgrates.org", ID: "THD_Test"}, &reply); err != nil {
@@ -421,7 +414,7 @@ func testV1TSRemoveThresholdProfile(t *testing.T) {
if err := tSv1Rpc.Call("ApierV1.GetThresholdProfile",
&utils.TenantID{Tenant: "cgrates.org", ID: "THD_Test"}, &sqp); err == nil ||
err.Error() != utils.ErrNotFound.Error() {
- t.Error(err)
+ t.Errorf("Received %s and the error: %+v", utils.ToJSON(sqp), err)
}
}
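The *waitRater used above is the package-level test flag the waits in this suite key on; a sketch of how such a flag is typically declared (the default value and description here are assumptions, not shown in this diff):

import "flag"

// Assumed declaration; the actual one lives elsewhere in the test package.
var waitRater = flag.Int("wait_rater", 500,
	"milliseconds to wait for the engine to start and cache data")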

View File

@@ -82,12 +82,7 @@ func testTPInitCfg(t *testing.T) {
}
tpCfg.DataFolderPath = tpDataDir // Share DataFolderPath through config towards StoreDb for Flush()
config.SetCgrConfig(tpCfg)
- switch tpConfigDIR {
- case "tutmongo": // Mongo needs more time to reset db, need to investigate
- tpDelay = 4000
- default:
- tpDelay = 2000
- }
+ tpDelay = 1000
}
// Wipe out the cdr database
@@ -114,7 +109,6 @@ func testTPRpcConn(t *testing.T) {
}
func testTPImportTPFromFolderPath(t *testing.T) {
- time.Sleep(time.Duration(1 * time.Second))
var reply string
if err := tpRPC.Call("ApierV1.ImportTariffPlanFromFolder",
utils.AttrImportTPFromFolder{TPid: "TEST_TPID2",
@@ -123,7 +117,7 @@ func testTPImportTPFromFolderPath(t *testing.T) {
} else if reply != utils.OK {
t.Error("Calling ApierV1.ImportTarrifPlanFromFolder got reply: ", reply)
}
- time.Sleep(time.Duration(2 * time.Second))
+ time.Sleep(500 * time.Millisecond)
}
func testTPExportTPToFolder(t *testing.T) {
@@ -147,7 +141,7 @@ func testTPExportTPToFolder(t *testing.T) {
} else if !reflect.DeepEqual(len(expectedTPStas.ExportedFiles), len(reply.ExportedFiles)) {
t.Errorf("Expecting : %+v, received: %+v", len(expectedTPStas.ExportedFiles), len(reply.ExportedFiles))
}
- time.Sleep(time.Duration(2 * time.Second))
+ time.Sleep(500 * time.Millisecond)
}

View File

@@ -90,12 +90,7 @@ func testTPAccActionsInitCfg(t *testing.T) {
}
tpAccActionsCfg.DataFolderPath = tpAccActionsDataDir // Share DataFolderPath through config towards StoreDb for Flush()
config.SetCgrConfig(tpAccActionsCfg)
- switch tpAccActionsConfigDIR {
- case "tutmongo": // Mongo needs more time to reset db, need to investigate
- tpAccActionsDelay = 2000
- default:
- tpAccActionsDelay = 1000
- }
+ tpAccActionsDelay = 1000
}
// Wipe out the cdr database

View File

@@ -132,12 +132,12 @@ func testTPAccPlansSetTPAccPlan(t *testing.T) {
TPid: "TPAcc",
ID: "ID",
ActionPlan: []*utils.TPActionTiming{
- &utils.TPActionTiming{
+ {
ActionsId: "AccId",
TimingId: "TimingID",
Weight: 10,
},
- &utils.TPActionTiming{
+ {
ActionsId: "AccId2",
TimingId: "TimingID2",
Weight: 11,
@@ -178,17 +178,17 @@ func testTPAccPlansGetTPAccPlanIds(t *testing.T) {
func testTPAccPlansUpdateTPAccPlan(t *testing.T) {
tpAccPlan.ActionPlan = []*utils.TPActionTiming{
- &utils.TPActionTiming{
+ {
ActionsId: "AccId",
TimingId: "TimingID",
Weight: 10,
},
- &utils.TPActionTiming{
+ {
ActionsId: "AccId2",
TimingId: "TimingID2",
Weight: 11,
},
- &utils.TPActionTiming{
+ {
ActionsId: "AccId3",
TimingId: "TimingID3",
Weight: 12,

View File

@@ -649,9 +649,9 @@ func (ms *MongoStorage) GetKeysForPrefix(prefix string) (result []string, err er
result = append(result, utils.StatQueueProfilePrefix+utils.ConcatenatedKey(idResult.Tenant, idResult.Id))
}
case utils.AccountActionPlansPrefix:
- iter := db.C(colAAp).Find(bson.M{"id": bson.M{"$regex": bson.RegEx{Pattern: subject}}}).Select(bson.M{"id": 1}).Iter()
- for iter.Next(&idResult) {
- result = append(result, utils.AccountActionPlansPrefix+idResult.Id)
+ iter := db.C(colAAp).Find(bson.M{"key": bson.M{"$regex": bson.RegEx{Pattern: subject}}}).Select(bson.M{"key": 1}).Iter()
+ for iter.Next(&keyResult) {
+ result = append(result, utils.AccountActionPlansPrefix+keyResult.Key)
}
case utils.TimingsPrefix:
iter := db.C(colTmg).Find(bson.M{"id": bson.M{"$regex": bson.RegEx{Pattern: subject}}}).Select(bson.M{"id": 1}).Iter()
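Since the account_action_plans lookup now matches and selects the key field instead of id, the iterator needs a receiver with a Key field; a sketch of the assumed receiver shapes (neither declaration appears in this hunk):

// Assumed receiver shapes for the two iterators above.
var idResult struct {
	Tenant string
	Id     string
}
var keyResult struct {
	Key string
}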