From e025a0dc893ba031ef9465bf4c5f3d8268d8d8f0 Mon Sep 17 00:00:00 2001
From: ionutboangiu
Date: Fri, 12 Apr 2024 11:13:05 +0300
Subject: [PATCH] Revise tests

-ensure failure messages are useful
-optimize tests with needlessly large sleep times
-enforce a stricter margin for error
---
 apier/v1/apier2_it_test.go     | 39 ++++++++++++-----------
 engine/caches_test.go          | 56 +++++++++++++---------------
 engine/stats_test.go           | 47 ++++++++++++++++++++++++++++
 engine/thresholds_test.go      | 33 +++++++++-----------
 engine/z_resources_test.go     | 32 +++++++++----------
 guardian/guardian_test.go      | 41 ++++++++++++++-----------
 loaders/loader_test.go         | 12 ++++----
 sessions/sessionscover_test.go | 18 ++++++++---
 8 files changed, 161 insertions(+), 117 deletions(-)

diff --git a/apier/v1/apier2_it_test.go b/apier/v1/apier2_it_test.go
index 4e7dfd898..a21f7bc8d 100644
--- a/apier/v1/apier2_it_test.go
+++ b/apier/v1/apier2_it_test.go
@@ -404,40 +404,39 @@ func testAPIerSetActionPlanDfltTime(t *testing.T) {
 	} else if reply1 != utils.OK {
 		t.Errorf("Calling APIerSv1.SetActionPlan received: %s", reply1)
 	}
-	var rply []*scheduler.ScheduledAction
 	refTime := time.Now()
+	var rply []*scheduler.ScheduledAction
 	if err := apierRPC.Call(context.Background(), utils.APIerSv1GetScheduledActions,
 		scheduler.ArgsGetScheduledActions{}, &rply); err != nil {
 		t.Fatal(err)
 	}
-
 	for _, schedAct := range rply {
+		got := schedAct.NextRunTime
 		switch schedAct.ActionPlanID {
 		case "AP_WEEKLY":
-			t1 := refTime.AddDate(0, 0, 7)
-			if schedAct.NextRunTime.Before(t1.Add(-time.Second)) ||
-				schedAct.NextRunTime.After(t1.Add(2*time.Second)) {
-				t.Errorf("Expected the nextRuntime to be after 1 week,but received: <%+v>", utils.ToJSON(schedAct))
+			want := refTime.AddDate(0, 0, 7) // +1 week
+			if diff := want.Sub(got); diff < 0 || diff > 2*time.Second {
+				t.Errorf("%s scheduled date = %v, want %v (diff %v, margin 2s)",
+					schedAct.ActionPlanID, got.Format(time.StampMilli), want.Format(time.StampMilli), diff)
 			}
 		case "AP_DAILY":
-			t1 := refTime.AddDate(0, 0, 1)
-			if schedAct.NextRunTime.Before(t1.Add(-time.Second)) ||
-				schedAct.NextRunTime.After(t1.Add(2*time.Second)) {
-				t.Errorf("Expected the nextRuntime to be after 1 day,but received: <%+v>", utils.ToJSON(schedAct))
+			want := refTime.AddDate(0, 0, 1) // +1 day
+			if diff := want.Sub(got); diff < 0 || diff > 2*time.Second {
+				t.Errorf("%s scheduled date = %v, want %v (diff %v, margin 2s)",
+					schedAct.ActionPlanID, got.Format(time.StampMilli), want.Format(time.StampMilli), diff)
 			}
 		case "AP_HOURLY":
-			if schedAct.NextRunTime.Before(refTime.Add(59*time.Minute+59*time.Second)) ||
-				schedAct.NextRunTime.After(refTime.Add(time.Hour+2*time.Second)) {
-				t.Errorf("Expected the nextRuntime to be after 1 hour,but received: <%+v>", utils.ToJSON(schedAct))
+			want := refTime.Add(time.Hour) // +1h
+			if diff := want.Sub(got); diff < 0 || diff > 2*time.Second {
+				t.Errorf("%s scheduled date = %v, want %v (diff %v, margin 2s)",
+					schedAct.ActionPlanID, got.Format(time.StampMilli), want.Format(time.StampMilli), diff)
 			}
 		case "AP_MONTHLY":
-			// *monthly needs to mach exactly the day
-			expected := refTime.AddDate(0, 1, 0)
-			expected = time.Date(expected.Year(), expected.Month(), refTime.Day(), refTime.Hour(),
-				refTime.Minute(), refTime.Second(), 0, schedAct.NextRunTime.Location())
-			if schedAct.NextRunTime.Before(expected.Add(-time.Second)) ||
-				schedAct.NextRunTime.After(expected.Add(2*time.Second)) {
-				t.Errorf("Expected the nextRuntime to be after 1 month,but received: <%+v>", utils.ToJSON(schedAct))
+			// *monthly needs to match exactly the day
+			want := refTime.AddDate(0, 1, 0) // +1 month
+			if diff := want.Sub(got); diff < 0 || diff > 2*time.Second {
+				t.Errorf("%s scheduled date = %v, want %v (diff %v, margin 2s)",
+					schedAct.ActionPlanID, got.Format(time.StampMilli), want.Format(time.StampMilli), diff)
 			}
 		}
 	}
diff --git a/engine/caches_test.go b/engine/caches_test.go
index 9a69aff17..9afe0b503 100644
--- a/engine/caches_test.go
+++ b/engine/caches_test.go
@@ -283,49 +283,35 @@ func TestCacheSV1GetItem(t *testing.T) {
 }
 
 func TestCacheSV1GetItemExpiryTime(t *testing.T) {
-	tmp := Cache
-
-	defer func() {
-		Cache = tmp
-		config.SetCgrConfig(config.NewDefaultCGRConfig())
-	}()
-	Cache.Clear(nil)
-	args := &utils.ArgsGetCacheItemWithAPIOpts{
-		ArgsGetCacheItem: utils.ArgsGetCacheItem{
-			CacheID: "cacheID",
-			ItemID:  "itemID",
-		},
-	}
+	cacheID := "testCache"
+	itemID := "testItem"
 	tscache := ltcache.NewTransCache(
 		map[string]*ltcache.CacheConfig{
-			"cacheID": {
-				MaxItems:  3,
-				TTL:       time.Minute * 30,
-				StaticTTL: false,
-				OnEvicted: func(itmID string, value any) {
-
-				},
+			cacheID: {
+				MaxItems: -1,
+				TTL:      30 * time.Minute,
 			},
 		})
-	tscache.Set("cacheID", "itemID", "value", []string{}, true, "tId")
-
-	cfg := config.NewDefaultCGRConfig()
-	db := NewInternalDB(nil, nil, true, cfg.DataDbCfg().Items)
-	dm := NewDataManager(db, cfg.CacheCfg(), nil)
 	chS := &CacheS{
-		cfg:    cfg,
-		dm:     dm,
 		tCache: tscache,
 	}
-	reply := now
-	loc, _ := time.LoadLocation("EST")
-	exp := now.Add(30 * time.Minute).In(loc).Minute()
-	if err := chS.V1GetItemExpiryTime(context.Background(), args, &reply); err != nil {
-		t.Error(err)
-	} else if reply.Minute() != exp {
-		t.Errorf("expected %+v,received %+v", exp, reply)
-	}
+	chS.tCache.Set(cacheID, itemID, "value", []string{}, true, "")
+	want := time.Now().Add(30 * time.Minute)
+	var got time.Time
+	if err := chS.V1GetItemExpiryTime(context.Background(),
+		&utils.ArgsGetCacheItemWithAPIOpts{
+			ArgsGetCacheItem: utils.ArgsGetCacheItem{
+				CacheID: cacheID,
+				ItemID:  itemID,
+			},
+		}, &got); err != nil {
+		t.Fatalf("V1GetItemExpiryTime(%q,%q): got unexpected err=%v", cacheID, itemID, err)
+	}
+	if diff := want.Sub(got); diff < 0 || diff > time.Millisecond {
+		t.Errorf("V1GetItemExpiryTime(%q,%q) = %v, want %v (diff %v, margin 1ms)",
+			cacheID, itemID, got.Format(time.StampMicro), want.Format(time.StampMicro), diff)
+	}
 }
 
 func TestCacheSV1RemoveItem(t *testing.T) {
diff --git a/engine/stats_test.go b/engine/stats_test.go
index e13e3893f..42f1f008b 100644
--- a/engine/stats_test.go
+++ b/engine/stats_test.go
@@ -1026,6 +1026,53 @@ func TestStatQueueStartLoop(t *testing.T) {
 	}
 }
 
+func TestStatQueueRunBackupStop(t *testing.T) {
+	cfg := config.NewDefaultCGRConfig()
+	cfg.StatSCfg().StoreInterval = 5 * time.Millisecond
+	data := NewInternalDB(nil, nil, true, cfg.DataDbCfg().Items)
+	dm := NewDataManager(data, cfg.CacheCfg(), nil)
+	tnt := "cgrates.org"
+	sqID := "testSQ"
+	sqS := &StatService{
+		dm: dm,
+		storedStatQueues: utils.StringSet{
+			sqID: struct{}{},
+		},
+		cgrcfg:      cfg,
+		loopStopped: make(chan struct{}, 1),
+		stopBackup:  make(chan struct{}),
+	}
+	value := &StatQueue{
+		dirty:  utils.BoolPointer(true),
+		Tenant: tnt,
+		ID:     sqID,
+	}
+	Cache.SetWithoutReplicate(utils.CacheStatQueues, sqID, value, nil, true, "")
+
+	// Backup loop checks for the state of the stopBackup
+	// channel after storing the stat queue. Channel can be
+	// safely closed beforehand.
+	close(sqS.stopBackup)
+	sqS.runBackup()
+
+	want := &StatQueue{
+		dirty:  utils.BoolPointer(false),
+		Tenant: tnt,
+		ID:     sqID,
+	}
+	if got, err := sqS.dm.GetStatQueue(tnt, sqID, true, false, utils.NonTransactional); err != nil {
+		t.Errorf("dm.GetStatQueue(%q,%q): got unexpected err=%v", tnt, sqID, err)
+	} else if !reflect.DeepEqual(got, want) {
+		t.Errorf("dm.GetStatQueue(%q,%q) = %v, want %v", tnt, sqID, got, want)
+	}
+
+	select {
+	case <-sqS.loopStopped:
+	case <-time.After(time.Second):
+		t.Error("timed out waiting for loop to stop")
+	}
+}
+
 func TestStatQueueShutdown(t *testing.T) {
 	utils.Logger.SetLogLevel(6)
 	utils.Logger.SetSyslog(nil)
diff --git a/engine/thresholds_test.go b/engine/thresholds_test.go
index 99f5a462a..97ccb847a 100644
--- a/engine/thresholds_test.go
+++ b/engine/thresholds_test.go
@@ -1753,30 +1753,23 @@ func TestThresholdsRunBackupStop(t *testing.T) {
 	cfg.ThresholdSCfg().StoreInterval = 5 * time.Millisecond
 	data := NewInternalDB(nil, nil, true, cfg.DataDbCfg().Items)
 	dm := NewDataManager(data, cfg.CacheCfg(), nil)
+	tnt := "cgrates.org"
+	thID := "Th1"
 	tS := &ThresholdService{
 		dm: dm,
 		storedTdIDs: utils.StringSet{
-			"Th1": struct{}{},
+			thID: struct{}{},
 		},
 		cgrcfg:      cfg,
 		loopStopped: make(chan struct{}, 1),
 		stopBackup:  make(chan struct{}),
 	}
-
 	value := &Threshold{
 		dirty: utils.BoolPointer(true),
-		Tenant: "cgrates.org",
-		ID:     "Th1",
-	}
-
-	Cache.SetWithoutReplicate(utils.CacheThresholds, "Th1", value, nil, true,
-		utils.NonTransactional)
-
-	exp := &Threshold{
-		dirty:  utils.BoolPointer(false),
-		Tenant: "cgrates.org",
-		ID:     "Th1",
+		Tenant: tnt,
+		ID:     thID,
 	}
+	Cache.SetWithoutReplicate(utils.CacheThresholds, thID, value, nil, true, "")
 
 	// Backup loop checks for the state of the stopBackup
 	// channel after storing the threshold. Channel can be
@@ -1784,11 +1777,15 @@
 	close(tS.stopBackup)
 	tS.runBackup()
 
-	if rcv, err := tS.dm.GetThreshold("cgrates.org", "Th1", true, false,
-		utils.NonTransactional); err != nil {
-		t.Error(err)
-	} else if !reflect.DeepEqual(rcv, exp) {
-		t.Errorf("threshold: want %+v, got %+v", exp, rcv)
+	want := &Threshold{
+		dirty:  utils.BoolPointer(false),
+		Tenant: tnt,
+		ID:     thID,
+	}
+	if got, err := tS.dm.GetThreshold(tnt, thID, true, false, utils.NonTransactional); err != nil {
+		t.Errorf("dm.GetThreshold(%q,%q): got unexpected err=%v", tnt, thID, err)
+	} else if !reflect.DeepEqual(got, want) {
+		t.Errorf("dm.GetThreshold(%q,%q) = %v, want %v", tnt, thID, got, want)
 	}
 
 	select {
diff --git a/engine/z_resources_test.go b/engine/z_resources_test.go
index 634dc5b76..e1167d348 100644
--- a/engine/z_resources_test.go
+++ b/engine/z_resources_test.go
@@ -5935,30 +5935,23 @@ func TestResourcesRunBackupStop(t *testing.T) {
 	cfg.ResourceSCfg().StoreInterval = 5 * time.Millisecond
 	data := NewInternalDB(nil, nil, true, cfg.DataDbCfg().Items)
 	dm := NewDataManager(data, cfg.CacheCfg(), nil)
+	tnt := "cgrates.org"
+	resID := "Res1"
 	rS := &ResourceService{
 		dm: dm,
 		storedResources: utils.StringSet{
-			"Res1": struct{}{},
+			resID: struct{}{},
 		},
 		cgrcfg:      cfg,
 		loopStopped: make(chan struct{}, 1),
 		stopBackup:  make(chan struct{}),
 	}
-
 	value := &Resource{
 		dirty:  utils.BoolPointer(true),
-		Tenant: "cgrates.org",
-		ID:     "Res1",
-	}
-
-	Cache.SetWithoutReplicate(utils.CacheResources, "Res1", value, nil, true,
-		utils.NonTransactional)
-
-	exp := &Resource{
-		dirty:  utils.BoolPointer(false),
-		Tenant: "cgrates.org",
-		ID:     "Res1",
+		Tenant: tnt,
+		ID:     resID,
 	}
+	Cache.SetWithoutReplicate(utils.CacheResources, resID, value, nil, true, "")
 
 	// Backup loop checks for the state of the stopBackup
 	// channel after storing the resource. Channel can be
@@ -5966,10 +5959,15 @@
 	close(rS.stopBackup)
 	rS.runBackup()
 
-	if rcv, err := rS.dm.GetResource("cgrates.org", "Res1", true, false, utils.NonTransactional); err != nil {
-		t.Error(err)
-	} else if !reflect.DeepEqual(rcv, exp) {
-		t.Errorf("resouce: want %+v, got %+v", exp, rcv)
+	want := &Resource{
+		dirty:  utils.BoolPointer(false),
+		Tenant: tnt,
+		ID:     resID,
+	}
+	if got, err := rS.dm.GetResource(tnt, resID, true, false, ""); err != nil {
+		t.Errorf("dm.GetResource(%q,%q): got unexpected err=%v", tnt, resID, err)
+	} else if !reflect.DeepEqual(got, want) {
+		t.Errorf("dm.GetResource(%q,%q) = %v, want %v", tnt, resID, got, want)
 	}
 
 	select {
diff --git a/guardian/guardian_test.go b/guardian/guardian_test.go
index 35d6ea0cf..df5230d6c 100644
--- a/guardian/guardian_test.go
+++ b/guardian/guardian_test.go
@@ -28,16 +28,18 @@ import (
 )
 
 func delayHandler() error {
-	time.Sleep(100 * time.Millisecond)
+	time.Sleep(10 * time.Millisecond)
 	return nil
 }
 
-// Forks 3 groups of workers and makes sure that the time for execution is the one we expect for all 15 goroutines (with 100ms )
+// TestGuardianMultipleKeys forks 3 groups of workers across 3 keys and ensures that the
+// total execution time aligns with expectations for all 15 goroutines (guarding a handler
+// with 10ms duration for its entire duration).
func TestGuardianMultipleKeys(t *testing.T) { - tStart := time.Now() maxIter := 5 sg := new(sync.WaitGroup) keys := []string{"test1", "test2", "test3"} + tStart := time.Now() for i := 0; i < maxIter; i++ { for _, key := range keys { sg.Add(1) @@ -48,44 +50,49 @@ func TestGuardianMultipleKeys(t *testing.T) { } } sg.Wait() - mustExecDur := time.Duration(maxIter*100) * time.Millisecond - if execTime := time.Since(tStart); execTime < mustExecDur || - execTime > mustExecDur+100*time.Millisecond { - t.Errorf("Execution took: %v", execTime) + execTime := time.Since(tStart) + want := 10 * time.Millisecond * time.Duration(maxIter) + if diff := execTime - want; diff < 0 || diff > 5*time.Millisecond { + t.Errorf("total Guard duration for %d iterations over %d keys=%v: got %v want %v (diff %v, margin 5ms)", + maxIter, len(keys), keys, execTime, want, diff) } Guardian.lkMux.Lock() for _, key := range keys { if _, hasKey := Guardian.locks[key]; hasKey { - t.Errorf("Possible memleak for key: %s", key) + t.Errorf("Guardian.locks[%q]: key should not exist (possible memleak)", key) } } Guardian.lkMux.Unlock() } +// TestGuardianTimeout forks 3 groups of workers across 3 keys and ensures that the +// total execution time aligns with expectations for all 15 goroutines (guarding a handler +// with 10ms duration for half of its entire duration, 5ms). func TestGuardianTimeout(t *testing.T) { - tStart := time.Now() maxIter := 5 sg := new(sync.WaitGroup) keys := []string{"test1", "test2", "test3"} + tStart := time.Now() for i := 0; i < maxIter; i++ { for _, key := range keys { sg.Add(1) go func(key string) { - Guardian.Guard(delayHandler, 10*time.Millisecond, key) + Guardian.Guard(delayHandler, 5*time.Millisecond, key) sg.Done() }(key) } } sg.Wait() - mustExecDur := time.Duration(maxIter*10) * time.Millisecond - if execTime := time.Since(tStart); execTime < mustExecDur || - execTime > mustExecDur+100*time.Millisecond { - t.Errorf("Execution took: %v", execTime) + execTime := time.Since(tStart) + want := 5 * time.Millisecond * time.Duration(maxIter) + if diff := execTime - want; diff < 0 || diff > 5*time.Millisecond { + t.Errorf("total Guard duration for %d iterations over %d keys=%v: got %v want %v (diff %v, margin 5ms)", + maxIter, len(keys), keys, execTime, want, diff) } Guardian.lkMux.Lock() for _, key := range keys { if _, hasKey := Guardian.locks[key]; hasKey { - t.Error("Possible memleak") + t.Errorf("Guardian.locks[%q]: key should not exist (possible memleak)", key) } } Guardian.lkMux.Unlock() @@ -328,7 +335,7 @@ func TestGuardianGuardUnguardIDs(t *testing.T) { //for coverage purposes refID := utils.EmptyString lkIDs := []string{"test1", "test2", "test3"} - Guardian.GuardIDs(refID, time.Second, lkIDs...) + Guardian.GuardIDs(refID, 10*time.Millisecond, lkIDs...) Guardian.UnguardIDs(refID) if refID != utils.EmptyString { t.Errorf("\nExpected <%+v>, \nReceived <%+v>", utils.EmptyString, refID) @@ -337,7 +344,7 @@ func TestGuardianGuardUnguardIDs(t *testing.T) { func TestGuardianGuardUnguardIDsCase2(t *testing.T) { //for coverage purposes - lkIDs := []string{"test1", "test2", "test3"} + lkIDs := []string{"test4", "test5", "test6"} err := Guardian.Guard(func() error { return utils.ErrNotFound }, 10*time.Millisecond, lkIDs...) 
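Most of the revised timing assertions in this patch share one pattern: measure an elapsed (or scheduled) duration, subtract the expected value, and require the difference to stay inside a small one-sided margin. A self-contained sketch of that pattern in helper form; the assertDurationWithin function below is hypothetical and not part of this change:

package guardian

import (
	"testing"
	"time"
)

// assertDurationWithin fails the test unless got falls in [want, want+margin].
// The window is one-sided on purpose: the measured duration may overshoot the
// expectation slightly, but should never come in under it.
func assertDurationWithin(t *testing.T, got, want, margin time.Duration) {
	t.Helper()
	if diff := got - want; diff < 0 || diff > margin {
		t.Errorf("duration = %v, want %v (diff %v, margin %v)", got, want, diff, margin)
	}
}

Against TestGuardianMultipleKeys above, a single iteration's inline check would then read:

	tStart := time.Now()
	Guardian.Guard(delayHandler, 0, "key") // delayHandler sleeps 10ms
	assertDurationWithin(t, time.Since(tStart), 10*time.Millisecond, 5*time.Millisecond)
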
diff --git a/loaders/loader_test.go b/loaders/loader_test.go index 2ca7da92a..f8f15465a 100644 --- a/loaders/loader_test.go +++ b/loaders/loader_test.go @@ -3491,7 +3491,7 @@ func TestStoreLoadedDataDispatcherHosts(t *testing.T) { func TestStoreLoadedDataWithDelay(t *testing.T) { engine.Cache.Clear(nil) cfg := config.NewDefaultCGRConfig() - cfg.GeneralCfg().CachingDelay = 1 * time.Second + cfg.GeneralCfg().CachingDelay = 5 * time.Millisecond argExpect := &utils.AttrReloadCacheWithAPIOpts{ APIOpts: nil, Tenant: "", @@ -3530,13 +3530,13 @@ func TestStoreLoadedDataWithDelay(t *testing.T) { }, }, } - startTime := time.Now() + tStart := time.Now() if err := ldr.storeLoadedData(utils.MetaDispatcherHosts, lds, utils.MetaReload); err != nil { t.Error(err) } - elapsedTime := time.Since(startTime) - expectedDuration := 1 * time.Second - if elapsedTime < expectedDuration || elapsedTime >= 2*time.Second { - t.Errorf("Expected elapsed time of at least %v, but got %v", expectedDuration, elapsedTime) + got := time.Since(tStart) + want := cfg.GeneralCfg().CachingDelay + if diff := got - want; diff < 0 || diff > 4*time.Millisecond { + t.Errorf("storeLoadedData duration = %v, want at least %v (diff %v, margin 4ms)", got, want, diff) } } diff --git a/sessions/sessionscover_test.go b/sessions/sessionscover_test.go index 1af7cc842..ff576d580 100644 --- a/sessions/sessionscover_test.go +++ b/sessions/sessionscover_test.go @@ -213,16 +213,26 @@ func TestSetSTerminatorAutomaticTermination(t *testing.T) { dm := engine.NewDataManager(data, cfg.CacheCfg(), nil) sessions := NewSessionS(cfg, dm, nil) + sessionTTL := 3 * time.Millisecond opts := engine.MapEvent{ - utils.OptsSessionsTTL: "1s", + utils.OptsSessionsTTL: sessionTTL.String(), utils.OptsSessionsTTLLastUsage: "0s", } + tStart := time.Now() sessions.setSTerminator(ss, opts) + ss.lk.RLock() + done := ss.sTerminator.endChan + ss.lk.RUnlock() select { - case <-time.After(3 * time.Second): - t.Fatal("timeout") - case <-ss.sTerminator.endChan: + case <-time.After(5 * time.Millisecond): + t.Fatal("timeout waiting for terminator loop to end") + case <-done: + loopDur := time.Since(tStart) + want := 3 * time.Millisecond + if diff := loopDur - want; diff < 0 || diff > time.Millisecond { + t.Errorf("sTerminator loop duration = %v, want at least %v (diff %v, margin 1ms)", loopDur, want, diff) + } } }
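For context on the TestStatQueueRunBackupStop, TestThresholdsRunBackupStop and TestResourcesRunBackupStop cases above: they close stopBackup before calling runBackup and still expect exactly one store pass (dirty flipped to false) followed by a signal on loopStopped. That only holds if the backup loop stores first and consults the channel afterwards, as the comments in those tests state. A minimal, self-contained sketch of that loop shape; the type and field names mirror the tests, but the code is illustrative only and not the actual CGRateS implementation:

package backupsketch

import "time"

// backupLooper carries only the pieces the *RunBackupStop tests rely on.
type backupLooper struct {
	storeInterval time.Duration
	stopBackup    chan struct{} // closing it asks the loop to exit
	loopStopped   chan struct{} // buffered(1); signalled when the loop returns
	storeDirty    func()        // persists everything currently marked dirty
}

// runBackup stores dirty items, then checks stopBackup before waiting for the
// next interval. Because the check happens after the store, a stopBackup that
// was already closed still allows exactly one store pass; a closed channel is
// always ready to receive, so the loop then signals loopStopped and returns.
func (b *backupLooper) runBackup() {
	if b.storeInterval <= 0 {
		b.loopStopped <- struct{}{}
		return
	}
	for {
		b.storeDirty()
		select {
		case <-b.stopBackup:
			b.loopStopped <- struct{}{}
			return
		case <-time.After(b.storeInterval):
		}
	}
}

The loopStopped channel is buffered with capacity 1 in the tests, so the final send never blocks even though the tests only read from it after runBackup has returned.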