diff --git a/apier/v1/resourcesv1.go b/apier/v1/resourcesv1.go
index 3eef8142a..082bda9f8 100644
--- a/apier/v1/resourcesv1.go
+++ b/apier/v1/resourcesv1.go
@@ -132,20 +132,26 @@ func (apierV1 *ApierV1) SetResourceProfile(arg *ResourceWithCache, reply *string
 	if err := apierV1.CallCache(GetCacheOpt(arg.Cache), argCache); err != nil {
 		return utils.APIErrorHandler(err)
 	}
-	if err := apierV1.DataManager.SetResource(
-		&engine.Resource{Tenant: arg.Tenant,
-			ID:     arg.ID,
-			Usages: make(map[string]*engine.ResourceUsage)}); err != nil {
-		return utils.APIErrorHandler(err)
-	}
-	//handle caching for Resource
-	argCache = engine.ArgsGetCacheItem{
-		CacheID: utils.CacheResources,
-		ItemID:  arg.TenantID(),
-	}
-	if err := apierV1.CallCache(GetCacheOpt(arg.Cache), argCache); err != nil {
-		return utils.APIErrorHandler(err)
+	//add the resource only if it's not present
+	if has, err := apierV1.DataManager.HasData(utils.ResourcesPrefix, arg.ID, arg.Tenant); err != nil {
+		return err
+	} else if !has {
+		if err := apierV1.DataManager.SetResource(
+			&engine.Resource{Tenant: arg.Tenant,
+				ID:     arg.ID,
+				Usages: make(map[string]*engine.ResourceUsage)}); err != nil {
+			return utils.APIErrorHandler(err)
+		}
+		//handle caching for Resource
+		argCache = engine.ArgsGetCacheItem{
+			CacheID: utils.CacheResources,
+			ItemID:  arg.TenantID(),
+		}
+		if err := apierV1.CallCache(GetCacheOpt(arg.Cache), argCache); err != nil {
+			return utils.APIErrorHandler(err)
+		}
 	}
+
 	*reply = utils.OK
 	return nil
 }
diff --git a/apier/v1/stats.go b/apier/v1/stats.go
index 98a0eeeef..1a8d72782 100644
--- a/apier/v1/stats.go
+++ b/apier/v1/stats.go
@@ -84,25 +84,29 @@ func (apierV1 *ApierV1) SetStatQueueProfile(arg *StatQueueWithCache, reply *stri
 	if err := apierV1.CallCache(GetCacheOpt(arg.Cache), argCache); err != nil {
 		return utils.APIErrorHandler(err)
 	}
-	//compose metrics for StatQueue
-	metrics := make(map[string]engine.StatMetric)
-	for _, metric := range arg.Metrics {
-		if stsMetric, err := engine.NewStatMetric(metric.MetricID, arg.MinItems, metric.FilterIDs); err != nil {
-			return utils.APIErrorHandler(err)
-		} else {
-			metrics[metric.MetricID] = stsMetric
+	if has, err := apierV1.DataManager.HasData(utils.StatQueuePrefix, arg.ID, arg.Tenant); err != nil {
+		return err
+	} else if !has {
+		//compose metrics for StatQueue
+		metrics := make(map[string]engine.StatMetric)
+		for _, metric := range arg.Metrics {
+			if stsMetric, err := engine.NewStatMetric(metric.MetricID, arg.MinItems, metric.FilterIDs); err != nil {
+				return utils.APIErrorHandler(err)
+			} else {
+				metrics[metric.MetricID] = stsMetric
+			}
+		}
+		if err := apierV1.DataManager.SetStatQueue(&engine.StatQueue{Tenant: arg.Tenant, ID: arg.ID, SQMetrics: metrics}); err != nil {
+			return utils.APIErrorHandler(err)
+		}
+		//handle caching for StatQueues
+		argCache = engine.ArgsGetCacheItem{
+			CacheID: utils.CacheStatQueues,
+			ItemID:  arg.TenantID(),
+		}
+		if err := apierV1.CallCache(GetCacheOpt(arg.Cache), argCache); err != nil {
+			return utils.APIErrorHandler(err)
 		}
-	}
-	if err := apierV1.DataManager.SetStatQueue(&engine.StatQueue{Tenant: arg.Tenant, ID: arg.ID, SQMetrics: metrics}); err != nil {
-		return utils.APIErrorHandler(err)
-	}
-	//handle caching for StatQueues
-	argCache = engine.ArgsGetCacheItem{
-		CacheID: utils.CacheStatQueues,
-		ItemID:  arg.TenantID(),
-	}
-	if err := apierV1.CallCache(GetCacheOpt(arg.Cache), argCache); err != nil {
-		return utils.APIErrorHandler(err)
-	}
+	}
 
 	*reply = utils.OK
diff --git a/apier/v1/thresholds.go b/apier/v1/thresholds.go
index a35a7b2e2..ed3dc8453 100644
--- a/apier/v1/thresholds.go
+++ b/apier/v1/thresholds.go
@@ -118,17 +118,23 @@ func (apierV1 *ApierV1) SetThresholdProfile(args *ThresholdWithCache, reply *str
 	if err := apierV1.CallCache(GetCacheOpt(args.Cache), argCache); err != nil {
 		return utils.APIErrorHandler(err)
 	}
-	if err := apierV1.DataManager.SetThreshold(&engine.Threshold{Tenant: args.Tenant, ID: args.ID}); err != nil {
+
+	if has, err := apierV1.DataManager.HasData(utils.ThresholdPrefix, args.ID, args.Tenant); err != nil {
 		return err
+	} else if !has {
+		if err := apierV1.DataManager.SetThreshold(&engine.Threshold{Tenant: args.Tenant, ID: args.ID}); err != nil {
+			return err
+		}
+		//handle caching for Threshold
+		argCache = engine.ArgsGetCacheItem{
+			CacheID: utils.CacheThresholds,
+			ItemID:  args.TenantID(),
+		}
+		if err := apierV1.CallCache(GetCacheOpt(args.Cache), argCache); err != nil {
+			return utils.APIErrorHandler(err)
+		}
 	}
-	//handle caching for Threshold
-	argCache = engine.ArgsGetCacheItem{
-		CacheID: utils.CacheThresholds,
-		ItemID:  args.TenantID(),
-	}
-	if err := apierV1.CallCache(GetCacheOpt(args.Cache), argCache); err != nil {
-		return utils.APIErrorHandler(err)
-	}
+
 	*reply = utils.OK
 	return nil
 }
diff --git a/engine/storage_mongo_datadb.go b/engine/storage_mongo_datadb.go
index d71b70577..77cf247a8 100644
--- a/engine/storage_mongo_datadb.go
+++ b/engine/storage_mongo_datadb.go
@@ -670,6 +670,8 @@ func (ms *MongoStorage) HasDataDrv(category, subject, tenant string) (has bool,
 	case utils.StatQueueProfilePrefix:
 		count, err = ms.getCol(colSqp).Count(sctx, bson.M{"tenant": tenant, "id": subject})
 	case utils.ThresholdPrefix:
+		count, err = ms.getCol(colThs).Count(sctx, bson.M{"tenant": tenant, "id": subject})
+	case utils.ThresholdProfilePrefix:
 		count, err = ms.getCol(colTps).Count(sctx, bson.M{"tenant": tenant, "id": subject})
 	case utils.FilterPrefix:
 		count, err = ms.getCol(colFlt).Count(sctx, bson.M{"tenant": tenant, "id": subject})
diff --git a/engine/tpreader.go b/engine/tpreader.go
index f23e6e745..dbf075b79 100644
--- a/engine/tpreader.go
+++ b/engine/tpreader.go
@@ -60,11 +60,6 @@ type TpReader struct {
 	resources  []*utils.TenantID // IDs of resources which need creation based on resourceProfiles
 	statQueues []*utils.TenantID // IDs of statQueues which need creation based on statQueueProfiles
 	thresholds []*utils.TenantID // IDs of thresholds which need creation based on thresholdProfiles
-	suppliers  []*utils.TenantID // IDs of suppliers which need creation based on sppProfiles
-	attrTntID  []*utils.TenantID // IDs of suppliers which need creation based on attributeProfiles
-	chargers   []*utils.TenantID // IDs of chargers which need creation based on chargerProfiles
-	dpps       []*utils.TenantID // IDs of dispatchers which need creation based on dispatcherProfiles
-	dphs       []*utils.TenantID // IDs of dispatcherHosts which need creation based on dispatcherHosts
 	revDests,
 	acntActionPlans map[string][]string
 	cacheS rpcclient.RpcClientConnection
@@ -1208,13 +1203,6 @@ func (tpr *TpReader) LoadSupplierProfilesFiltered(tag string) (err error) {
 		mapRsPfls[utils.TenantID{Tenant: rl.Tenant, ID: rl.ID}] = rl
 	}
 	tpr.sppProfiles = mapRsPfls
-	for tntID := range mapRsPfls {
-		if has, err := tpr.dm.HasData(utils.SupplierProfilePrefix, tntID.ID, tntID.Tenant); err != nil {
-			return err
-		} else if !has {
-			tpr.suppliers = append(tpr.suppliers, &utils.TenantID{Tenant: tntID.Tenant, ID: tntID.ID})
-		}
-	}
 	return nil
 }
 
@@ -1232,13 +1220,6 @@ func (tpr *TpReader) LoadAttributeProfilesFiltered(tag string) (err error) {
 		mapAttrPfls[utils.TenantID{Tenant: attr.Tenant, ID: attr.ID}] = attr
 	}
 	tpr.attributeProfiles = mapAttrPfls
-	for tntID := range mapAttrPfls {
-		if has, err := tpr.dm.HasData(utils.AttributeProfilePrefix, tntID.ID, tntID.Tenant); err != nil {
-			return err
-		} else if !has {
-			tpr.attrTntID = append(tpr.attrTntID, &utils.TenantID{Tenant: tntID.Tenant, ID: tntID.ID})
-		}
-	}
 	return nil
 }
 
@@ -1256,13 +1237,6 @@ func (tpr *TpReader) LoadChargerProfilesFiltered(tag string) (err error) {
 		mapChargerProfile[utils.TenantID{Tenant: rl.Tenant, ID: rl.ID}] = rl
 	}
 	tpr.chargerProfiles = mapChargerProfile
-	for tntID := range mapChargerProfile {
-		if has, err := tpr.dm.HasData(utils.ChargerProfilePrefix, tntID.ID, tntID.Tenant); err != nil {
-			return err
-		} else if !has {
-			tpr.chargers = append(tpr.chargers, &utils.TenantID{Tenant: tntID.Tenant, ID: tntID.ID})
-		}
-	}
 	return nil
 }
 
@@ -1280,13 +1254,6 @@ func (tpr *TpReader) LoadDispatcherProfilesFiltered(tag string) (err error) {
 		mapDispatcherProfile[utils.TenantID{Tenant: rl.Tenant, ID: rl.ID}] = rl
 	}
 	tpr.dispatcherProfiles = mapDispatcherProfile
-	for tntID := range mapDispatcherProfile {
-		if has, err := tpr.dm.HasData(utils.DispatcherProfilePrefix, tntID.ID, tntID.Tenant); err != nil {
-			return err
-		} else if !has {
-			tpr.dpps = append(tpr.dpps, &utils.TenantID{Tenant: tntID.Tenant, ID: tntID.ID})
-		}
-	}
 	return nil
 }
 
@@ -1304,13 +1271,6 @@ func (tpr *TpReader) LoadDispatcherHostsFiltered(tag string) (err error) {
 		mapDispatcherHost[utils.TenantID{Tenant: rl.Tenant, ID: rl.ID}] = rl
 	}
 	tpr.dispatcherHosts = mapDispatcherHost
-	for tntID := range mapDispatcherHost {
-		if has, err := tpr.dm.HasData(utils.DispatcherHostPrefix, tntID.ID, tntID.Tenant); err != nil {
-			return err
-		} else if !has {
-			tpr.dphs = append(tpr.dphs, &utils.TenantID{Tenant: tntID.Tenant, ID: tntID.ID})
-		}
-	}
 	return nil
 }
 
diff --git a/general_tests/accounts_it_test.go b/general_tests/accounts_it_test.go
index e2f702d5e..fe2c123cc 100644
--- a/general_tests/accounts_it_test.go
+++ b/general_tests/accounts_it_test.go
@@ -254,6 +254,7 @@ func testV1AccSendToThreshold(t *testing.T) {
 			MaxHits:   -1,
 			MinSleep:  time.Duration(1 * time.Second),
 			Weight:    20.0,
+			Async:     true,
 			ActionIDs: []string{"DISABLE_LOG"},
 		}