diff --git a/cmd/cgr-tester/cgr-tester.go b/cmd/cgr-tester/cgr-tester.go index f0b220794..05bb50309 100644 --- a/cmd/cgr-tester/cgr-tester.go +++ b/cmd/cgr-tester/cgr-tester.go @@ -84,7 +84,7 @@ func durInternalRater(cd *engine.CallDescriptorWithArgDispatcher) (time.Duration defer dm.DataDB().Close() engine.SetDataStorage(dm) if err := dm.LoadDataDBCache(nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil); err != nil { + nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil); err != nil { return nilDuration, fmt.Errorf("Cache rating error: %s", err.Error()) } log.Printf("Runnning %d cycles...", *runs) diff --git a/config/config_defaults.go b/config/config_defaults.go index 6b104cf29..98fd90afa 100755 --- a/config/config_defaults.go +++ b/config/config_defaults.go @@ -208,6 +208,7 @@ const CGRATES_CFG_JSON = ` "*charger_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "precache": false, "replicate": false}, // control charger profile caching "*dispatcher_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "precache": false, "replicate": false}, // control dispatcher profile caching "*dispatcher_hosts": {"limit": -1, "ttl": "", "static_ttl": false, "precache": false, "replicate": false}, // control dispatcher hosts caching + "*rate_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "precache": false, "replicate": false}, // control rate profile caching "*resource_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "replicate": false}, // control resource filter indexes caching "*stat_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "replicate": false}, // control stat filter indexes caching "*threshold_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "replicate": false}, // control threshold filter indexes caching @@ -215,6 +216,7 @@ const CGRATES_CFG_JSON = ` "*attribute_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "replicate": false}, // control attribute filter indexes caching "*charger_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "replicate": false}, // control charger filter indexes caching "*dispatcher_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "replicate": false}, // control dispatcher filter indexes caching + "*rate_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "replicate": false}, // control rate filter indexes caching "*dispatcher_routes": {"limit": -1, "ttl": "", "static_ttl": false, "replicate": false}, // control dispatcher routes caching "*dispatcher_loads": {"limit": -1, "ttl": "", "static_ttl": false, "replicate": false}, // control dispatcher load ( in case of *load strategy ) "*dispatchers": {"limit": -1, "ttl": "", "static_ttl": false, "replicate": false}, // control dispatcher interface diff --git a/config/config_json_test.go b/config/config_json_test.go index bacc7593d..89f9e5da2 100755 --- a/config/config_json_test.go +++ b/config/config_json_test.go @@ -139,6 +139,9 @@ func TestCacheJsonCfg(t *testing.T) { utils.CacheDispatcherProfiles: {Limit: utils.IntPointer(-1), Ttl: utils.StringPointer(""), Static_ttl: utils.BoolPointer(false), Precache: utils.BoolPointer(false), Replicate: utils.BoolPointer(false)}, + utils.CacheRateProfiles: {Limit: utils.IntPointer(-1), + Ttl: utils.StringPointer(""), Static_ttl: utils.BoolPointer(false), + Precache: utils.BoolPointer(false), Replicate: utils.BoolPointer(false)}, utils.CacheDispatcherHosts: {Limit: utils.IntPointer(-1), 
Ttl: utils.StringPointer(""), Static_ttl: utils.BoolPointer(false), Precache: utils.BoolPointer(false), Replicate: utils.BoolPointer(false)}, @@ -163,6 +166,9 @@ func TestCacheJsonCfg(t *testing.T) { utils.CacheDispatcherFilterIndexes: {Limit: utils.IntPointer(-1), Ttl: utils.StringPointer(""), Static_ttl: utils.BoolPointer(false), Replicate: utils.BoolPointer(false)}, + utils.CacheRateFilterIndexes: {Limit: utils.IntPointer(-1), + Ttl: utils.StringPointer(""), Static_ttl: utils.BoolPointer(false), + Replicate: utils.BoolPointer(false)}, utils.CacheDispatcherRoutes: {Limit: utils.IntPointer(-1), Ttl: utils.StringPointer(""), Static_ttl: utils.BoolPointer(false), Replicate: utils.BoolPointer(false)}, diff --git a/config/config_test.go b/config/config_test.go index 61dc42461..39c91c16c 100755 --- a/config/config_test.go +++ b/config/config_test.go @@ -667,6 +667,8 @@ func TestCgrCfgJSONDefaultsCacheCFG(t *testing.T) { TTL: time.Duration(0), StaticTTL: false, Precache: false}, utils.CacheDispatcherProfiles: {Limit: -1, TTL: time.Duration(0), StaticTTL: false, Precache: false}, + utils.CacheRateProfiles: {Limit: -1, + TTL: time.Duration(0), StaticTTL: false, Precache: false}, utils.CacheDispatcherHosts: {Limit: -1, TTL: time.Duration(0), StaticTTL: false, Precache: false}, utils.CacheResourceFilterIndexes: {Limit: -1, @@ -683,6 +685,8 @@ func TestCgrCfgJSONDefaultsCacheCFG(t *testing.T) { TTL: time.Duration(0), StaticTTL: false, Precache: false}, utils.CacheDispatcherFilterIndexes: {Limit: -1, TTL: time.Duration(0), StaticTTL: false, Precache: false}, + utils.CacheRateFilterIndexes: {Limit: -1, + TTL: time.Duration(0), StaticTTL: false, Precache: false}, utils.CacheDispatcherRoutes: {Limit: -1, TTL: time.Duration(0), StaticTTL: false, Precache: false}, utils.CacheDispatcherLoads: {Limit: -1, diff --git a/ees/eereq.go b/ees/eereq.go index ca2206aa0..3004740e3 100644 --- a/ees/eereq.go +++ b/ees/eereq.go @@ -144,12 +144,21 @@ func (eeR *EventExporterRequest) SetFields(tplFlds []*config.FCTemplate) (err er } return } - fullPath := &utils.FullPath{ - PathItems: tplFld.GetPathItems().Clone(), // need to clone so me do not modify the template - Path: tplFld.Path, + var fullPath *utils.FullPath + var itmPath []string + if fullPath, err = eeR.dynamicProvider.GetFullFieldPath(tplFld.Path); err != nil { + return + } else if fullPath == nil { // no dynamic path + fullPath = &utils.FullPath{ + PathItems: tplFld.GetPathItems().Clone(), // need to clone so me do not modify the template + Path: tplFld.Path, + } + itmPath = tplFld.GetPathSlice()[1:] + } else { + itmPath = fullPath.PathItems.Slice()[1:] } - nMItm := &config.NMItem{Data: out, Path: tplFld.GetPathSlice()[1:], Config: tplFld} + nMItm := &config.NMItem{Data: out, Path: itmPath, Config: tplFld} switch tplFld.Type { case utils.META_COMPOSED: err = utils.ComposeNavMapVal(eeR, fullPath, nMItm) diff --git a/engine/caches.go b/engine/caches.go index 19ab9a4c8..298ac3ad0 100644 --- a/engine/caches.go +++ b/engine/caches.go @@ -59,6 +59,9 @@ func init() { gob.Register(new(DispatcherHost)) gob.Register(new(DispatcherHostProfile)) gob.Register(new(DispatcherHostWithArgDispatcher)) + // RateProfiles + gob.Register(new(RateProfile)) + gob.Register(new(RateProfileWithArgDispatcher)) // CDRs gob.Register(new(EventCost)) @@ -443,6 +446,12 @@ func (chS *CacheS) V1ReloadCache(attrs utils.AttrReloadCacheWithArgDispatcher, r return } } + if len(attrs.RateProfileIDs) != 0 { + // RateProfiles + if err = chS.reloadCache(utils.RateProfilePrefix, attrs.RateProfileIDs); 
err != nil { + return + } + } //get loadIDs from database for all types loadIDs, err := chS.dm.GetItemLoadIDs(utils.EmptyString, false) @@ -486,6 +495,7 @@ func (chS *CacheS) V1LoadCache(args utils.AttrReloadCacheWithArgDispatcher, repl args.ChargerProfileIDs, args.DispatcherProfileIDs, args.DispatcherHostIDs, + args.RateProfileIDs, ); err != nil { return utils.NewErrServerError(err) } @@ -571,6 +581,9 @@ func populateCacheLoadIDs(loadIDs map[string]int64, attrs utils.ArgsCache) (cach if attrs.DispatcherProfileIDs == nil || len(attrs.DispatcherProfileIDs) != 0 { cacheLoadIDs[utils.CacheDispatcherProfiles] = loadIDs[utils.CacheDispatcherProfiles] } + if attrs.RateProfileIDs == nil || len(attrs.RateProfileIDs) != 0 { + cacheLoadIDs[utils.CacheRateProfiles] = loadIDs[utils.CacheRateProfiles] + } return } diff --git a/engine/datamanager.go b/engine/datamanager.go index 64ba30957..11dd004bc 100644 --- a/engine/datamanager.go +++ b/engine/datamanager.go @@ -117,7 +117,7 @@ func (dm *DataManager) DataDB() DataDB { func (dm *DataManager) LoadDataDBCache(dstIDs, rvDstIDs, rplIDs, rpfIDs, actIDs, aplIDs, aaPlIDs, atrgIDs, sgIDs, rpIDs, resIDs, stqIDs, stqpIDs, thIDs, thpIDs, fltrIDs, - rPrflIDs, alsPrfIDs, cppIDs, dppIDs, dphIDs []string) (err error) { + rPrflIDs, alsPrfIDs, cppIDs, dppIDs, dphIDs, ratePrfIDs []string) (err error) { if dm == nil { err = utils.ErrNoDatabaseConn return @@ -158,6 +158,7 @@ func (dm *DataManager) LoadDataDBCache(dstIDs, rvDstIDs, rplIDs, rpfIDs, actIDs, utils.ChargerProfilePrefix: cppIDs, utils.DispatcherProfilePrefix: dppIDs, utils.DispatcherHostPrefix: dphIDs, + utils.RateProfilePrefix: ratePrfIDs, } { if err = dm.CacheDataFromDB(key, ids, false); err != nil { return @@ -296,6 +297,9 @@ func (dm *DataManager) CacheDataFromDB(prfx string, ids []string, mustBeCached b case utils.DispatcherHostPrefix: tntID := utils.NewTenantID(dataID) _, err = dm.GetDispatcherHost(tntID.Tenant, tntID.ID, false, true, utils.NonTransactional) + case utils.RateProfilePrefix: + tntID := utils.NewTenantID(dataID) + _, err = dm.GetRateProfile(tntID.Tenant, tntID.ID, false, true, utils.NonTransactional) case utils.AttributeFilterIndexes: err = dm.MatchFilterIndexFromKey(utils.CacheAttributeFilterIndexes, dataID) case utils.ResourceFilterIndexes: @@ -310,6 +314,8 @@ func (dm *DataManager) CacheDataFromDB(prfx string, ids []string, mustBeCached b err = dm.MatchFilterIndexFromKey(utils.CacheChargerFilterIndexes, dataID) case utils.DispatcherFilterIndexes: err = dm.MatchFilterIndexFromKey(utils.CacheDispatcherFilterIndexes, dataID) + case utils.RateFilterIndexes: + err = dm.MatchFilterIndexFromKey(utils.CacheRateFilterIndexes, dataID) case utils.LoadIDPrefix: _, err = dm.GetItemLoadIDs(utils.EmptyString, true) } @@ -3092,6 +3098,143 @@ func (dm *DataManager) SetLoadIDs(loadIDs map[string]int64) (err error) { return } +func (dm *DataManager) GetRateProfile(tenant, id string, cacheRead, cacheWrite bool, + transactionID string) (rpp *RateProfile, err error) { + tntID := utils.ConcatenatedKey(tenant, id) + if cacheRead { + if x, ok := Cache.Get(utils.CacheRateProfiles, tntID); ok { + if x == nil { + return nil, utils.ErrNotFound + } + return x.(*RateProfile), nil + } + } + if dm == nil { + err = utils.ErrNoDatabaseConn + return + } + rpp, err = dm.dataDB.GetRateProfileDrv(tenant, id) + if err != nil { + if itm := config.CgrConfig().DataDbCfg().Items[utils.MetaRateProfiles]; err == utils.ErrNotFound && itm.Remote { + if err = dm.connMgr.Call(config.CgrConfig().DataDbCfg().RmtConns, nil, + 
utils.ReplicatorSv1GetRateProfile, + &utils.TenantIDWithArgDispatcher{ + TenantID: &utils.TenantID{Tenant: tenant, ID: id}, + ArgDispatcher: &utils.ArgDispatcher{ + APIKey: utils.StringPointer(itm.APIKey), + RouteID: utils.StringPointer(itm.RouteID), + }}, &rpp); err == nil { + err = dm.dataDB.SetRateProfileDrv(rpp) + } + } + if err != nil { + err = utils.CastRPCErr(err) + if err == utils.ErrNotFound && cacheWrite { + if errCh := Cache.Set(utils.CacheRateProfiles, tntID, nil, nil, + cacheCommit(transactionID), transactionID); errCh != nil { + return nil, errCh + } + + } + return nil, err + } + } + if cacheWrite { + if errCh := Cache.Set(utils.CacheRateProfiles, tntID, rpp, nil, + cacheCommit(transactionID), transactionID); errCh != nil { + return nil, errCh + } + } + return +} + +func (dm *DataManager) SetRateProfile(rpp *RateProfile, withIndex bool) (err error) { + if dm == nil { + err = utils.ErrNoDatabaseConn + return + } + oldRpp, err := dm.GetRateProfile(rpp.Tenant, rpp.ID, true, false, utils.NonTransactional) + if err != nil && err != utils.ErrNotFound { + return err + } + if err = dm.DataDB().SetRateProfileDrv(rpp); err != nil { + return err + } + if withIndex { + if oldRpp != nil { + var needsRemove bool + for _, fltrID := range oldRpp.FilterIDs { + if !utils.IsSliceMember(rpp.FilterIDs, fltrID) { + needsRemove = true + } + } + + if needsRemove { + if err = NewFilterIndexer(dm, utils.RateProfilePrefix, + rpp.Tenant).RemoveItemFromIndex(rpp.Tenant, rpp.ID, oldRpp.FilterIDs); err != nil { + return + } + } + } + if err = createAndIndex(utils.RateProfilePrefix, rpp.Tenant, utils.EmptyString, + rpp.ID, rpp.FilterIDs, dm); err != nil { + return + } + + } + if itm := config.CgrConfig().DataDbCfg().Items[utils.MetaRateProfiles]; itm.Replicate { + var reply string + if err = dm.connMgr.Call(config.CgrConfig().DataDbCfg().RplConns, nil, + utils.ReplicatorSv1SetRateProfile, + &RateProfileWithArgDispatcher{ + RateProfile: rpp, + ArgDispatcher: &utils.ArgDispatcher{ + APIKey: utils.StringPointer(itm.APIKey), + RouteID: utils.StringPointer(itm.RouteID), + }}, &reply); err != nil { + err = utils.CastRPCErr(err) + return + } + } + return +} + +func (dm *DataManager) RemoveRateProfile(tenant, id string, + transactionID string, withIndex bool) (err error) { + if dm == nil { + err = utils.ErrNoDatabaseConn + return + } + oldRpp, err := dm.GetRateProfile(tenant, id, true, false, utils.NonTransactional) + if err != nil && err != utils.ErrNotFound { + return err + } + if err = dm.DataDB().RemoveRateProfileDrv(tenant, id); err != nil { + return + } + if oldRpp == nil { + return utils.ErrNotFound + } + if withIndex { + if err = NewFilterIndexer(dm, utils.RateProfilePrefix, + tenant).RemoveItemFromIndex(tenant, id, oldRpp.FilterIDs); err != nil { + return + } + } + if itm := config.CgrConfig().DataDbCfg().Items[utils.MetaRateProfiles]; itm.Replicate { + var reply string + dm.connMgr.Call(config.CgrConfig().DataDbCfg().RplConns, nil, + utils.ReplicatorSv1RemoveRateProfile, + &utils.TenantIDWithArgDispatcher{ + TenantID: &utils.TenantID{Tenant: tenant, ID: id}, + ArgDispatcher: &utils.ArgDispatcher{ + APIKey: utils.StringPointer(itm.APIKey), + RouteID: utils.StringPointer(itm.RouteID), + }}, &reply) + } + return +} + // Reconnect reconnects to the DB when the config was changed func (dm *DataManager) Reconnect(marshaller string, newcfg *config.DataDbCfg) (err error) { d, err := NewDataDBConn(newcfg.DataDbType, newcfg.DataDbHost, newcfg.DataDbPort, newcfg.DataDbName, diff --git a/engine/libtest.go
b/engine/libtest.go index c210cec75..b846d7b41 100644 --- a/engine/libtest.go +++ b/engine/libtest.go @@ -279,6 +279,11 @@ cgrates.org,D1,,,,*first,,C2,*lt:~*req.Usage:10,10,false,192.168.56.204, #Tenant[0],ID[1],Address[2],Transport[3],TLS[4] cgrates.org,ALL1,127.0.0.1:2012,*json,true cgrates.org,ALL1,127.0.0.1:3012,*json,false +` + RateProfileCSVContent = ` +#Tenant,ID,FilterIDs,ActivationInterval,Weight,ConnectFee,RoundingMethod,RoundingDecimals,MinCost,MaxCost,MaxCostStrategy,RateID,RateFilterIDs,RateWeight,RateValue,RateUnit,RateIncrement,RateBlocker +cgrates.org,RP1,*string:~*req.Subject:1001,,0,0.1,*up,4,0.1,0.6,*free,FIRST_GI,*gi:~*req.Usage:0,0,0.12,1m,1m,FALSE +cgrates.org,RP1,*string:~*req.Subject:1002,,,,,,,,,SECOND_GI,*gi:~*req.Usage:1m,10,0.06,1m,1s,FALSE ` ) diff --git a/engine/loader_csv_test.go b/engine/loader_csv_test.go index 041ede7c3..2a00e7a69 100644 --- a/engine/loader_csv_test.go +++ b/engine/loader_csv_test.go @@ -42,7 +42,7 @@ func init() { ActionsCSVContent, ActionPlansCSVContent, ActionTriggersCSVContent, AccountActionsCSVContent, ResourcesCSVContent, StatsCSVContent, ThresholdsCSVContent, FiltersCSVContent, RoutesCSVContent, AttributesCSVContent, ChargersCSVContent, DispatcherCSVContent, - DispatcherHostCSVContent), testTPID, "", nil, nil) + DispatcherHostCSVContent, RateProfileCSVContent), testTPID, "", nil, nil) if err != nil { log.Print("error when creating TpReader:", err) } diff --git a/engine/model_helpers.go b/engine/model_helpers.go index 5e2b090c4..c3a371fe0 100644 --- a/engine/model_helpers.go +++ b/engine/model_helpers.go @@ -3000,3 +3000,254 @@ func DispatcherHostToAPI(dph *DispatcherHost) (tpDPH *utils.TPDispatcherHost) { } return } + +// RateProfileMdls is used +type RateProfileMdls []*RateProfileMdl + +// CSVHeader return the header for csv fields as a slice of string +func (tps RateProfileMdls) CSVHeader() (result []string) { + return []string{"#" + utils.Tenant, utils.ID, utils.FilterIDs, utils.ActivationIntervalString, + utils.Weight, utils.ConnectFee, utils.RoundingMethod, utils.RoundingDecimals, utils.MinCost, + utils.MaxCost, utils.MaxCostStrategy, utils.RateID, utils.RateFilterIDs, utils.RateWeight, + utils.RateValue, utils.RateUnit, utils.RateIncrement, utils.RateBlocker, + } +} + +func (tps RateProfileMdls) AsTPRateProfile() (result []*TPRateProfile, err error) { + filtermap := make(map[string]utils.StringMap) + mst := make(map[string]*TPRateProfile) + rateMap := make(map[string]map[string]*Rate) + for _, tp := range tps { + tenID := (&utils.TenantID{Tenant: tp.Tenant, ID: tp.ID}).TenantID() + rPrf, found := mst[tenID] + if !found { + rPrf = &TPRateProfile{ + TPid: tp.Tpid, + Tenant: tp.Tenant, + ID: tp.ID, + } + } + if tp.RateID != utils.EmptyString { + if _, has := rateMap[tenID]; !has { + rateMap[(&utils.TenantID{Tenant: tp.Tenant, ID: tp.ID}).TenantID()] = make(map[string]*Rate) + } + rateID := tp.RateID + if tp.RateFilterIDs != utils.EmptyString { + rateID = utils.ConcatenatedKey(rateID, + utils.NewStringSet(strings.Split(tp.RateFilterIDs, utils.INFIELD_SEP)).Sha1()) + } + rate, found := rateMap[tenID][rateID] + if !found { + rate = &Rate{ + ID: tp.RateID, + Blocker: tp.RateBlocker, + } + } + if tp.RateFilterIDs != utils.EmptyString { + rateFilterSplit := strings.Split(tp.RateFilterIDs, utils.INFIELD_SEP) + rate.FilterIDs = append(rate.FilterIDs, rateFilterSplit...) 
+ } + if tp.RateWeight != 0 { + rate.Weight = tp.RateWeight + } + if tp.RateValue != 0 { + rate.Value = tp.RateValue + } + if tp.RateIncrement != utils.EmptyString { + if rate.Increment, err = utils.ParseDurationWithNanosecs(tp.RateIncrement); err != nil { + return + } + } + if tp.RateUnit != utils.EmptyString { + if rate.Unit, err = utils.ParseDurationWithNanosecs(tp.RateUnit); err != nil { + return + } + } + + rateMap[tenID][rateID] = rate + } + + if tp.Weight != 0 { + rPrf.Weight = tp.Weight + } + if tp.ConnectFee != 0 { + rPrf.ConnectFee = tp.ConnectFee + } + if tp.RoundingMethod != utils.EmptyString { + rPrf.RoundingMethod = tp.RoundingMethod + } + if tp.RoundingDecimals != 0 { + rPrf.RoundingDecimals = tp.RoundingDecimals + } + if tp.MinCost != 0 { + rPrf.MinCost = tp.MinCost + } + if tp.MaxCost != 0 { + rPrf.MaxCost = tp.MaxCost + } + if tp.MaxCostStrategy != utils.EmptyString { + rPrf.MaxCostStrategy = tp.MaxCostStrategy + } + if tp.ActivationInterval != utils.EmptyString { + rPrf.ActivationInterval = new(utils.TPActivationInterval) + aiSplt := strings.Split(tp.ActivationInterval, utils.INFIELD_SEP) + if len(aiSplt) == 2 { + rPrf.ActivationInterval.ActivationTime = aiSplt[0] + rPrf.ActivationInterval.ExpiryTime = aiSplt[1] + } else if len(aiSplt) == 1 { + rPrf.ActivationInterval.ActivationTime = aiSplt[0] + } + } + if tp.FilterIDs != utils.EmptyString { + if _, has := filtermap[(&utils.TenantID{Tenant: tp.Tenant, ID: tp.ID}).TenantID()]; !has { + filtermap[(&utils.TenantID{Tenant: tp.Tenant, ID: tp.ID}).TenantID()] = make(utils.StringMap) + } + filterSplit := strings.Split(tp.FilterIDs, utils.INFIELD_SEP) + for _, filter := range filterSplit { + filtermap[(&utils.TenantID{Tenant: tp.Tenant, ID: tp.ID}).TenantID()][filter] = true + } + } + mst[(&utils.TenantID{Tenant: tp.Tenant, ID: tp.ID}).TenantID()] = rPrf + } + result = make([]*TPRateProfile, len(mst)) + i := 0 + for tntID, th := range mst { + result[i] = th + for _, rate := range rateMap[tntID] { + result[i].Rates = append(result[i].Rates, rate) + } + for filterdata := range filtermap[tntID] { + result[i].FilterIDs = append(result[i].FilterIDs, filterdata) + } + i++ + } + return +} + +func APItoModelTPRateProfile(tPrf *TPRateProfile) (mdls RateProfileMdls) { + if len(tPrf.Rates) == 0 { + return + } + for i, rate := range tPrf.Rates { + mdl := &RateProfileMdl{ + Tenant: tPrf.Tenant, + Tpid: tPrf.TPid, + ID: tPrf.ID, + } + if i == 0 { + for i, val := range tPrf.FilterIDs { + if i != 0 { + mdl.FilterIDs += utils.INFIELD_SEP + } + mdl.FilterIDs += val + } + + if tPrf.ActivationInterval != nil { + if tPrf.ActivationInterval.ActivationTime != utils.EmptyString { + mdl.ActivationInterval = tPrf.ActivationInterval.ActivationTime + } + if tPrf.ActivationInterval.ExpiryTime != utils.EmptyString { + mdl.ActivationInterval += utils.INFIELD_SEP + tPrf.ActivationInterval.ExpiryTime + } + } + mdl.Weight = tPrf.Weight + mdl.ConnectFee = tPrf.ConnectFee + mdl.RoundingMethod = tPrf.RoundingMethod + mdl.RoundingDecimals = tPrf.RoundingDecimals + mdl.MinCost = tPrf.MinCost + mdl.MaxCost = tPrf.MaxCost + mdl.MaxCostStrategy = tPrf.MaxCostStrategy + } + mdl.RateID = rate.ID + for i, val := range rate.FilterIDs { + if i != 0 { + mdl.RateFilterIDs += utils.INFIELD_SEP + } + mdl.RateFilterIDs += val + } + mdl.RateWeight = rate.Weight + mdl.RateValue = rate.Value + mdl.RateUnit = rate.Unit.String() + mdl.RateIncrement = rate.Increment.String() + mdl.RateBlocker = rate.Blocker + mdls = append(mdls, mdl) + } + return +} + +func APItoRateProfile(tpRp
*TPRateProfile, timezone string) (rp *RateProfile, err error) { + rp = &RateProfile{ + Tenant: tpRp.Tenant, + ID: tpRp.ID, + FilterIDs: make([]string, len(tpRp.FilterIDs)), + Weight: tpRp.Weight, + ConnectFee: tpRp.ConnectFee, + RoundingMethod: tpRp.RoundingMethod, + RoundingDecimals: tpRp.RoundingDecimals, + MinCost: tpRp.MinCost, + MaxCost: tpRp.MaxCost, + MaxCostStrategy: tpRp.MaxCostStrategy, + Rates: make([]*Rate, len(tpRp.Rates)), + } + for i, stp := range tpRp.FilterIDs { + rp.FilterIDs[i] = stp + } + if tpRp.ActivationInterval != nil { + if rp.ActivationInterval, err = tpRp.ActivationInterval.AsActivationInterval(timezone); err != nil { + return nil, err + } + } + for i, rate := range tpRp.Rates { + rp.Rates[i] = &Rate{ + ID: rate.ID, + Weight: rate.Weight, + Blocker: rate.Blocker, + FilterIDs: rate.FilterIDs, + Value: rate.Value, + Unit: rate.Unit, + Increment: rate.Increment, + } + } + return rp, nil +} + +func RateProfileToAPI(rp *RateProfile) (tpRp *TPRateProfile) { + tpRp = &TPRateProfile{ + Tenant: rp.Tenant, + ID: rp.ID, + FilterIDs: make([]string, len(rp.FilterIDs)), + ActivationInterval: new(utils.TPActivationInterval), + Weight: rp.Weight, + ConnectFee: rp.ConnectFee, + RoundingMethod: rp.RoundingMethod, + RoundingDecimals: rp.RoundingDecimals, + MinCost: rp.MinCost, + MaxCost: rp.MaxCost, + MaxCostStrategy: rp.MaxCostStrategy, + Rates: make([]*Rate, len(rp.Rates)), + } + + for i, rate := range rp.Rates { + tpRp.Rates[i] = &Rate{ + ID: rate.ID, + Weight: rate.Weight, + Blocker: rate.Blocker, + FilterIDs: rate.FilterIDs, + Value: rate.Value, + Unit: rate.Unit, + Increment: rate.Increment, + } + } + for i, fli := range rp.FilterIDs { + tpRp.FilterIDs[i] = fli + } + if rp.ActivationInterval != nil { + if !rp.ActivationInterval.ActivationTime.IsZero() { + tpRp.ActivationInterval.ActivationTime = rp.ActivationInterval.ActivationTime.Format(time.RFC3339) + } + if !rp.ActivationInterval.ExpiryTime.IsZero() { + tpRp.ActivationInterval.ExpiryTime = rp.ActivationInterval.ExpiryTime.Format(time.RFC3339) + } + } + return +} diff --git a/engine/models.go b/engine/models.go index 6bba9b7f9..e5609b8b6 100644 --- a/engine/models.go +++ b/engine/models.go @@ -429,3 +429,31 @@ type TPDispatcherHost struct { TLS bool `index:"4" re:""` CreatedAt time.Time } + +type RateProfileMdl struct { + PK uint `gorm:"primary_key"` + Tpid string + Tenant string `index:"0" re:""` + ID string `index:"1" re:""` + FilterIDs string `index:"2" re:""` + ActivationInterval string `index:"3" re:""` + Weight float64 `index:"4" re:"\d+\.?\d*"` + ConnectFee float64 `index:"5" re:"\d+\.?\d*"` + RoundingMethod string `index:"6" re:""` + RoundingDecimals int `index:"7" re:""` + MinCost float64 `index:"8" re:"\d+\.?\d*""` + MaxCost float64 `index:"9" re:"\d+\.?\d*"` + MaxCostStrategy string `index:"10" re:""` + RateID string `index:"11" re:""` + RateFilterIDs string `index:"12" re:""` + RateWeight float64 `index:"13" re:"\d+\.?\d*"` + RateValue float64 `index:"14" re:"\d+\.?\d*"` + RateUnit string `index:"15" re:""` + RateIncrement string `index:"16" re:""` + RateBlocker bool `index:"17" re:""` + CreatedAt time.Time +} + +func (_ RateProfileMdl) TableName() string { + return utils.TBLTPRateProfiles +} diff --git a/rates/rateprofile.go b/engine/rateprofile.go similarity index 72% rename from rates/rateprofile.go rename to engine/rateprofile.go index a6e3bb80e..bef7dc8af 100644 --- a/rates/rateprofile.go +++ b/engine/rateprofile.go @@ -16,7 +16,7 @@ You should have received a copy of the GNU General Public License along 
with this program. If not, see */ -package rates +package engine import ( "time" @@ -44,6 +44,10 @@ type RateProfile struct { maxCost *utils.Decimal } +func (rpp *RateProfile) TenantID() string { + return utils.ConcatenatedKey(rpp.Tenant, rpp.ID) +} + // Route defines rate related information used within a RateProfile type Rate struct { ID string // RateID @@ -56,3 +60,25 @@ type Rate struct { val *utils.Decimal // cached version of the Decimal } + +// RateProfileWithArgDispatcher is used in replicatorV1 for dispatcher +type RateProfileWithArgDispatcher struct { + *RateProfile + *utils.ArgDispatcher +} + +type TPRateProfile struct { + TPid string + Tenant string + ID string + FilterIDs []string + ActivationInterval *utils.TPActivationInterval + Weight float64 + ConnectFee float64 + RoundingMethod string + RoundingDecimals int + MinCost float64 + MaxCost float64 + MaxCostStrategy string + Rates []*Rate +} diff --git a/engine/storage_csv.go b/engine/storage_csv.go index 551f9a89c..cfe3946dc 100644 --- a/engine/storage_csv.go +++ b/engine/storage_csv.go @@ -65,6 +65,7 @@ type CSVStorage struct { chargerProfilesFn []string dispatcherProfilesFn []string dispatcherHostsFn []string + rateProfilesFn []string } // NewCSVStorage creates a CSV storege that takes the data from the paths specified @@ -74,7 +75,7 @@ func NewCSVStorage(sep rune, actionsFn, actiontimingsFn, actiontriggersFn, accountactionsFn, resProfilesFn, statsFn, thresholdsFn, filterFn, routeProfilesFn, attributeProfilesFn, - chargerProfilesFn, dispatcherProfilesFn, dispatcherHostsFn []string) *CSVStorage { + chargerProfilesFn, dispatcherProfilesFn, dispatcherHostsFn, rateProfilesFn []string) *CSVStorage { return &CSVStorage{ sep: sep, generator: NewCsvFile, @@ -98,6 +99,7 @@ func NewCSVStorage(sep rune, chargerProfilesFn: chargerProfilesFn, dispatcherProfilesFn: dispatcherProfilesFn, dispatcherHostsFn: dispatcherHostsFn, + rateProfilesFn: rateProfilesFn, } } @@ -127,6 +129,7 @@ func NewFileCSVStorage(sep rune, dataPath string) *CSVStorage { chargersPaths := appendName(allFoldersPath, utils.ChargersCsv) dispatcherprofilesPaths := appendName(allFoldersPath, utils.DispatcherProfilesCsv) dispatcherhostsPaths := appendName(allFoldersPath, utils.DispatcherHostsCsv) + rateProfilesFn := appendName(allFoldersPath, utils.RateProfilesCsv) return NewCSVStorage(sep, destinationsPaths, timingsPaths, @@ -148,6 +151,7 @@ func NewFileCSVStorage(sep rune, dataPath string) *CSVStorage { chargersPaths, dispatcherprofilesPaths, dispatcherhostsPaths, + rateProfilesFn, ) } @@ -159,14 +163,14 @@ func NewStringCSVStorage(sep rune, accountactionsFn, resProfilesFn, statsFn, thresholdsFn, filterFn, routeProfilesFn, attributeProfilesFn, chargerProfilesFn, - dispatcherProfilesFn, dispatcherHostsFn string) *CSVStorage { + dispatcherProfilesFn, dispatcherHostsFn, rateProfilesFn string) *CSVStorage { c := NewCSVStorage(sep, []string{destinationsFn}, []string{timingsFn}, []string{ratesFn}, []string{destinationratesFn}, []string{destinationratetimingsFn}, []string{ratingprofilesFn}, []string{sharedgroupsFn}, []string{actionsFn}, []string{actiontimingsFn}, []string{actiontriggersFn}, []string{accountactionsFn}, []string{resProfilesFn}, []string{statsFn}, []string{thresholdsFn}, []string{filterFn}, []string{routeProfilesFn}, []string{attributeProfilesFn}, []string{chargerProfilesFn}, - []string{dispatcherProfilesFn}, []string{dispatcherHostsFn}) + []string{dispatcherProfilesFn}, []string{dispatcherHostsFn}, []string{rateProfilesFn}) c.generator = NewCsvString return c } @@
-207,7 +211,8 @@ func NewGoogleCSVStorage(sep rune, spreadsheetID string) (*CSVStorage, error) { getIfExist(utils.Attributes), getIfExist(utils.Chargers), getIfExist(utils.DispatcherProfiles), - getIfExist(utils.DispatcherHosts)) + getIfExist(utils.DispatcherHosts), + getIfExist(utils.RateProfiles)) c.generator = func() csvReaderCloser { return &csvGoogle{ spreadsheetID: spreadsheetID, @@ -239,6 +244,7 @@ func NewURLCSVStorage(sep rune, dataPath string) *CSVStorage { var chargersPaths []string var dispatcherprofilesPaths []string var dispatcherhostsPaths []string + var rateProfilesPaths []string for _, baseURL := range strings.Split(dataPath, utils.INFIELD_SEP) { if !strings.HasSuffix(baseURL, utils.CSVSuffix) { @@ -262,6 +268,7 @@ func NewURLCSVStorage(sep rune, dataPath string) *CSVStorage { chargersPaths = append(chargersPaths, joinURL(baseURL, utils.ChargersCsv)) dispatcherprofilesPaths = append(dispatcherprofilesPaths, joinURL(baseURL, utils.DispatcherProfilesCsv)) dispatcherhostsPaths = append(dispatcherhostsPaths, joinURL(baseURL, utils.DispatcherHostsCsv)) + rateProfilesPaths = append(rateProfilesPaths, joinURL(baseURL, utils.RateProfilesCsv)) continue } switch { @@ -305,6 +312,8 @@ func NewURLCSVStorage(sep rune, dataPath string) *CSVStorage { dispatcherprofilesPaths = append(dispatcherprofilesPaths, baseURL) case strings.HasSuffix(baseURL, utils.DispatcherHostsCsv): dispatcherhostsPaths = append(dispatcherhostsPaths, baseURL) + case strings.HasSuffix(baseURL, utils.RateProfilesCsv): + rateProfilesPaths = append(rateProfilesPaths, baseURL) } } @@ -329,6 +338,7 @@ func NewURLCSVStorage(sep rune, dataPath string) *CSVStorage { chargersPaths, dispatcherprofilesPaths, dispatcherhostsPaths, + rateProfilesPaths, ) c.generator = func() csvReaderCloser { return &csvURL{} @@ -643,6 +653,18 @@ func (csvs *CSVStorage) GetTPDispatcherHosts(tpid, tenant, id string) ([]*utils. 
return tpDDHs.AsTPDispatcherHosts(), nil } +func (csvs *CSVStorage) GetTPRateProfiles(tpid, tenant, id string) ([]*TPRateProfile, error) { + var tpDPPs RateProfileMdls + if err := csvs.proccesData(RateProfileMdl{}, csvs.rateProfilesFn, func(tp interface{}) { + dpp := tp.(RateProfileMdl) + dpp.Tpid = tpid + tpDPPs = append(tpDPPs, &dpp) + }); err != nil { + return nil, err + } + return tpDPPs.AsTPRateProfile() +} + func (csvs *CSVStorage) GetTpIds(colName string) ([]string, error) { return nil, utils.ErrNotImplemented } diff --git a/engine/storage_interface.go b/engine/storage_interface.go index 60506770a..a62ef155d 100644 --- a/engine/storage_interface.go +++ b/engine/storage_interface.go @@ -133,6 +133,9 @@ type DataDB interface { GetDispatcherHostDrv(string, string) (*DispatcherHost, error) SetDispatcherHostDrv(*DispatcherHost) error RemoveDispatcherHostDrv(string, string) error + GetRateProfileDrv(string, string) (*RateProfile, error) + SetRateProfileDrv(*RateProfile) error + RemoveRateProfileDrv(string, string) error } type StorDB interface { @@ -182,6 +185,7 @@ type LoadReader interface { GetTPChargers(string, string, string) ([]*utils.TPChargerProfile, error) GetTPDispatcherProfiles(string, string, string) ([]*utils.TPDispatcherProfile, error) GetTPDispatcherHosts(string, string, string) ([]*utils.TPDispatcherHost, error) + GetTPRateProfiles(string, string, string) ([]*TPRateProfile, error) } type LoadWriter interface { @@ -206,6 +210,7 @@ type LoadWriter interface { SetTPChargers([]*utils.TPChargerProfile) error SetTPDispatcherProfiles([]*utils.TPDispatcherProfile) error SetTPDispatcherHosts([]*utils.TPDispatcherHost) error + SetTPRateProfiles([]*TPRateProfile) error } // NewMarshaler returns the marshaler type selected by mrshlerStr diff --git a/engine/storage_internal_datadb.go b/engine/storage_internal_datadb.go index ab01a5bf5..bf43b0852 100644 --- a/engine/storage_internal_datadb.go +++ b/engine/storage_internal_datadb.go @@ -1402,6 +1402,26 @@ func (iDB *InternalDB) RemoveDispatcherHostDrv(tenant, id string) (err error) { return } +func (iDB *InternalDB) GetRateProfileDrv(tenant, id string) (rpp *RateProfile, err error) { + x, ok := iDB.db.Get(utils.CacheRateProfiles, utils.ConcatenatedKey(tenant, id)) + if !ok || x == nil { + return nil, utils.ErrNotFound + } + return x.(*RateProfile), nil +} + +func (iDB *InternalDB) SetRateProfileDrv(rpp *RateProfile) (err error) { + iDB.db.Set(utils.CacheRateProfiles, rpp.TenantID(), rpp, nil, + cacheCommit(utils.NonTransactional), utils.NonTransactional) + return +} + +func (iDB *InternalDB) RemoveRateProfileDrv(tenant, id string) (err error) { + iDB.db.Remove(utils.CacheRateProfiles, utils.ConcatenatedKey(tenant, id), + cacheCommit(utils.NonTransactional), utils.NonTransactional) + return +} + func (iDB *InternalDB) RemoveLoadIDsDrv() (err error) { return utils.ErrNotImplemented } diff --git a/engine/storage_internal_stordb.go b/engine/storage_internal_stordb.go index d300f5e6d..3197fa071 100644 --- a/engine/storage_internal_stordb.go +++ b/engine/storage_internal_stordb.go @@ -566,6 +566,28 @@ func (iDB *InternalDB) GetTPDispatcherHosts(tpid, tenant, id string) (dpps []*ut return } +func (iDB *InternalDB) GetTPRateProfiles(tpid, tenant, id string) (tpPrfs []*TPRateProfile, err error) { + key := tpid + if tenant != utils.EmptyString { + key += utils.CONCATENATED_KEY_SEP + tenant + } + if id != utils.EmptyString { + key += utils.CONCATENATED_KEY_SEP + id + } + ids := iDB.db.GetItemIDs(utils.TBLTPRateProfiles, key) + for _, id := range ids 
{ + x, ok := iDB.db.Get(utils.TBLTPRateProfiles, id) + if !ok || x == nil { + return nil, utils.ErrNotFound + } + tpPrfs = append(tpPrfs, x.(*TPRateProfile)) + } + if len(tpPrfs) == 0 { + return nil, utils.ErrNotFound + } + return +} + //implement LoadWriter interface func (iDB *InternalDB) RemTpData(table, tpid string, args map[string]string) (err error) { if table == utils.EmptyString { @@ -805,6 +827,17 @@ func (iDB *InternalDB) SetTPDispatcherHosts(dpps []*utils.TPDispatcherHost) (err return } +func (iDB *InternalDB) SetTPRateProfiles(tpPrfs []*TPRateProfile) (err error) { + if len(tpPrfs) == 0 { + return nil + } + for _, tpPrf := range tpPrfs { + iDB.db.Set(utils.TBLTPRateProfiles, utils.ConcatenatedKey(tpPrf.TPid, tpPrf.Tenant, tpPrf.ID), tpPrf, nil, + cacheCommit(utils.NonTransactional), utils.NonTransactional) + } + return +} + //implement CdrStorage interface func (iDB *InternalDB) SetCDR(cdr *CDR, allowUpdate bool) (err error) { if cdr.OrderID == 0 { diff --git a/engine/storage_mongo_datadb.go b/engine/storage_mongo_datadb.go index 83e2d2119..e112bc0a0 100644 --- a/engine/storage_mongo_datadb.go +++ b/engine/storage_mongo_datadb.go @@ -74,6 +74,7 @@ const ( ColCpp = "charger_profiles" ColDpp = "dispatcher_profiles" ColDph = "dispatcher_hosts" + ColRpp = "rate_profiles" ColLID = "load_ids" ) @@ -286,7 +287,7 @@ func (ms *MongoStorage) ensureIndexesForCol(col string) (err error) { // exporte if err = ms.enusureIndex(col, true, "key"); err != nil { return } - case ColRsP, ColRes, ColSqs, ColSqp, ColTps, ColThs, ColRts, ColAttr, ColFlt, ColCpp, ColDpp, ColDph: + case ColRsP, ColRes, ColSqs, ColSqp, ColTps, ColThs, ColRts, ColAttr, ColFlt, ColCpp, ColDpp, ColDph, ColRpp: if err = ms.enusureIndex(col, true, "tenant", "id"); err != nil { return } @@ -350,7 +351,7 @@ func (ms *MongoStorage) EnsureIndexes(cols ...string) (err error) { if ms.storageType == utils.DataDB { for _, col := range []string{ColAct, ColApl, ColAAp, ColAtr, ColRpl, ColDst, ColRds, ColLht, ColRFI, ColRsP, ColRes, ColSqs, ColSqp, - ColTps, ColThs, ColRts, ColAttr, ColFlt, ColCpp, ColDpp, + ColTps, ColThs, ColRts, ColAttr, ColFlt, ColCpp, ColDpp, ColRpp, ColRpf, ColShg, ColAcc} { if err = ms.ensureIndexesForCol(col); err != nil { return @@ -681,6 +682,8 @@ func (ms *MongoStorage) GetKeysForPrefix(prefix string) (result []string, err er result, err = ms.getField2(sctx, ColCpp, utils.ChargerProfilePrefix, subject, tntID) case utils.DispatcherProfilePrefix: result, err = ms.getField2(sctx, ColDpp, utils.DispatcherProfilePrefix, subject, tntID) + case utils.RateProfilePrefix: + result, err = ms.getField2(sctx, ColRpp, utils.RateProfilePrefix, subject, tntID) case utils.DispatcherHostPrefix: result, err = ms.getField2(sctx, ColDph, utils.DispatcherHostPrefix, subject, tntID) case utils.AttributeFilterIndexes: @@ -745,6 +748,8 @@ func (ms *MongoStorage) HasDataDrv(category, subject, tenant string) (has bool, count, err = ms.getCol(ColDpp).CountDocuments(sctx, bson.M{"tenant": tenant, "id": subject}) case utils.DispatcherHostPrefix: count, err = ms.getCol(ColDph).CountDocuments(sctx, bson.M{"tenant": tenant, "id": subject}) + case utils.RateProfilePrefix: + count, err = ms.getCol(ColRpp).CountDocuments(sctx, bson.M{"tenant": tenant, "id": subject}) default: err = fmt.Errorf("unsupported category in HasData: %s", category) } @@ -2325,3 +2330,39 @@ func (ms *MongoStorage) RemoveLoadIDsDrv() (err error) { return err }) } + +func (ms *MongoStorage) GetRateProfileDrv(tenant, id string) (rpp *RateProfile, err error) { + rpp = 
new(RateProfile) + err = ms.query(func(sctx mongo.SessionContext) (err error) { + cur := ms.getCol(ColRpp).FindOne(sctx, bson.M{"tenant": tenant, "id": id}) + if err := cur.Decode(rpp); err != nil { + rpp = nil + if err == mongo.ErrNoDocuments { + return utils.ErrNotFound + } + return err + } + return nil + }) + return +} + +func (ms *MongoStorage) SetRateProfileDrv(rpp *RateProfile) (err error) { + return ms.query(func(sctx mongo.SessionContext) (err error) { + _, err = ms.getCol(ColRpp).UpdateOne(sctx, bson.M{"tenant": rpp.Tenant, "id": rpp.ID}, + bson.M{"$set": rpp}, + options.Update().SetUpsert(true), + ) + return err + }) +} + +func (ms *MongoStorage) RemoveRateProfileDrv(tenant, id string) (err error) { + return ms.query(func(sctx mongo.SessionContext) (err error) { + dr, err := ms.getCol(ColRpp).DeleteOne(sctx, bson.M{"tenant": tenant, "id": id}) + if dr.DeletedCount == 0 { + return utils.ErrNotFound + } + return err + }) +} diff --git a/engine/storage_mongo_stordb.go b/engine/storage_mongo_stordb.go index 2f8c3cfc5..cb7cfd33c 100644 --- a/engine/storage_mongo_stordb.go +++ b/engine/storage_mongo_stordb.go @@ -1531,6 +1531,54 @@ func (ms *MongoStorage) SetTPDispatcherHosts(tpDPPs []*utils.TPDispatcherHost) ( }) } +func (ms *MongoStorage) GetTPRateProfiles(tpid, tenant, id string) ([]*TPRateProfile, error) { + filter := bson.M{"tpid": tpid} + if id != "" { + filter["id"] = id + } + if tenant != "" { + filter["tenant"] = tenant + } + var results []*TPRateProfile + err := ms.query(func(sctx mongo.SessionContext) (err error) { + cur, err := ms.getCol(utils.TBLTPRateProfiles).Find(sctx, filter) + if err != nil { + return err + } + for cur.Next(sctx) { + var tp TPRateProfile + err := cur.Decode(&tp) + if err != nil { + return err + } + results = append(results, &tp) + } + if len(results) == 0 { + return utils.ErrNotFound + } + return cur.Close(sctx) + }) + return results, err +} + +func (ms *MongoStorage) SetTPRateProfiles(tpDPPs []*TPRateProfile) (err error) { + if len(tpDPPs) == 0 { + return + } + return ms.query(func(sctx mongo.SessionContext) (err error) { + for _, tp := range tpDPPs { + _, err = ms.getCol(utils.TBLTPRateProfiles).UpdateOne(sctx, bson.M{"tpid": tp.TPid, "id": tp.ID}, + bson.M{"$set": tp}, + options.Update().SetUpsert(true), + ) + if err != nil { + return err + } + } + return nil + }) +} + func (ms *MongoStorage) GetVersions(itm string) (vrs Versions, err error) { fop := options.FindOne() if itm != "" { diff --git a/engine/storage_redis.go b/engine/storage_redis.go index ea42c45c3..24e82ee63 100644 --- a/engine/storage_redis.go +++ b/engine/storage_redis.go @@ -414,7 +414,8 @@ func (rs *RedisStorage) HasDataDrv(category, subject, tenant string) (bool, erro case utils.ResourcesPrefix, utils.ResourceProfilesPrefix, utils.StatQueuePrefix, utils.StatQueueProfilePrefix, utils.ThresholdPrefix, utils.ThresholdProfilePrefix, utils.FilterPrefix, utils.RouteProfilePrefix, utils.AttributeProfilePrefix, - utils.ChargerProfilePrefix, utils.DispatcherProfilePrefix, utils.DispatcherHostPrefix: + utils.ChargerProfilePrefix, utils.DispatcherProfilePrefix, utils.DispatcherHostPrefix, + utils.RateProfilePrefix: i, err := rs.Cmd(redis_EXISTS, category+utils.ConcatenatedKey(tenant, subject)).Int() return i == 1, err } @@ -1716,3 +1717,34 @@ func (rs *RedisStorage) SetLoadIDsDrv(loadIDs map[string]int64) error { func (rs *RedisStorage) RemoveLoadIDsDrv() (err error) { return rs.Cmd(redis_DEL, utils.LoadIDs).Err } + +func (rs *RedisStorage) GetRateProfileDrv(tenant, id string) (rpp 
*RateProfile, err error) { + key := utils.RateProfilePrefix + utils.ConcatenatedKey(tenant, id) + var values []byte + if values, err = rs.Cmd(redis_GET, key).Bytes(); err != nil { + if err == redis.ErrRespNil { // did not find the destination + err = utils.ErrNotFound + } + return + } + if err = rs.ms.Unmarshal(values, &rpp); err != nil { + return + } + return +} + +func (rs *RedisStorage) SetRateProfileDrv(rpp *RateProfile) (err error) { + result, err := rs.ms.Marshal(rpp) + if err != nil { + return err + } + return rs.Cmd(redis_SET, utils.RateProfilePrefix+utils.ConcatenatedKey(rpp.Tenant, rpp.ID), result).Err +} + +func (rs *RedisStorage) RemoveRateProfileDrv(tenant, id string) (err error) { + key := utils.RateProfilePrefix + utils.ConcatenatedKey(tenant, id) + if err = rs.Cmd(redis_DEL, key).Err; err != nil { + return + } + return +} diff --git a/engine/storage_sql.go b/engine/storage_sql.go index ddf84c158..8e9829fba 100644 --- a/engine/storage_sql.go +++ b/engine/storage_sql.go @@ -728,6 +728,28 @@ func (self *SQLStorage) SetTPDispatcherHosts(tpDPPs []*utils.TPDispatcherHost) e return nil } +func (self *SQLStorage) SetTPRateProfiles(tpDPPs []*TPRateProfile) error { + if len(tpDPPs) == 0 { + return nil + } + tx := self.db.Begin() + for _, dpp := range tpDPPs { + // Remove previous + if err := tx.Where(&RateProfileMdl{Tpid: dpp.TPid, ID: dpp.ID}).Delete(RateProfileMdl{}).Error; err != nil { + tx.Rollback() + return err + } + for _, mst := range APItoModelTPRateProfile(dpp) { + if err := tx.Save(&mst).Error; err != nil { + tx.Rollback() + return err + } + } + } + tx.Commit() + return nil +} + func (self *SQLStorage) SetSMCost(smc *SMCost) error { if smc.CostDetails == nil { return nil @@ -1573,6 +1595,27 @@ func (self *SQLStorage) GetTPDispatcherHosts(tpid, tenant, id string) ([]*utils. 
return arls, nil } +func (self *SQLStorage) GetTPRateProfiles(tpid, tenant, id string) ([]*TPRateProfile, error) { + var dpps RateProfileMdls + q := self.db.Where("tpid = ?", tpid) + if len(id) != 0 { + q = q.Where("id = ?", id) + } + if len(tenant) != 0 { + q = q.Where("tenant = ?", tenant) + } + if err := q.Find(&dpps).Error; err != nil { + return nil, err + } + arls, err := dpps.AsTPRateProfile() + if err != nil { + return nil, err + } else if len(arls) == 0 { + return arls, utils.ErrNotFound + } + return arls, nil +} + // GetVersions returns slice of all versions or a specific version if tag is specified func (self *SQLStorage) GetVersions(itm string) (vrs Versions, err error) { q := self.db.Model(&TBLVersion{}) diff --git a/engine/tpreader.go b/engine/tpreader.go index 94aed76e3..577a0c090 100644 --- a/engine/tpreader.go +++ b/engine/tpreader.go @@ -56,6 +56,7 @@ type TpReader struct { chargerProfiles map[utils.TenantID]*utils.TPChargerProfile dispatcherProfiles map[utils.TenantID]*utils.TPDispatcherProfile dispatcherHosts map[utils.TenantID]*utils.TPDispatcherHost + rateProfiles map[utils.TenantID]*TPRateProfile resources []*utils.TenantID // IDs of resources which need creation based on resourceProfiles statQueues []*utils.TenantID // IDs of statQueues which need creation based on statQueueProfiles thresholds []*utils.TenantID // IDs of thresholds which need creation based on thresholdProfiles @@ -103,6 +104,7 @@ func (tpr *TpReader) Init() { tpr.chargerProfiles = make(map[utils.TenantID]*utils.TPChargerProfile) tpr.dispatcherProfiles = make(map[utils.TenantID]*utils.TPDispatcherProfile) tpr.dispatcherHosts = make(map[utils.TenantID]*utils.TPDispatcherHost) + tpr.rateProfiles = make(map[utils.TenantID]*TPRateProfile) tpr.filters = make(map[utils.TenantID]*utils.TPFilterProfile) tpr.revDests = make(map[string][]string) tpr.acntActionPlans = make(map[string][]string) @@ -1240,6 +1242,23 @@ func (tpr *TpReader) LoadDispatcherHostsFiltered(tag string) (err error) { return nil } +func (tpr *TpReader) LoadRateProfiles() error { + return tpr.LoadRateProfilesFiltered("") +} + +func (tpr *TpReader) LoadRateProfilesFiltered(tag string) (err error) { + rls, err := tpr.lr.GetTPRateProfiles(tpr.tpid, "", tag) + if err != nil { + return err + } + mapRateProfiles := make(map[utils.TenantID]*TPRateProfile) + for _, rl := range rls { + mapRateProfiles[utils.TenantID{Tenant: rl.Tenant, ID: rl.ID}] = rl + } + tpr.rateProfiles = mapRateProfiles + return nil +} + func (tpr *TpReader) LoadDispatcherHosts() error { return tpr.LoadDispatcherHostsFiltered("") } @@ -1305,6 +1324,9 @@ func (tpr *TpReader) LoadAll() (err error) { if err = tpr.LoadDispatcherHosts(); err != nil && err.Error() != utils.NotFoundCaps { return } + if err = tpr.LoadRateProfiles(); err != nil && err.Error() != utils.NotFoundCaps { + return + } return nil } @@ -1710,6 +1732,25 @@ func (tpr *TpReader) WriteToDatabase(verbose, disable_reverse bool) (err error) loadIDs[utils.CacheDispatcherHosts] = loadID } + if verbose { + log.Print("RateProfiles:") + } + for _, tpTH := range tpr.rateProfiles { + th, err := APItoRateProfile(tpTH, tpr.timezone) + if err != nil { + return err + } + if err = tpr.dm.SetRateProfile(th, true); err != nil { + return err + } + if verbose { + log.Print("\t", th.TenantID()) + } + } + if len(tpr.rateProfiles) != 0 { + loadIDs[utils.CacheRateProfiles] = loadID + } + if verbose { log.Print("Timings:") } @@ -1817,6 +1858,8 @@ func (tpr *TpReader) ShowStatistics() { log.Print("DispatcherProfiles: ", 
len(tpr.dispatcherProfiles)) // Dispatcher Hosts log.Print("DispatcherHosts: ", len(tpr.dispatcherHosts)) + // Rate profiles + log.Print("RateProfiles: ", len(tpr.rateProfiles)) } // Returns the identities loaded for a specific category, useful for cache reloads @@ -1967,6 +2010,15 @@ func (tpr *TpReader) GetLoadedIds(categ string) ([]string, error) { i++ } return keys, nil + + case utils.RateProfilePrefix: + keys := make([]string, len(tpr.rateProfiles)) + i := 0 + for k := range tpr.rateProfiles { + keys[i] = k.TenantID() + i++ + } + return keys, nil } return nil, errors.New("Unsupported load category") } @@ -2209,6 +2261,19 @@ func (tpr *TpReader) RemoveFromDatabase(verbose, disable_reverse bool) (err erro } } + if verbose { + log.Print("RateProfiles:") + } + for _, tpRp := range tpr.rateProfiles { + if err = tpr.dm.RemoveRateProfile(tpRp.Tenant, tpRp.ID, + utils.NonTransactional, true); err != nil { + return err + } + if verbose { + log.Print("\t", utils.ConcatenatedKey(tpRp.Tenant, tpRp.ID)) + } + } + if verbose { log.Print("Timings:") } @@ -2314,6 +2379,9 @@ func (tpr *TpReader) RemoveFromDatabase(verbose, disable_reverse bool) (err erro if len(tpr.dispatcherHosts) != 0 { loadIDs[utils.CacheDispatcherHosts] = loadID } + if len(tpr.rateProfiles) != 0 { + loadIDs[utils.CacheRateProfiles] = loadID + } if len(tpr.timings) != 0 { loadIDs[utils.CacheTimings] = loadID } @@ -2349,6 +2417,7 @@ func (tpr *TpReader) ReloadCache(caching string, verbose bool, argDispatcher *ut chargerIDs, _ := tpr.GetLoadedIds(utils.ChargerProfilePrefix) dppIDs, _ := tpr.GetLoadedIds(utils.DispatcherProfilePrefix) dphIDs, _ := tpr.GetLoadedIds(utils.DispatcherHostPrefix) + ratePrfIDs, _ := tpr.GetLoadedIds(utils.RateProfilePrefix) aps, _ := tpr.GetLoadedIds(utils.ACTION_PLAN_PREFIX) //compose Reload Cache argument @@ -2376,6 +2445,7 @@ func (tpr *TpReader) ReloadCache(caching string, verbose bool, argDispatcher *ut ChargerProfileIDs: chargerIDs, DispatcherProfileIDs: dppIDs, DispatcherHostIDs: dphIDs, + RateProfileIDs: ratePrfIDs, }, } @@ -2427,6 +2497,9 @@ func (tpr *TpReader) ReloadCache(caching string, verbose bool, argDispatcher *ut if len(dppIDs) != 0 { cacheIDs = append(cacheIDs, utils.CacheDispatcherFilterIndexes) } + if len(ratePrfIDs) != 0 { + cacheIDs = append(cacheIDs, utils.CacheRateFilterIndexes) + } if verbose { log.Print("Clearing indexes") } diff --git a/general_tests/acntacts_test.go b/general_tests/acntacts_test.go index 5c431cff2..51cbfd4fa 100644 --- a/general_tests/acntacts_test.go +++ b/general_tests/acntacts_test.go @@ -58,7 +58,7 @@ ENABLE_ACNT,*enable_account,,,,,,,,,,,,,false,false,10` csvr, err := engine.NewTpReader(dbAcntActs.DataDB(), engine.NewStringCSVStorage(utils.CSV_SEP, destinations, timings, rates, destinationRates, ratingPlans, ratingProfiles, sharedGroups, actions, actionPlans, actionTriggers, accountActions, - resLimits, stats, thresholds, filters, suppliers, attrProfiles, chargerProfiles, ``, ""), "", "", nil, nil) + resLimits, stats, thresholds, filters, suppliers, attrProfiles, chargerProfiles, ``, "", utils.EmptyString), "", "", nil, nil) if err != nil { t.Error(err) } @@ -69,7 +69,7 @@ ENABLE_ACNT,*enable_account,,,,,,,,,,,,,false,false,10` engine.Cache.Clear(nil) dbAcntActs.LoadDataDBCache(nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) expectAcnt := &engine.Account{ID: "cgrates.org:1"} if acnt, err := 
dbAcntActs.GetAccount("cgrates.org:1"); err != nil { diff --git a/general_tests/auth_test.go b/general_tests/auth_test.go index f7da0ee1e..d707d64bf 100644 --- a/general_tests/auth_test.go +++ b/general_tests/auth_test.go @@ -64,7 +64,7 @@ cgrates.org,call,*any,2013-01-06T00:00:00Z,RP_ANY,` chargerProfiles := `` csvr, err := engine.NewTpReader(dbAuth.DataDB(), engine.NewStringCSVStorage(utils.CSV_SEP, destinations, timings, rates, destinationRates, ratingPlans, ratingProfiles, sharedGroups, actions, actionPlans, actionTriggers, accountActions, - resLimits, stats, thresholds, filters, suppliers, attrProfiles, chargerProfiles, ``, ""), "", "", nil, nil) + resLimits, stats, thresholds, filters, suppliers, attrProfiles, chargerProfiles, ``, "", utils.EmptyString), "", "", nil, nil) if err != nil { t.Error(err) } @@ -80,7 +80,7 @@ cgrates.org,call,*any,2013-01-06T00:00:00Z,RP_ANY,` engine.Cache.Clear(nil) dbAuth.LoadDataDBCache(nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) if cachedDests := len(engine.Cache.GetItemIDs(utils.CacheDestinations, "")); cachedDests != 0 { t.Error("Wrong number of cached destinations found", cachedDests) diff --git a/general_tests/costs1_test.go b/general_tests/costs1_test.go index 3c4c7156d..8d9beaa41 100644 --- a/general_tests/costs1_test.go +++ b/general_tests/costs1_test.go @@ -52,8 +52,13 @@ RP_SMS1,DR_SMS_1,ALWAYS,10` ratingProfiles := `cgrates.org,call,*any,2012-01-01T00:00:00Z,RP_RETAIL, cgrates.org,data,*any,2012-01-01T00:00:00Z,RP_DATA1, cgrates.org,sms,*any,2012-01-01T00:00:00Z,RP_SMS1,` - csvr, err := engine.NewTpReader(dataDB.DataDB(), engine.NewStringCSVStorage(utils.CSV_SEP, dests, timings, rates, destinationRates, ratingPlans, ratingProfiles, - "", "", "", "", "", "", "", "", "", "", "", "", "", ""), "", "", nil, nil) + csvr, err := engine.NewTpReader(dataDB.DataDB(), engine.NewStringCSVStorage(utils.CSV_SEP, dests, timings, + rates, destinationRates, ratingPlans, ratingProfiles, + utils.EmptyString, utils.EmptyString, utils.EmptyString, + utils.EmptyString, utils.EmptyString, utils.EmptyString, + utils.EmptyString, utils.EmptyString, utils.EmptyString, + utils.EmptyString, utils.EmptyString, utils.EmptyString, + utils.EmptyString, utils.EmptyString, utils.EmptyString), utils.EmptyString, utils.EmptyString, nil, nil) if err != nil { t.Error(err) } @@ -78,19 +83,19 @@ cgrates.org,sms,*any,2012-01-01T00:00:00Z,RP_SMS1,` csvr.WriteToDatabase(false, false) engine.Cache.Clear(nil) dataDB.LoadDataDBCache(nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) - if cachedRPlans := len(engine.Cache.GetItemIDs(utils.CacheRatingPlans, "")); cachedRPlans != 3 { + if cachedRPlans := len(engine.Cache.GetItemIDs(utils.CacheRatingPlans, utils.EmptyString)); cachedRPlans != 3 { t.Error("Wrong number of cached rating plans found", cachedRPlans) } - if cachedRProfiles := len(engine.Cache.GetItemIDs(utils.CacheRatingProfiles, "")); cachedRProfiles != 0 { + if cachedRProfiles := len(engine.Cache.GetItemIDs(utils.CacheRatingProfiles, utils.EmptyString)); cachedRProfiles != 0 { t.Error("Wrong number of cached rating profiles found", cachedRProfiles) } } func TestCosts1GetCost1(t *testing.T) { - tStart, _ := utils.ParseTimeDetectLayout("2013-08-07T17:30:00Z", "") - tEnd, _ := 
utils.ParseTimeDetectLayout("2013-08-07T17:31:30Z", "") + tStart, _ := utils.ParseTimeDetectLayout("2013-08-07T17:30:00Z", utils.EmptyString) + tEnd, _ := utils.ParseTimeDetectLayout("2013-08-07T17:31:30Z", utils.EmptyString) cd := &engine.CallDescriptor{ Category: "call", Tenant: "cgrates.org", @@ -108,8 +113,8 @@ func TestCosts1GetCost1(t *testing.T) { } func TestCosts1GetCostZeroDuration(t *testing.T) { - tStart, _ := utils.ParseTimeDetectLayout("2013-08-07T17:30:00Z", "") - tEnd, _ := utils.ParseTimeDetectLayout("2013-08-07T17:30:00Z", "") + tStart, _ := utils.ParseTimeDetectLayout("2013-08-07T17:30:00Z", utils.EmptyString) + tEnd, _ := utils.ParseTimeDetectLayout("2013-08-07T17:30:00Z", utils.EmptyString) cd := &engine.CallDescriptor{ Category: "call", Tenant: "cgrates.org", diff --git a/general_tests/datachrg1_test.go b/general_tests/datachrg1_test.go index 99fc915d1..499b0d639 100644 --- a/general_tests/datachrg1_test.go +++ b/general_tests/datachrg1_test.go @@ -42,8 +42,11 @@ DR_DATA_2,*any,RT_DATA_1c,*up,4,0,` ratingPlans := `RP_DATA1,DR_DATA_1,TM1,10 RP_DATA1,DR_DATA_2,TM2,10` ratingProfiles := `cgrates.org,data,*any,2012-01-01T00:00:00Z,RP_DATA1,` - csvr, err := engine.NewTpReader(dataDB.DataDB(), engine.NewStringCSVStorage(utils.CSV_SEP, "", timings, rates, destinationRates, ratingPlans, ratingProfiles, - "", "", "", "", "", "", "", "", "", "", "", "", "", ""), "", "", nil, nil) + csvr, err := engine.NewTpReader(dataDB.DataDB(), engine.NewStringCSVStorage(utils.CSV_SEP, utils.EmptyString, timings, rates, destinationRates, ratingPlans, ratingProfiles, + utils.EmptyString, utils.EmptyString, utils.EmptyString, utils.EmptyString, utils.EmptyString, + utils.EmptyString, utils.EmptyString, utils.EmptyString, utils.EmptyString, utils.EmptyString, + utils.EmptyString, utils.EmptyString, utils.EmptyString, utils.EmptyString, utils.EmptyString), + utils.EmptyString, utils.EmptyString, nil, nil) if err != nil { t.Error(err) } @@ -65,12 +68,12 @@ RP_DATA1,DR_DATA_2,TM2,10` csvr.WriteToDatabase(false, false) engine.Cache.Clear(nil) dataDB.LoadDataDBCache(nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) - if cachedRPlans := len(engine.Cache.GetItemIDs(utils.CacheRatingPlans, "")); cachedRPlans != 1 { + if cachedRPlans := len(engine.Cache.GetItemIDs(utils.CacheRatingPlans, utils.EmptyString)); cachedRPlans != 1 { t.Error("Wrong number of cached rating plans found", cachedRPlans) } - if cachedRProfiles := len(engine.Cache.GetItemIDs(utils.CacheRatingProfiles, "")); cachedRProfiles != 0 { + if cachedRProfiles := len(engine.Cache.GetItemIDs(utils.CacheRatingProfiles, utils.EmptyString)); cachedRProfiles != 0 { t.Error("Wrong number of cached rating profiles found", cachedRProfiles) } } diff --git a/general_tests/ddazmbl1_test.go b/general_tests/ddazmbl1_test.go index 703b8c8a8..a85edc8c7 100644 --- a/general_tests/ddazmbl1_test.go +++ b/general_tests/ddazmbl1_test.go @@ -67,7 +67,7 @@ TOPUP10_AT,TOPUP10_AC1,ASAP,10` destinationRates, ratingPlans, ratingProfiles, sharedGroups, actions, actionPlans, actionTriggers, accountActions, resLimits, stats, thresholds, filters, suppliers, - attrProfiles, chargerProfiles, ``, ""), "", "", nil, nil) + attrProfiles, chargerProfiles, ``, "", utils.EmptyString), "", "", nil, nil) if err != nil { t.Error(err) } @@ -113,7 +113,7 @@ TOPUP10_AT,TOPUP10_AC1,ASAP,10` engine.Cache.Clear(nil) dataDB.LoadDataDBCache(nil, nil, nil, nil, nil, 
nil, nil, nil, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) if cachedDests := len(engine.Cache.GetItemIDs(utils.CacheDestinations, "")); cachedDests != 0 { t.Error("Wrong number of cached destinations found", cachedDests) diff --git a/general_tests/ddazmbl2_test.go b/general_tests/ddazmbl2_test.go index 159e0d335..00954931e 100644 --- a/general_tests/ddazmbl2_test.go +++ b/general_tests/ddazmbl2_test.go @@ -65,7 +65,7 @@ TOPUP10_AT,TOPUP10_AC1,ASAP,10` csvr, err := engine.NewTpReader(dataDB2.DataDB(), engine.NewStringCSVStorage(utils.CSV_SEP, destinations, timings, rates, destinationRates, ratingPlans, ratingProfiles, sharedGroups, actions, actionPlans, actionTriggers, accountActions, resLimits, - stats, thresholds, filters, suppliers, attrProfiles, chargerProfiles, ``, ""), "", "", nil, nil) + stats, thresholds, filters, suppliers, attrProfiles, chargerProfiles, ``, "", utils.EmptyString), "", "", nil, nil) if err != nil { t.Error(err) } @@ -110,7 +110,7 @@ TOPUP10_AT,TOPUP10_AC1,ASAP,10` } engine.Cache.Clear(nil) dataDB2.LoadDataDBCache(nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) if cachedDests := len(engine.Cache.GetItemIDs(utils.CacheDestinations, "")); cachedDests != 0 { t.Error("Wrong number of cached destinations found", cachedDests) diff --git a/general_tests/ddazmbl3_test.go b/general_tests/ddazmbl3_test.go index b278b6ba1..bd19613d4 100644 --- a/general_tests/ddazmbl3_test.go +++ b/general_tests/ddazmbl3_test.go @@ -63,7 +63,7 @@ cgrates.org,call,discounted_minutes,2013-01-06T00:00:00Z,RP_UK_Mobile_BIG5_PKG,` csvr, err := engine.NewTpReader(dataDB3.DataDB(), engine.NewStringCSVStorage(utils.CSV_SEP, destinations, timings, rates, destinationRates, ratingPlans, ratingProfiles, sharedGroups, actions, actionPlans, actionTriggers, accountActions, resLimits, stats, - thresholds, filters, suppliers, attrProfiles, chargerProfiles, ``, ""), "", "", nil, nil) + thresholds, filters, suppliers, attrProfiles, chargerProfiles, ``, "", utils.EmptyString), "", "", nil, nil) if err != nil { t.Error(err) } @@ -109,7 +109,7 @@ cgrates.org,call,discounted_minutes,2013-01-06T00:00:00Z,RP_UK_Mobile_BIG5_PKG,` } engine.Cache.Clear(nil) dataDB3.LoadDataDBCache(nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) if cachedDests := len(engine.Cache.GetItemIDs(utils.CacheDestinations, "")); cachedDests != 0 { t.Error("Wrong number of cached destinations found", cachedDests) diff --git a/general_tests/smschrg1_test.go b/general_tests/smschrg1_test.go index a436f98a2..a957b4baf 100644 --- a/general_tests/smschrg1_test.go +++ b/general_tests/smschrg1_test.go @@ -41,8 +41,11 @@ func TestSMSLoadCsvTpSmsChrg1(t *testing.T) { destinationRates := `DR_SMS_1,*any,RT_SMS_5c,*up,4,0,` ratingPlans := `RP_SMS1,DR_SMS_1,ALWAYS,10` ratingProfiles := `cgrates.org,sms,*any,2012-01-01T00:00:00Z,RP_SMS1,` - csvr, err := engine.NewTpReader(dataDB.DataDB(), engine.NewStringCSVStorage(utils.CSV_SEP, "", timings, rates, destinationRates, ratingPlans, ratingProfiles, - "", "", "", "", "", "", "", "", "", "", "", "", "", ""), "", "", nil, nil) + csvr, err := engine.NewTpReader(dataDB.DataDB(), engine.NewStringCSVStorage(utils.CSV_SEP, utils.EmptyString, timings, rates, 
+	csvr, err := engine.NewTpReader(dataDB.DataDB(), engine.NewStringCSVStorage(utils.CSV_SEP, utils.EmptyString, timings, rates, destinationRates, ratingPlans, ratingProfiles,
+		utils.EmptyString, utils.EmptyString, utils.EmptyString, utils.EmptyString,
+		utils.EmptyString, utils.EmptyString, utils.EmptyString, utils.EmptyString,
+		utils.EmptyString, utils.EmptyString, utils.EmptyString, utils.EmptyString,
+		utils.EmptyString, utils.EmptyString, utils.EmptyString), utils.EmptyString, utils.EmptyString, nil, nil)
 	if err != nil {
 		t.Error(err)
 	}
@@ -64,12 +67,12 @@ func TestSMSLoadCsvTpSmsChrg1(t *testing.T) {
 	csvr.WriteToDatabase(false, false)
 	engine.Cache.Clear(nil)
 	dataDB.LoadDataDBCache(nil, nil, nil, nil, nil, nil, nil, nil,
-		nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
+		nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
 
-	if cachedRPlans := len(engine.Cache.GetItemIDs(utils.CacheRatingPlans, "")); cachedRPlans != 1 {
+	if cachedRPlans := len(engine.Cache.GetItemIDs(utils.CacheRatingPlans, utils.EmptyString)); cachedRPlans != 1 {
 		t.Error("Wrong number of cached rating plans found", cachedRPlans)
 	}
-	if cachedRProfiles := len(engine.Cache.GetItemIDs(utils.CacheRatingProfiles, "")); cachedRProfiles != 0 {
+	if cachedRProfiles := len(engine.Cache.GetItemIDs(utils.CacheRatingProfiles, utils.EmptyString)); cachedRProfiles != 0 {
 		t.Error("Wrong number of cached rating profiles found", cachedRProfiles)
 	}
 }
diff --git a/utils/apitpdata.go b/utils/apitpdata.go
index 34decd585..486890c6b 100755
--- a/utils/apitpdata.go
+++ b/utils/apitpdata.go
@@ -461,6 +461,7 @@ type ArgsCache struct {
 	DispatcherProfileIDs []string
 	DispatcherHostIDs []string
 	DispatcherRoutesIDs []string
+	RateProfileIDs []string
 }
 
 type AttrExpFileCdrs struct {
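// Illustrative sketch, not part of the patch above: with RateProfileIDs added to
// ArgsCache, callers that already pass per-partition ID lists (for example when
// asking the cache layer to reload specific items) can now list rate profiles as
// well. The tenant-qualified IDs below are made up for the example.
package main

import (
	"fmt"

	"github.com/cgrates/cgrates/utils"
)

func main() {
	args := utils.ArgsCache{
		DispatcherProfileIDs: []string{"cgrates.org:DSP_1"},
		RateProfileIDs:       []string{"cgrates.org:RP_RETAIL"},
	}
	fmt.Println(len(args.RateProfileIDs)) // 1
}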
diff --git a/utils/consts.go b/utils/consts.go
index 025e0526d..7ae3cf7d4 100755
--- a/utils/consts.go
+++ b/utils/consts.go
@@ -70,7 +70,7 @@ var (
 		CacheAttributeFilterIndexes, CacheChargerFilterIndexes, CacheDispatcherFilterIndexes,
 		CacheDispatcherRoutes, CacheDispatcherLoads, CacheDiameterMessages, CacheRPCResponses,
 		CacheClosedSessions, CacheCDRIDs, CacheLoadIDs, CacheRPCConnections, CacheRatingProfilesTmp,
-		CacheUCH, CacheSTIR, CacheEventCharges})
+		CacheUCH, CacheSTIR, CacheEventCharges, CacheRateProfiles, CacheRateFilterIndexes})
 	CacheInstanceToPrefix = map[string]string{
 		CacheDestinations: DESTINATION_PREFIX,
 		CacheReverseDestinations: REVERSE_DESTINATION_PREFIX,
@@ -94,6 +94,7 @@ var (
 		CacheChargerProfiles: ChargerProfilePrefix,
 		CacheDispatcherProfiles: DispatcherProfilePrefix,
 		CacheDispatcherHosts: DispatcherHostPrefix,
+		CacheRateProfiles: RateProfilePrefix,
 		CacheResourceFilterIndexes: ResourceFilterIndexes,
 		CacheStatFilterIndexes: StatFilterIndexes,
 		CacheThresholdFilterIndexes: ThresholdFilterIndexes,
@@ -101,6 +102,7 @@ var (
 		CacheAttributeFilterIndexes: AttributeFilterIndexes,
 		CacheChargerFilterIndexes: ChargerFilterIndexes,
 		CacheDispatcherFilterIndexes: DispatcherFilterIndexes,
+		CacheRateFilterIndexes: RateFilterIndexes,
 		CacheLoadIDs: LoadIDPrefix,
 		CacheAccounts: ACCOUNT_PREFIX,
 	}
@@ -113,6 +115,7 @@ var (
 		AttributeProfilePrefix: CacheAttributeFilterIndexes,
 		ChargerProfilePrefix: CacheChargerFilterIndexes,
 		DispatcherProfilePrefix: CacheDispatcherFilterIndexes,
+		RateProfilePrefix: CacheRateFilterIndexes,
 	}
 
 	CacheIndexesToPrefix map[string]string // will be built on init
@@ -129,7 +132,8 @@ var (
 		CacheFilters, CacheRouteProfiles, CacheAttributeProfiles, CacheChargerProfiles,
 		CacheDispatcherProfiles, CacheDispatcherHosts, CacheResourceFilterIndexes,
 		CacheStatFilterIndexes, CacheThresholdFilterIndexes, CacheRouteFilterIndexes,
 		CacheAttributeFilterIndexes,
-		CacheChargerFilterIndexes, CacheDispatcherFilterIndexes, CacheLoadIDs, CacheAccounts})
+		CacheChargerFilterIndexes, CacheDispatcherFilterIndexes, CacheLoadIDs, CacheAccounts,
+		CacheRateProfiles, CacheRateFilterIndexes})
 	CacheStorDBPartitions = NewStringSet([]string{TBLTPTimings, TBLTPDestinations, TBLTPRates,
 		TBLTPDestinationRates, TBLTPRatingPlans, TBLTPRatingProfiles, TBLTPSharedGroups,
@@ -270,6 +274,7 @@ const (
 	AttributeProfilePrefix = "alp_"
 	ChargerProfilePrefix = "cpp_"
 	DispatcherProfilePrefix = "dpp_"
+	RateProfilePrefix = "rep_"
 	DispatcherHostPrefix = "dph_"
 	ThresholdProfilePrefix = "thp_"
 	StatQueuePrefix = "stq_"
@@ -421,6 +426,7 @@ const (
 	Filters = "Filters"
 	DispatcherProfiles = "DispatcherProfiles"
 	DispatcherHosts = "DispatcherHosts"
+	RateProfiles = "RateProfiles"
 	MetaEveryMinute = "*every_minute"
 	MetaHourly = "*hourly"
 	ID = "ID"
@@ -542,6 +548,11 @@ const (
 	RoundingMethod = "RoundingMethod"
 	RoundingDecimals = "RoundingDecimals"
 	MaxCostStrategy = "MaxCostStrategy"
+	RateID = "RateID"
+	RateFilterIDs = "RateFilterIDs"
+	RateWeight = "RateWeight"
+	RateValue = "RateValue"
+	RateBlocker = "RateBlocker"
 	TimingID = "TimingID"
 	RatesID = "RatesID"
 	RatingFiltersID = "RatingFiltersID"
@@ -772,6 +783,7 @@ const (
 	MetaAttributeProfiles = "*attribute_profiles"
 	MetaFilterIndexes = "*filter_indexes"
 	MetaDispatcherProfiles = "*dispatcher_profiles"
+	MetaRateProfiles = "*rate_profiles"
 	MetaChargerProfiles = "*charger_profiles"
 	MetaSharedGroups = "*shared_groups"
 	MetaThresholds = "*thresholds"
@@ -1008,6 +1020,7 @@ const (
 	ReplicatorSv1GetAttributeProfile = "ReplicatorSv1.GetAttributeProfile"
 	ReplicatorSv1GetChargerProfile = "ReplicatorSv1.GetChargerProfile"
 	ReplicatorSv1GetDispatcherProfile = "ReplicatorSv1.GetDispatcherProfile"
+	ReplicatorSv1GetRateProfile = "ReplicatorSv1.GetRateProfile"
 	ReplicatorSv1GetDispatcherHost = "ReplicatorSv1.GetDispatcherHost"
 	ReplicatorSv1GetItemLoadIDs = "ReplicatorSv1.GetItemLoadIDs"
 	ReplicatorSv1GetFilterIndexes = "ReplicatorSv1.GetFilterIndexes"
@@ -1035,6 +1048,7 @@ const (
 	ReplicatorSv1SetAttributeProfile = "ReplicatorSv1.SetAttributeProfile"
 	ReplicatorSv1SetChargerProfile = "ReplicatorSv1.SetChargerProfile"
 	ReplicatorSv1SetDispatcherProfile = "ReplicatorSv1.SetDispatcherProfile"
+	ReplicatorSv1SetRateProfile = "ReplicatorSv1.SetRateProfile"
 	ReplicatorSv1SetDispatcherHost = "ReplicatorSv1.SetDispatcherHost"
 	ReplicatorSv1SetLoadIDs = "ReplicatorSv1.SetLoadIDs"
 	ReplicatorSv1RemoveThreshold = "ReplicatorSv1.RemoveThreshold"
@@ -1058,6 +1072,7 @@ const (
 	ReplicatorSv1RemoveAttributeProfile = "ReplicatorSv1.RemoveAttributeProfile"
 	ReplicatorSv1RemoveChargerProfile = "ReplicatorSv1.RemoveChargerProfile"
 	ReplicatorSv1RemoveDispatcherProfile = "ReplicatorSv1.RemoveDispatcherProfile"
+	ReplicatorSv1RemoveRateProfile = "ReplicatorSv1.RemoveRateProfile"
 	ReplicatorSv1RemoveDispatcherHost = "ReplicatorSv1.RemoveDispatcherHost"
 )
 
@@ -1545,6 +1560,7 @@ const (
 	ChargersCsv = "Chargers.csv"
 	DispatcherProfilesCsv = "DispatcherProfiles.csv"
 	DispatcherHostsCsv = "DispatcherHosts.csv"
+	RateProfilesCsv = "RateProfiles.csv"
 )
 
 // Table Name
@@ -1573,6 +1589,7 @@ const (
 	OldSMCosts = "sm_costs"
 	TBLTPDispatchers = "tp_dispatcher_profiles"
 	TBLTPDispatcherHosts = "tp_dispatcher_hosts"
+	TBLTPRateProfiles = "tp_rate_profiles"
 )
 
 // Cache Name
@@ -1603,6 +1620,7 @@ const (
 	CacheDispatchers = "*dispatchers"
 	CacheDispatcherRoutes = "*dispatcher_routes"
 	CacheDispatcherLoads = "*dispatcher_loads"
+	CacheRateProfiles = "*rate_profiles"
 	CacheResourceFilterIndexes = "*resource_filter_indexes"
 	CacheStatFilterIndexes = "*stat_filter_indexes"
 	CacheThresholdFilterIndexes = "*threshold_filter_indexes"
@@ -1610,6 +1628,7 @@ const (
 	CacheAttributeFilterIndexes = "*attribute_filter_indexes"
 	CacheChargerFilterIndexes = "*charger_filter_indexes"
 	CacheDispatcherFilterIndexes = "*dispatcher_filter_indexes"
+	CacheRateFilterIndexes = "*rate_filter_indexes"
 	CacheDiameterMessages = "*diameter_messages"
 	CacheRPCResponses = "*rpc_responses"
 	CacheClosedSessions = "*closed_sessions"
@@ -1635,6 +1654,7 @@ const (
 	DispatcherFilterIndexes = "dfi_"
 	ActionPlanIndexes = "api_"
 	RouteFilterIndexes = "rti_"
+	RateFilterIndexes = "rei_"
 )
 
 // Agents
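// Illustrative sketch, not part of the patch above: the constants added in this
// file wire the "*rate_profiles" cache instance to the "rep_" DataDB prefix the
// same way the other profile types are wired, so a stored rate profile key is the
// prefix plus the tenant-qualified ID. The tenant and ID below are made up.
package main

import (
	"fmt"

	"github.com/cgrates/cgrates/utils"
)

func main() {
	prefix := utils.CacheInstanceToPrefix[utils.CacheRateProfiles] // "rep_"
	key := prefix + utils.ConcatenatedKey("cgrates.org", "RP_RETAIL")
	fmt.Println(key) // rep_cgrates.org:RP_RETAIL
}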