diff --git a/dispatchers/replicator.go b/dispatchers/replicator.go
index bf68ed1dd..71d6d6875 100644
--- a/dispatchers/replicator.go
+++ b/dispatchers/replicator.go
@@ -129,23 +129,6 @@ func (dS *DispatcherService) ReplicatorSv1GetStatQueueProfile(args *utils.Tenant
}, utils.MetaReplicator, utils.ReplicatorSv1GetStatQueueProfile, args, reply)
}
-func (dS *DispatcherService) ReplicatorSv1GetTiming(args *utils.StringWithAPIOpts, rpl *utils.TPTiming) (err error) {
- if args == nil {
- args = new(utils.StringWithAPIOpts)
- }
- args.Tenant = utils.FirstNonEmpty(args.Tenant, dS.cfg.GeneralCfg().DefaultTenant)
- if len(dS.cfg.DispatcherSCfg().AttributeSConns) != 0 {
- if err = dS.authorize(utils.ReplicatorSv1GetTiming, args.Tenant,
- utils.IfaceAsString(args.APIOpts[utils.OptsAPIKey]), utils.TimePointer(time.Now())); err != nil {
- return
- }
- }
- return dS.Dispatch(&utils.CGREvent{
- Tenant: args.Tenant,
- APIOpts: args.APIOpts,
- }, utils.MetaReplicator, utils.ReplicatorSv1GetTiming, args, rpl)
-}
-
func (dS *DispatcherService) ReplicatorSv1GetResource(args *utils.TenantIDWithAPIOpts, reply *engine.Resource) (err error) {
tnt := dS.cfg.GeneralCfg().DefaultTenant
if args.TenantID != nil && args.TenantID.Tenant != utils.EmptyString {
@@ -421,23 +404,6 @@ func (dS *DispatcherService) ReplicatorSv1SetStatQueueProfile(args *engine.StatQ
}, utils.MetaReplicator, utils.ReplicatorSv1SetStatQueueProfile, args, rpl)
}
-func (dS *DispatcherService) ReplicatorSv1SetTiming(args *utils.TPTimingWithAPIOpts, rpl *string) (err error) {
- if args == nil {
- args = &utils.TPTimingWithAPIOpts{}
- }
- args.Tenant = utils.FirstNonEmpty(args.Tenant, dS.cfg.GeneralCfg().DefaultTenant)
- if len(dS.cfg.DispatcherSCfg().AttributeSConns) != 0 {
- if err = dS.authorize(utils.ReplicatorSv1SetTiming, args.Tenant,
- utils.IfaceAsString(args.APIOpts[utils.OptsAPIKey]), utils.TimePointer(time.Now())); err != nil {
- return
- }
- }
- return dS.Dispatch(&utils.CGREvent{
- Tenant: args.Tenant,
- APIOpts: args.APIOpts,
- }, utils.MetaReplicator, utils.ReplicatorSv1SetTiming, args, rpl)
-}
-
func (dS *DispatcherService) ReplicatorSv1SetResource(args *engine.ResourceWithAPIOpts, rpl *string) (err error) {
if args == nil {
args = &engine.ResourceWithAPIOpts{
@@ -723,23 +689,6 @@ func (dS *DispatcherService) ReplicatorSv1RemoveStatQueueProfile(args *utils.Ten
}, utils.MetaReplicator, utils.ReplicatorSv1RemoveStatQueueProfile, args, rpl)
}
-func (dS *DispatcherService) ReplicatorSv1RemoveTiming(args *utils.StringWithAPIOpts, rpl *string) (err error) {
- if args == nil {
- args = new(utils.StringWithAPIOpts)
- }
- args.Tenant = utils.FirstNonEmpty(args.Tenant, dS.cfg.GeneralCfg().DefaultTenant)
- if len(dS.cfg.DispatcherSCfg().AttributeSConns) != 0 {
- if err = dS.authorize(utils.ReplicatorSv1RemoveTiming, args.Tenant,
- utils.IfaceAsString(args.APIOpts[utils.OptsAPIKey]), utils.TimePointer(time.Now())); err != nil {
- return
- }
- }
- return dS.Dispatch(&utils.CGREvent{
- Tenant: args.Tenant,
- APIOpts: args.APIOpts,
- }, utils.MetaReplicator, utils.ReplicatorSv1RemoveTiming, args, rpl)
-}
-
func (dS *DispatcherService) ReplicatorSv1RemoveResource(args *utils.TenantIDWithAPIOpts, rpl *string) (err error) {
if args == nil {
args = &utils.TenantIDWithAPIOpts{
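The three removed ReplicatorSv1*Timing dispatcher methods all follow the same request pattern visible above: default nil arguments, fall back to the configured default tenant, authorize when AttributeS connections are configured, then dispatch. Below is a minimal, self-contained sketch of that pattern with hypothetical types (stringWithOpts, dispatcher) standing in for the CGRateS ones; it is illustrative only, not the project's API.

package main

import (
	"errors"
	"fmt"
)

// stringWithOpts is a stand-in for utils.StringWithAPIOpts.
type stringWithOpts struct {
	Arg     string
	Tenant  string
	APIOpts map[string]interface{}
}

type dispatcher struct {
	defaultTenant string
	authRequired  bool // true when AttributeS connections are configured
}

func (d *dispatcher) authorize(method, tenant, apiKey string) error {
	if apiKey == "" {
		return errors.New("unauthorized")
	}
	return nil
}

func (d *dispatcher) dispatch(method string, args *stringWithOpts) error {
	fmt.Printf("dispatching %s for tenant %s\n", method, args.Tenant)
	return nil
}

// getTiming mirrors the shape of the removed ReplicatorSv1GetTiming.
func (d *dispatcher) getTiming(args *stringWithOpts) error {
	if args == nil {
		args = &stringWithOpts{}
	}
	if args.Tenant == "" { // FirstNonEmpty(args.Tenant, default tenant)
		args.Tenant = d.defaultTenant
	}
	if d.authRequired {
		apiKey, _ := args.APIOpts["apiKey"].(string) // placeholder option key
		if err := d.authorize("ReplicatorSv1.GetTiming", args.Tenant, apiKey); err != nil {
			return err
		}
	}
	return d.dispatch("ReplicatorSv1.GetTiming", args)
}

func main() {
	d := &dispatcher{defaultTenant: "cgrates.org"}
	if err := d.getTiming(nil); err != nil {
		fmt.Println("error:", err)
	}
}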
diff --git a/engine/datamanager.go b/engine/datamanager.go
index adc7263f0..252c9f246 100644
--- a/engine/datamanager.go
+++ b/engine/datamanager.go
@@ -44,7 +44,6 @@ var (
}
cachePrefixMap = utils.StringSet{
utils.ResourceProfilesPrefix: {},
- utils.TimingsPrefix: {},
utils.ResourcesPrefix: {},
utils.StatQueuePrefix: {},
utils.StatQueueProfilePrefix: {},
@@ -155,8 +154,6 @@ func (dm *DataManager) CacheDataFromDB(ctx *context.Context, prfx string, ids []
case utils.StatQueuePrefix:
tntID := utils.NewTenantID(dataID)
_, err = dm.GetStatQueue(tntID.Tenant, tntID.ID, false, true, utils.NonTransactional)
- case utils.TimingsPrefix:
- _, err = dm.GetTiming(dataID, true, utils.NonTransactional)
case utils.ThresholdProfilePrefix:
tntID := utils.NewTenantID(dataID)
_, err = dm.GetThresholdProfile(tntID.Tenant, tntID.ID, false, true, utils.NonTransactional)
@@ -938,97 +935,6 @@ func (dm *DataManager) RemoveStatQueueProfile(tenant, id,
return
}
-func (dm *DataManager) GetTiming(id string, skipCache bool,
- transactionID string) (t *utils.TPTiming, err error) {
- if !skipCache {
- if x, ok := Cache.Get(utils.CacheTimings, id); ok {
- if x == nil {
- return nil, utils.ErrNotFound
- }
- return x.(*utils.TPTiming), nil
- }
- }
- if dm == nil {
- err = utils.ErrNoDatabaseConn
- return
- }
- t, err = dm.dataDB.GetTimingDrv(id)
- if err != nil {
- if itm := config.CgrConfig().DataDbCfg().Items[utils.CacheTimings]; err == utils.ErrNotFound && itm.Remote {
- if err = dm.connMgr.Call(context.TODO(), config.CgrConfig().DataDbCfg().RmtConns, utils.ReplicatorSv1GetTiming,
- &utils.StringWithAPIOpts{
- Arg: id,
- Tenant: config.CgrConfig().GeneralCfg().DefaultTenant,
- APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID, utils.EmptyString,
- utils.FirstNonEmpty(config.CgrConfig().DataDbCfg().RmtConnID,
- config.CgrConfig().GeneralCfg().NodeID)),
- }, &t); err == nil {
- err = dm.dataDB.SetTimingDrv(t)
- }
- }
- if err != nil {
- err = utils.CastRPCErr(err)
- if err == utils.ErrNotFound {
- if errCh := Cache.Set(context.TODO(), utils.CacheTimings, id, nil, nil,
- cacheCommit(transactionID), transactionID); errCh != nil {
- return nil, errCh
- }
-
- }
- return nil, err
- }
- }
- if errCh := Cache.Set(context.TODO(), utils.CacheTimings, id, t, nil,
- cacheCommit(transactionID), transactionID); errCh != nil {
- return nil, errCh
- }
- return
-}
-
-func (dm *DataManager) SetTiming(ctx *context.Context, t *utils.TPTiming) (err error) {
- if dm == nil {
- return utils.ErrNoDatabaseConn
- }
- if err = dm.DataDB().SetTimingDrv(t); err != nil {
- return
- }
- if err = dm.CacheDataFromDB(ctx, utils.TimingsPrefix, []string{t.ID}, true); err != nil {
- return
- }
- if itm := config.CgrConfig().DataDbCfg().Items[utils.CacheTimings]; itm.Replicate {
- err = replicate(context.TODO(), dm.connMgr, config.CgrConfig().DataDbCfg().RplConns,
- config.CgrConfig().DataDbCfg().RplFiltered,
- utils.TimingsPrefix, t.ID, // this are used to get the host IDs from cache
- utils.ReplicatorSv1SetTiming,
- &utils.TPTimingWithAPIOpts{
- TPTiming: t,
- APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- config.CgrConfig().DataDbCfg().RplCache, utils.EmptyString)})
- }
- return
-}
-
-func (dm *DataManager) RemoveTiming(id, transactionID string) (err error) {
- if dm == nil {
- return utils.ErrNoDatabaseConn
- }
- if err = dm.DataDB().RemoveTimingDrv(id); err != nil {
- return
- }
- if errCh := Cache.Remove(context.TODO(), utils.CacheTimings, id,
- cacheCommit(transactionID), transactionID); errCh != nil {
- return errCh
- }
- if config.CgrConfig().DataDbCfg().Items[utils.CacheTimings].Replicate {
- replicate(context.TODO(), dm.connMgr, config.CgrConfig().DataDbCfg().RplConns,
- config.CgrConfig().DataDbCfg().RplFiltered,
- utils.TimingsPrefix, id, // this are used to get the host IDs from cache
- utils.ReplicatorSv1RemoveTiming,
- id)
- }
- return
-}
-
func (dm *DataManager) GetResource(tenant, id string, cacheRead, cacheWrite bool,
transactionID string) (rs *Resource, err error) {
tntID := utils.ConcatenatedKey(tenant, id)
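The removed DataManager.GetTiming above follows the read path shared by the other DataManager getters: check the cache (a cached nil records a known miss), fall back to the DataDB driver, optionally fetch from the configured remote replicator, and cache negative results. A minimal sketch of that flow, with hypothetical types in place of DataManager, Cache and the replicator connection:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("NOT_FOUND")

type timing struct{ ID string }

type store struct {
	cache  map[string]*timing // a nil value means a cached "not found"
	db     map[string]*timing
	remote func(id string) (*timing, error) // nil when no remote connections are configured
}

func (s *store) getTiming(id string, skipCache bool) (*timing, error) {
	if !skipCache {
		if t, ok := s.cache[id]; ok {
			if t == nil {
				return nil, errNotFound
			}
			return t, nil
		}
	}
	t, ok := s.db[id]
	if !ok && s.remote != nil { // remote fallback, mirroring the RmtConns call
		var err error
		if t, err = s.remote(id); err == nil {
			s.db[id] = t // persist what the remote returned
			ok = true
		}
	}
	if !ok {
		s.cache[id] = nil // negative caching
		return nil, errNotFound
	}
	s.cache[id] = t
	return t, nil
}

func main() {
	s := &store{cache: map[string]*timing{}, db: map[string]*timing{"MIDNIGHT": {ID: "MIDNIGHT"}}}
	t, err := s.getTiming("MIDNIGHT", false)
	fmt.Println(t, err)
	_, err = s.getTiming("MISSING", false)
	fmt.Println(err)
}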
diff --git a/engine/libtest.go b/engine/libtest.go
index 4f04dc5cc..4e9710f32 100644
--- a/engine/libtest.go
+++ b/engine/libtest.go
@@ -400,7 +400,6 @@ func GetDefaultEmptyCacheStats() map[string]*ltcache.CacheStats {
utils.CacheRateProfiles: {},
utils.CacheRateProfilesFilterIndexes: {},
utils.CacheRateFilterIndexes: {},
- utils.CacheTimings: {},
utils.CacheDiameterMessages: {},
utils.CacheClosedSessions: {},
utils.CacheLoadIDs: {},
@@ -418,7 +417,6 @@ func GetDefaultEmptyCacheStats() map[string]*ltcache.CacheStats {
utils.CacheReplicationHosts: {},
utils.CacheVersions: {},
- utils.CacheTBLTPTimings: {},
utils.CacheTBLTPResources: {},
utils.CacheTBLTPStats: {},
utils.CacheTBLTPThresholds: {},
diff --git a/engine/model_helpers.go b/engine/model_helpers.go
index 53eb3d199..7aad936ee 100644
--- a/engine/model_helpers.go
+++ b/engine/model_helpers.go
@@ -146,67 +146,6 @@ func getColumnCount(s interface{}) int {
return count
}
-type TimingMdls []TimingMdl
-
-func (tps TimingMdls) AsMapTPTimings() (map[string]*utils.ApierTPTiming, error) {
- result := make(map[string]*utils.ApierTPTiming)
- for _, tp := range tps {
- t := &utils.ApierTPTiming{
- TPid: tp.Tpid,
- ID: tp.Tag,
- Years: tp.Years,
- Months: tp.Months,
- MonthDays: tp.MonthDays,
- WeekDays: tp.WeekDays,
- Time: tp.Time,
- }
- result[tp.Tag] = t
- }
- return result, nil
-}
-
-func MapTPTimings(tps []*utils.ApierTPTiming) (map[string]*utils.TPTiming, error) {
- result := make(map[string]*utils.TPTiming)
- for _, tp := range tps {
- t := utils.NewTiming(tp.ID, tp.Years, tp.Months, tp.MonthDays, tp.WeekDays, tp.Time)
- if _, found := result[tp.ID]; found {
- return nil, fmt.Errorf("duplicate timing tag: %s", tp.ID)
- }
- result[tp.ID] = t
- }
- return result, nil
-}
-
-func (tps TimingMdls) AsTPTimings() (result []*utils.ApierTPTiming) {
- ats, _ := tps.AsMapTPTimings()
- for _, tp := range ats {
- result = append(result, tp)
- }
- return result
-}
-
-func APItoModelTiming(t *utils.ApierTPTiming) (result TimingMdl) {
- return TimingMdl{
- Tpid: t.TPid,
- Tag: t.ID,
- Years: t.Years,
- Months: t.Months,
- MonthDays: t.MonthDays,
- WeekDays: t.WeekDays,
- Time: t.Time,
- }
-}
-
-func APItoModelTimings(ts []*utils.ApierTPTiming) (result TimingMdls) {
- for _, t := range ts {
- if t != nil {
- at := APItoModelTiming(t)
- result = append(result, at)
- }
- }
- return result
-}
-
type ResourceMdls []*ResourceMdl
// CSVHeader return the header for csv fields as a slice of string
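The removed timing model helpers are a thin mapping layer between storage rows (TimingMdl) and API objects (utils.ApierTPTiming), with MapTPTimings additionally rejecting duplicate tags. A self-contained sketch of the same mapping, using hypothetical timingRow/apiTiming types rather than the real models:

package main

import (
	"fmt"
)

type timingRow struct{ Tpid, Tag, Years, Months, MonthDays, WeekDays, Time string }

type apiTiming struct{ TPid, ID, Years, Months, MonthDays, WeekDays, Time string }

// rowsToAPI flattens storage rows into API objects, one per tag.
func rowsToAPI(rows []timingRow) []*apiTiming {
	out := make([]*apiTiming, 0, len(rows))
	for _, r := range rows {
		out = append(out, &apiTiming{TPid: r.Tpid, ID: r.Tag, Years: r.Years,
			Months: r.Months, MonthDays: r.MonthDays, WeekDays: r.WeekDays, Time: r.Time})
	}
	return out
}

// apiToMap keys API objects by ID and rejects duplicate tags, as MapTPTimings did.
func apiToMap(tps []*apiTiming) (map[string]*apiTiming, error) {
	out := make(map[string]*apiTiming, len(tps))
	for _, tp := range tps {
		if _, found := out[tp.ID]; found {
			return nil, fmt.Errorf("duplicate timing tag: %s", tp.ID)
		}
		out[tp.ID] = tp
	}
	return out, nil
}

func main() {
	rows := []timingRow{{Tpid: "TP1", Tag: "WORKDAYS", WeekDays: "1;2;3;4;5", Time: "08:00:00"}}
	m, err := apiToMap(rowsToAPI(rows))
	fmt.Println(len(m), err)
}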
diff --git a/engine/models.go b/engine/models.go
index 1080734e4..50d01b64c 100644
--- a/engine/models.go
+++ b/engine/models.go
@@ -27,30 +27,6 @@ import (
// Structs here are one to one mapping of the tables and fields
// to be used by gorm orm
-type TimingMdl struct {
- ID int64
- Tpid string
- Tag string `index:"0" re:"\w+\s*,\s*"`
- Years string `index:"1" re:"\*any\s*,\s*|(?:\d{1,4};?)+\s*,\s*|\s*,\s*"`
- Months string `index:"2" re:"\*any\s*,\s*|(?:\d{1,4};?)+\s*,\s*|\s*,\s*"`
- MonthDays string `index:"3" re:"\*any\s*,\s*|(?:\d{1,4};?)+\s*,\s*|\s*,\s*"`
- WeekDays string `index:"4" re:"\*any\s*,\s*|(?:\d{1,4};?)+\s*,\s*|\s*,\s*"`
- Time string `index:"5" re:"\d{2}:\d{2}:\d{2}|\*asap"`
- CreatedAt time.Time
-}
-
-func (TimingMdl) TableName() string {
- return utils.TBLTPTimings
-}
-
-type DestinationMdl struct {
- ID int64
- Tpid string
- Tag string `index:"0" re:"\w+\s*,\s*"`
- Prefix string `index:"1" re:"\+?\d+.?\d*"`
- CreatedAt time.Time
-}
-
type ResourceMdl struct {
PK uint `gorm:"primary_key"`
Tpid string
diff --git a/engine/storage_csv.go b/engine/storage_csv.go
index 423b1e9ee..e94ae7910 100644
--- a/engine/storage_csv.go
+++ b/engine/storage_csv.go
@@ -60,14 +60,13 @@ type CSVStorage struct {
}
// NewCSVStorage creates a CSV storage that takes the data from the paths specified
-func NewCSVStorage(sep rune, timingsFn,
+func NewCSVStorage(sep rune,
resProfilesFn, statsFn, thresholdsFn, filterFn, routeProfilesFn,
attributeProfilesFn, chargerProfilesFn, dispatcherProfilesFn, dispatcherHostsFn,
rateProfilesFn, actionProfilesFn, accountsFn []string) *CSVStorage {
return &CSVStorage{
sep: sep,
generator: NewCsvFile,
- timingsFn: timingsFn,
resProfilesFn: resProfilesFn,
statsFn: statsFn,
thresholdsFn: thresholdsFn,
@@ -89,7 +88,6 @@ func NewFileCSVStorage(sep rune, dataPath string) *CSVStorage {
if err != nil {
log.Fatal(err)
}
- timingsPaths := appendName(allFoldersPath, utils.TimingsCsv)
resourcesPaths := appendName(allFoldersPath, utils.ResourcesCsv)
statsPaths := appendName(allFoldersPath, utils.StatsCsv)
thresholdsPaths := appendName(allFoldersPath, utils.ThresholdsCsv)
@@ -103,7 +101,6 @@ func NewFileCSVStorage(sep rune, dataPath string) *CSVStorage {
actionProfilesFn := appendName(allFoldersPath, utils.ActionProfilesCsv)
accountsFn := appendName(allFoldersPath, utils.AccountsCsv)
return NewCSVStorage(sep,
- timingsPaths,
resourcesPaths,
statsPaths,
thresholdsPaths,
@@ -124,7 +121,7 @@ func NewStringCSVStorage(sep rune, timingsFn,
resProfilesFn, statsFn, thresholdsFn, filterFn, routeProfilesFn,
attributeProfilesFn, chargerProfilesFn, dispatcherProfilesFn, dispatcherHostsFn,
rateProfilesFn, actionProfilesFn, accountsFn string) *CSVStorage {
- c := NewCSVStorage(sep, []string{timingsFn},
+ c := NewCSVStorage(sep,
[]string{resProfilesFn}, []string{statsFn}, []string{thresholdsFn}, []string{filterFn},
[]string{routeProfilesFn}, []string{attributeProfilesFn}, []string{chargerProfilesFn},
[]string{dispatcherProfilesFn}, []string{dispatcherHostsFn}, []string{rateProfilesFn},
@@ -150,7 +147,6 @@ func NewGoogleCSVStorage(sep rune, spreadsheetID string) (*CSVStorage, error) {
return []string{}
}
c := NewCSVStorage(sep,
- getIfExist(utils.Timings),
getIfExist(utils.Resources),
getIfExist(utils.Stats),
getIfExist(utils.Thresholds),
@@ -174,7 +170,6 @@ func NewGoogleCSVStorage(sep rune, spreadsheetID string) (*CSVStorage, error) {
// NewURLCSVStorage returns a CSVStorage that can parse URLs
func NewURLCSVStorage(sep rune, dataPath string) *CSVStorage {
- var timingsPaths []string
var resourcesPaths []string
var statsPaths []string
var thresholdsPaths []string
@@ -190,7 +185,6 @@ func NewURLCSVStorage(sep rune, dataPath string) *CSVStorage {
for _, baseURL := range strings.Split(dataPath, utils.InfieldSep) {
if !strings.HasSuffix(baseURL, utils.CSVSuffix) {
- timingsPaths = append(timingsPaths, joinURL(baseURL, utils.TimingsCsv))
resourcesPaths = append(resourcesPaths, joinURL(baseURL, utils.ResourcesCsv))
statsPaths = append(statsPaths, joinURL(baseURL, utils.StatsCsv))
thresholdsPaths = append(thresholdsPaths, joinURL(baseURL, utils.ThresholdsCsv))
@@ -206,8 +200,6 @@ func NewURLCSVStorage(sep rune, dataPath string) *CSVStorage {
continue
}
switch {
- case strings.HasSuffix(baseURL, utils.TimingsCsv):
- timingsPaths = append(timingsPaths, baseURL)
case strings.HasSuffix(baseURL, utils.ResourcesCsv):
resourcesPaths = append(resourcesPaths, baseURL)
case strings.HasSuffix(baseURL, utils.StatsCsv):
@@ -237,7 +229,6 @@ func NewURLCSVStorage(sep rune, dataPath string) *CSVStorage {
}
c := NewCSVStorage(sep,
- timingsPaths,
resourcesPaths,
statsPaths,
thresholdsPaths,
@@ -318,18 +309,6 @@ func (csvs *CSVStorage) proccesData(listType interface{}, fns []string, process
return nil
}
-func (csvs *CSVStorage) GetTPTimings(tpid, id string) ([]*utils.ApierTPTiming, error) {
- var tpTimings TimingMdls
- if err := csvs.proccesData(TimingMdl{}, csvs.timingsFn, func(tp interface{}) {
- tm := tp.(TimingMdl)
- tm.Tpid = tpid
- tpTimings = append(tpTimings, tm)
- }); err != nil {
- return nil, err
- }
- return tpTimings.AsTPTimings(), nil
-}
-
func (csvs *CSVStorage) GetTPResources(tpid, tenant, id string) ([]*utils.TPResourceProfile, error) {
var tpResLimits ResourceMdls
if err := csvs.proccesData(ResourceMdl{}, csvs.resProfilesFn, func(tp interface{}) {
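The removed CSVStorage.GetTPTimings had the same shape as the remaining GetTP* methods: hand a per-row callback to the shared proccesData helper and accumulate typed models. A rough, self-contained sketch of that callback pattern (processData and getTPTimings here are simplified stand-ins, not the real implementations):

package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

type timingRow struct{ Tag, Years, Months, MonthDays, WeekDays, Time string }

// processData plays the role of the storage's shared row-processing helper.
func processData(data string, process func(record []string)) error {
	r := csv.NewReader(strings.NewReader(data))
	r.Comment = '#'
	records, err := r.ReadAll()
	if err != nil {
		return err
	}
	for _, rec := range records {
		process(rec)
	}
	return nil
}

// getTPTimings accumulates typed rows through the callback.
func getTPTimings(csvData string) ([]timingRow, error) {
	var rows []timingRow
	err := processData(csvData, func(rec []string) {
		if len(rec) < 6 {
			return
		}
		rows = append(rows, timingRow{rec[0], rec[1], rec[2], rec[3], rec[4], rec[5]})
	})
	return rows, err
}

func main() {
	data := "#Tag,Years,Months,MonthDays,WeekDays,Time\nWORKDAYS,*any,*any,*any,1;2;3;4;5,08:00:00\n"
	rows, err := getTPTimings(data)
	fmt.Println(rows, err)
}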
diff --git a/engine/storage_interface.go b/engine/storage_interface.go
index 13161f780..e85910bcc 100644
--- a/engine/storage_interface.go
+++ b/engine/storage_interface.go
@@ -54,9 +54,6 @@ type DataDB interface {
GetResourceDrv(string, string) (*Resource, error)
SetResourceDrv(*Resource) error
RemoveResourceDrv(string, string) error
- GetTimingDrv(string) (*utils.TPTiming, error)
- SetTimingDrv(*utils.TPTiming) error
- RemoveTimingDrv(string) error
GetLoadHistory(int, bool, string) ([]*utils.LoadInstance, error)
AddLoadHistory(*utils.LoadInstance, int, string) error
GetIndexesDrv(ctx *context.Context, idxItmType, tntCtx, idxKey string) (indexes map[string]utils.StringSet, err error)
@@ -136,7 +133,6 @@ type LoadReader interface {
GetTpIds(string) ([]string, error)
GetTpTableIds(string, string, []string,
map[string]string, *utils.PaginatorWithSearch) ([]string, error)
- GetTPTimings(string, string) ([]*utils.ApierTPTiming, error)
GetTPResources(string, string, string) ([]*utils.TPResourceProfile, error)
GetTPStats(string, string, string) ([]*utils.TPStatProfile, error)
GetTPThresholds(string, string, string) ([]*utils.TPThresholdProfile, error)
@@ -153,7 +149,6 @@ type LoadReader interface {
type LoadWriter interface {
RemTpData(string, string, map[string]string) error
- SetTPTimings([]*utils.ApierTPTiming) error
SetTPResources([]*utils.TPResourceProfile) error
SetTPStats([]*utils.TPStatProfile) error
SetTPThresholds([]*utils.TPThresholdProfile) error
diff --git a/engine/storage_internal_datadb.go b/engine/storage_internal_datadb.go
index 90763a4c7..032dec6e7 100644
--- a/engine/storage_internal_datadb.go
+++ b/engine/storage_internal_datadb.go
@@ -217,26 +217,6 @@ func (iDB *InternalDB) RemoveResourceDrv(tenant, id string) (err error) {
return
}
-func (iDB *InternalDB) GetTimingDrv(id string) (tmg *utils.TPTiming, err error) {
- x, ok := Cache.Get(utils.CacheTimings, id)
- if !ok || x == nil {
- return nil, utils.ErrNotFound
- }
- return x.(*utils.TPTiming), nil
-}
-
-func (iDB *InternalDB) SetTimingDrv(timing *utils.TPTiming) (err error) {
- Cache.SetWithoutReplicate(utils.CacheTimings, timing.ID, timing, nil,
- cacheCommit(utils.NonTransactional), utils.NonTransactional)
- return
-}
-
-func (iDB *InternalDB) RemoveTimingDrv(id string) (err error) {
- Cache.RemoveWithoutReplicate(utils.CacheTimings, id,
- cacheCommit(utils.NonTransactional), utils.NonTransactional)
- return
-}
-
func (iDB *InternalDB) GetLoadHistory(int, bool, string) ([]*utils.LoadInstance, error) {
return nil, nil
}
diff --git a/engine/storage_internal_stordb.go b/engine/storage_internal_stordb.go
index ffd1b5ea9..932d1687e 100644
--- a/engine/storage_internal_stordb.go
+++ b/engine/storage_internal_stordb.go
@@ -55,26 +55,6 @@ func (iDB *InternalDB) GetTpTableIds(tpid, table string, distinct []string,
return
}
-func (iDB *InternalDB) GetTPTimings(tpid, id string) (timings []*utils.ApierTPTiming, err error) {
- key := tpid
- if id != utils.EmptyString {
- key += utils.ConcatenatedKeySep + id
- }
-
- ids := Cache.GetItemIDs(utils.CacheTBLTPTimings, key)
- for _, id := range ids {
- x, ok := Cache.Get(utils.CacheTBLTPTimings, id)
- if !ok || x == nil {
- return nil, utils.ErrNotFound
- }
- timings = append(timings, x.(*utils.ApierTPTiming))
- }
- if len(timings) == 0 {
- return nil, utils.ErrNotFound
- }
- return
-}
-
func (iDB *InternalDB) GetTPResources(tpid, tenant, id string) (resources []*utils.TPResourceProfile, err error) {
key := tpid
if tenant != utils.EmptyString {
@@ -370,17 +350,6 @@ func (iDB *InternalDB) RemTpData(table, tpid string, args map[string]string) (er
return
}
-func (iDB *InternalDB) SetTPTimings(timings []*utils.ApierTPTiming) (err error) {
- if len(timings) == 0 {
- return nil
- }
- for _, timing := range timings {
- Cache.SetWithoutReplicate(utils.CacheTBLTPTimings, utils.ConcatenatedKey(timing.TPid, timing.ID), timing, nil,
- cacheCommit(utils.NonTransactional), utils.NonTransactional)
- }
- return
-}
-
func (iDB *InternalDB) SetTPResources(resources []*utils.TPResourceProfile) (err error) {
if len(resources) == 0 {
return nil
diff --git a/engine/storage_mongo_datadb.go b/engine/storage_mongo_datadb.go
index f90d302cb..a85555b15 100644
--- a/engine/storage_mongo_datadb.go
+++ b/engine/storage_mongo_datadb.go
@@ -317,14 +317,6 @@ func (ms *MongoStorage) ensureIndexesForCol(col string) (err error) { // exporte
if err = ms.enusureIndex(col, true, "id"); err != nil {
return
}
- //StorDB
- case utils.TBLTPTimings,
- utils.TBLTPStats, utils.TBLTPResources, utils.TBLTPDispatchers,
- utils.TBLTPDispatcherHosts, utils.TBLTPChargers,
- utils.TBLTPRoutes, utils.TBLTPThresholds:
- if err = ms.enusureIndex(col, true, "tpid", "id"); err != nil {
- return
- }
case utils.CDRsTBL:
if err = ms.enusureIndex(col, true, CGRIDLow, RunIDLow,
OriginIDLow); err != nil {
@@ -372,15 +364,6 @@ func (ms *MongoStorage) EnsureIndexes(cols ...string) (err error) {
}
}
}
- if ms.storageType == utils.StorDB {
- for _, col := range []string{utils.TBLTPTimings,
- utils.TBLTPStats, utils.TBLTPResources,
- utils.CDRsTBL, utils.SessionCostsTBL} {
- if err = ms.ensureIndexesForCol(col); err != nil {
- return
- }
- }
- }
return
}
@@ -419,8 +402,6 @@ func (ms *MongoStorage) RemoveKeysForPrefix(prefix string) (err error) {
colName = ColLht
case utils.VersionPrefix:
colName = ColVer
- case utils.TimingsPrefix:
- colName = ColTmg
case utils.ResourcesPrefix:
colName = ColRes
case utils.ResourceProfilesPrefix:
@@ -559,8 +540,6 @@ func (ms *MongoStorage) GetKeysForPrefix(ctx *context.Context, prefix string) (r
result, err = ms.getField2(sctx, ColSqs, utils.StatQueuePrefix, subject, tntID)
case utils.StatQueueProfilePrefix:
result, err = ms.getField2(sctx, ColSqp, utils.StatQueueProfilePrefix, subject, tntID)
- case utils.TimingsPrefix:
- result, err = ms.getField(sctx, ColTmg, utils.TimingsPrefix, subject, "id")
case utils.FilterPrefix:
result, err = ms.getField2(sctx, ColFlt, utils.FilterPrefix, subject, tntID)
case utils.ThresholdPrefix:
diff --git a/engine/storage_mongo_stordb.go b/engine/storage_mongo_stordb.go
index 707e060a6..0ad049379 100644
--- a/engine/storage_mongo_stordb.go
+++ b/engine/storage_mongo_stordb.go
@@ -153,33 +153,6 @@ func (ms *MongoStorage) GetTpTableIds(tpid, table string, distinct []string,
return distinctIds.AsSlice(), nil
}
-func (ms *MongoStorage) GetTPTimings(tpid, id string) ([]*utils.ApierTPTiming, error) {
- filter := bson.M{"tpid": tpid}
- if id != "" {
- filter["id"] = id
- }
- var results []*utils.ApierTPTiming
- err := ms.query(context.TODO(), func(sctx mongo.SessionContext) (err error) {
- cur, err := ms.getCol(utils.TBLTPTimings).Find(sctx, filter)
- if err != nil {
- return err
- }
- for cur.Next(sctx) {
- var el utils.ApierTPTiming
- err := cur.Decode(&el)
- if err != nil {
- return err
- }
- results = append(results, &el)
- }
- if len(results) == 0 {
- return utils.ErrNotFound
- }
- return cur.Close(sctx)
- })
- return results, err
-}
-
func (ms *MongoStorage) GetTPResources(tpid, tenant, id string) ([]*utils.TPResourceProfile, error) {
filter := bson.M{"tpid": tpid}
if id != "" {
@@ -285,24 +258,6 @@ func (ms *MongoStorage) RemTpData(table, tpid string, args map[string]string) er
})
}
-func (ms *MongoStorage) SetTPTimings(tps []*utils.ApierTPTiming) error {
- if len(tps) == 0 {
- return nil
- }
- return ms.query(context.TODO(), func(sctx mongo.SessionContext) (err error) {
- for _, tp := range tps {
- _, err = ms.getCol(utils.TBLTPTimings).UpdateOne(sctx, bson.M{"tpid": tp.TPid, "id": tp.ID},
- bson.M{"$set": tp},
- options.Update().SetUpsert(true),
- )
- if err != nil {
- return err
- }
- }
- return nil
- })
-}
-
func (ms *MongoStorage) SetTPResources(tpRLs []*utils.TPResourceProfile) (err error) {
if len(tpRLs) == 0 {
return
diff --git a/engine/storage_redis.go b/engine/storage_redis.go
index 5c4a0b2eb..c97e4d269 100644
--- a/engine/storage_redis.go
+++ b/engine/storage_redis.go
@@ -381,30 +381,6 @@ func (rs *RedisStorage) RemoveResourceDrv(tenant, id string) (err error) {
return rs.Cmd(nil, redisDEL, utils.ResourcesPrefix+utils.ConcatenatedKey(tenant, id))
}
-func (rs *RedisStorage) GetTimingDrv(id string) (t *utils.TPTiming, err error) {
- var values []byte
- if err = rs.Cmd(&values, redisGET, utils.TimingsPrefix+id); err != nil {
- return
- } else if len(values) == 0 {
- err = utils.ErrNotFound
- return
- }
- err = rs.ms.Unmarshal(values, &t)
- return
-}
-
-func (rs *RedisStorage) SetTimingDrv(t *utils.TPTiming) (err error) {
- var result []byte
- if result, err = rs.ms.Marshal(t); err != nil {
- return
- }
- return rs.Cmd(nil, redisSET, utils.TimingsPrefix+t.ID, string(result))
-}
-
-func (rs *RedisStorage) RemoveTimingDrv(id string) (err error) {
- return rs.Cmd(nil, redisDEL, utils.TimingsPrefix+id)
-}
-
func (rs *RedisStorage) GetVersions(itm string) (vrs Versions, err error) {
if itm != "" {
var fldVal int64
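The removed RedisStorage timing driver is the usual key-prefix plus marshal/unmarshal round trip: SET stores the encoded timing under "tmg_"+ID, GET decodes it and maps an empty reply to ErrNotFound, DEL removes the key. A sketch of that round trip, with an in-memory map standing in for the Redis commands and JSON standing in for the configured marshaler:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

const timingsPrefix = "tmg_"

var errNotFound = errors.New("NOT_FOUND")

type timing struct{ ID, Time string }

// kv stands in for the Redis GET/SET/DEL commands used by the driver.
type kv map[string][]byte

func setTimingDrv(store kv, t *timing) error {
	b, err := json.Marshal(t)
	if err != nil {
		return err
	}
	store[timingsPrefix+t.ID] = b
	return nil
}

func getTimingDrv(store kv, id string) (*timing, error) {
	values, ok := store[timingsPrefix+id]
	if !ok || len(values) == 0 {
		return nil, errNotFound
	}
	t := new(timing)
	if err := json.Unmarshal(values, t); err != nil {
		return nil, err
	}
	return t, nil
}

func removeTimingDrv(store kv, id string) error {
	delete(store, timingsPrefix+id)
	return nil
}

func main() {
	store := kv{}
	_ = setTimingDrv(store, &timing{ID: "ASAP", Time: "*asap"})
	t, err := getTimingDrv(store, "ASAP")
	fmt.Println(t, err)
	_ = removeTimingDrv(store, "ASAP")
	_, err = getTimingDrv(store, "ASAP")
	fmt.Println(err)
}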
diff --git a/engine/storage_sql.go b/engine/storage_sql.go
index b0485a1f1..95a020d5d 100644
--- a/engine/storage_sql.go
+++ b/engine/storage_sql.go
@@ -99,7 +99,6 @@ func (sqls *SQLStorage) CreateTablesFromScript(scriptPath string) error {
func (sqls *SQLStorage) IsDBEmpty() (resp bool, err error) {
tbls := []string{
- utils.TBLTPTimings,
utils.TBLTPResources, utils.TBLTPStats, utils.TBLTPThresholds,
utils.TBLTPFilters, utils.SessionCostsTBL, utils.CDRsTBL,
utils.TBLVersions, utils.TBLTPRoutes, utils.TBLTPAttributes, utils.TBLTPChargers,
@@ -122,7 +121,6 @@ func (sqls *SQLStorage) GetTpIds(colName string) ([]string, error) {
var qryStr string
if colName == "" {
for _, clNm := range []string{
- utils.TBLTPTimings,
utils.TBLTPResources,
utils.TBLTPStats,
utils.TBLTPThresholds,
@@ -228,7 +226,7 @@ func (sqls *SQLStorage) RemTpData(table, tpid string, args map[string]string) er
tx := sqls.db.Begin()
if len(table) == 0 { // Remove tpid out of all tables
- for _, tblName := range []string{utils.TBLTPTimings,
+ for _, tblName := range []string{
utils.TBLTPResources, utils.TBLTPStats, utils.TBLTPThresholds,
utils.TBLTPFilters, utils.TBLTPRoutes, utils.TBLTPAttributes,
utils.TBLTPChargers, utils.TBLTPDispatchers, utils.TBLTPDispatcherHosts, utils.TBLTPAccounts,
@@ -255,27 +253,6 @@ func (sqls *SQLStorage) RemTpData(table, tpid string, args map[string]string) er
return nil
}
-func (sqls *SQLStorage) SetTPTimings(timings []*utils.ApierTPTiming) error {
- if len(timings) == 0 {
- return nil
- }
-
- tx := sqls.db.Begin()
- for _, timing := range timings {
- if err := tx.Where(&TimingMdl{Tpid: timing.TPid, Tag: timing.ID}).Delete(TimingMdl{}).Error; err != nil {
- tx.Rollback()
- return err
- }
- t := APItoModelTiming(timing)
- if err := tx.Create(&t).Error; err != nil {
- tx.Rollback()
- return err
- }
- }
- tx.Commit()
- return nil
-}
-
func (sqls *SQLStorage) SetTPResources(rls []*utils.TPResourceProfile) error {
if len(rls) == 0 {
return nil
@@ -834,22 +811,6 @@ func (sqls *SQLStorage) GetCDRs(qryFltr *utils.CDRsFilter, remove bool) ([]*CDR,
return cdrs, 0, nil
}
-func (sqls *SQLStorage) GetTPTimings(tpid, id string) ([]*utils.ApierTPTiming, error) {
- var tpTimings TimingMdls
- q := sqls.db.Where("tpid = ?", tpid)
- if len(id) != 0 {
- q = q.Where("tag = ?", id)
- }
- if err := q.Find(&tpTimings).Error; err != nil {
- return nil, err
- }
- ts := tpTimings.AsTPTimings()
- if len(ts) == 0 {
- return ts, utils.ErrNotFound
- }
- return ts, nil
-}
-
func (sqls *SQLStorage) GetTPResources(tpid, tenant, id string) ([]*utils.TPResourceProfile, error) {
var rls ResourceMdls
q := sqls.db.Where("tpid = ?", tpid)
diff --git a/engine/tpexporter.go b/engine/tpexporter.go
index c5f2d5d55..5fe34d50e 100644
--- a/engine/tpexporter.go
+++ b/engine/tpexporter.go
@@ -85,17 +85,6 @@ func (tpExp *TPExporter) Run() error {
var withError bool
toExportMap := make(map[string][]interface{})
- storDataTimings, err := tpExp.storDB.GetTPTimings(tpExp.tpID, "")
- if err != nil && err.Error() != utils.ErrNotFound.Error() {
- utils.Logger.Warning(fmt.Sprintf("<%s> error: %s, when getting %s from stordb for export", utils.AdminS, err, utils.TpTiming))
- withError = true
-
- }
- storDataModelTimings := APItoModelTimings(storDataTimings)
- for i, sd := range storDataModelTimings {
- toExportMap[utils.TimingsCsv][i] = sd
- }
-
storDataResources, err := tpExp.storDB.GetTPResources(tpExp.tpID, "", "")
if err != nil && err.Error() != utils.ErrNotFound.Error() {
utils.Logger.Warning(fmt.Sprintf("<%s> error: %s, when getting %s from stordb for export", utils.AdminS, err, utils.TpResources))
diff --git a/engine/tpimporter_csv.go b/engine/tpimporter_csv.go
index c84b1208f..1afe0e396 100644
--- a/engine/tpimporter_csv.go
+++ b/engine/tpimporter_csv.go
@@ -40,7 +40,6 @@ type TPCSVImporter struct {
// Maps csv file to handler which should process it. Defined like this since tests on 1.0.3 were failing on Travis.
// Change it to func(string) error as soon as Travis updates.
var fileHandlers = map[string]func(*TPCSVImporter, string) error{
- utils.TimingsCsv: (*TPCSVImporter).importTimings,
utils.ResourcesCsv: (*TPCSVImporter).importResources,
utils.StatsCsv: (*TPCSVImporter).importStats,
utils.ThresholdsCsv: (*TPCSVImporter).importThresholds,
@@ -75,22 +74,6 @@ func (tpImp *TPCSVImporter) Run() error {
return nil
}
-// Handler importing timings from file, saved row by row to storDb
-func (tpImp *TPCSVImporter) importTimings(fn string) error {
- if tpImp.Verbose {
- log.Printf("Processing file: <%s> ", fn)
- }
- tps, err := tpImp.csvr.GetTPTimings(tpImp.TPid, "")
- if err != nil {
- return err
- }
- for i := 0; i < len(tps); i++ {
- tps[i].TPid = tpImp.TPid
- }
-
- return tpImp.StorDB.SetTPTimings(tps)
-}
-
func (tpImp *TPCSVImporter) importResources(fn string) error {
if tpImp.Verbose {
log.Printf("Processing file: <%s> ", fn)
diff --git a/engine/tpreader.go b/engine/tpreader.go
index 716d6cdb1..ef084f94c 100644
--- a/engine/tpreader.go
+++ b/engine/tpreader.go
@@ -93,24 +93,6 @@ func (tpr *TpReader) Init() {
tpr.acntActionPlans = make(map[string][]string)
}
-func (tpr *TpReader) LoadTimings() (err error) {
- tps, err := tpr.lr.GetTPTimings(tpr.tpid, "")
- if err != nil {
- return err
- }
- var tpTimings map[string]*utils.TPTiming
- if tpTimings, err = MapTPTimings(tps); err != nil {
- return
- }
- // add default timings
- tpr.addDefaultTimings()
- // add timings defined by user
- for timingID, timing := range tpTimings {
- tpr.timings[timingID] = timing
- }
- return err
-}
-
func (tpr *TpReader) LoadResourceProfilesFiltered(tag string) (err error) {
rls, err := tpr.lr.GetTPResources(tpr.tpid, "", tag)
if err != nil {
@@ -358,9 +340,6 @@ func (tpr *TpReader) LoadDispatcherHosts() error {
}
func (tpr *TpReader) LoadAll() (err error) {
- if err = tpr.LoadTimings(); err != nil && err.Error() != utils.NotFoundCaps {
- return
- }
if err = tpr.LoadFilters(); err != nil && err.Error() != utils.NotFoundCaps {
return
}
@@ -726,21 +705,6 @@ func (tpr *TpReader) WriteToDatabase(verbose, disableReverse bool) (err error) {
loadIDs[utils.CacheAccounts] = loadID
}
- if verbose {
- log.Print("Timings:")
- }
- for _, t := range tpr.timings {
- if err = tpr.dm.SetTiming(context.Background(), t); err != nil {
- return
- }
- if verbose {
- log.Print("\t", t.ID)
- }
- }
- if len(tpr.timings) != 0 {
- loadIDs[utils.CacheTimings] = loadID
- }
-
return tpr.dm.SetLoadIDs(context.TODO(), loadIDs)
}
@@ -790,14 +754,6 @@ func (tpr *TpReader) GetLoadedIds(categ string) ([]string, error) {
keys[i] = k.TenantID()
}
return keys, nil
- case utils.TimingsPrefix:
- keys := make([]string, len(tpr.timings))
- i := 0
- for k := range tpr.timings {
- keys[i] = k
- i++
- }
- return keys, nil
case utils.ResourceProfilesPrefix:
keys := make([]string, len(tpr.resProfiles))
i := 0
@@ -1065,17 +1021,6 @@ func (tpr *TpReader) RemoveFromDatabase(verbose, disableReverse bool) (err error
}
}
- if verbose {
- log.Print("Timings:")
- }
- for _, t := range tpr.timings {
- if err = tpr.dm.RemoveTiming(t.ID, utils.NonTransactional); err != nil {
- return
- }
- if verbose {
- log.Print("\t", t.ID)
- }
- }
//We remove the filters at the end because of indexes
if verbose {
log.Print("Filters:")
@@ -1134,9 +1079,6 @@ func (tpr *TpReader) RemoveFromDatabase(verbose, disableReverse bool) (err error
if len(tpr.accounts) != 0 {
loadIDs[utils.CacheAccounts] = loadID
}
- if len(tpr.timings) != 0 {
- loadIDs[utils.CacheTimings] = loadID
- }
return tpr.dm.SetLoadIDs(context.TODO(), loadIDs)
}
@@ -1149,7 +1091,6 @@ func (tpr *TpReader) ReloadCache(ctx *context.Context, caching string, verbose b
return
}
// take IDs for each type
- tmgIds, _ := tpr.GetLoadedIds(utils.TimingsPrefix)
rspIDs, _ := tpr.GetLoadedIds(utils.ResourceProfilesPrefix)
resIDs, _ := tpr.GetLoadedIds(utils.ResourcesPrefix)
stqIDs, _ := tpr.GetLoadedIds(utils.StatQueuePrefix)
@@ -1168,7 +1109,6 @@ func (tpr *TpReader) ReloadCache(ctx *context.Context, caching string, verbose b
//compose Reload Cache argument
cacheArgs := map[string][]string{
- utils.TimingIDs: tmgIds,
utils.ResourceProfileIDs: rspIDs,
utils.ResourceIDs: resIDs,
utils.StatsQueueIDs: stqIDs,
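The removed TpReader timing path loaded tariff-plan timings, merged them over the defaults added by addDefaultTimings, wrote them through the DataManager and recorded a load ID for the timings cache partition. A simplified sketch of that flow; the types and the default timing IDs used here are placeholders, not the real ones:

package main

import (
	"fmt"
	"time"
)

type timing struct{ ID string }

type tpReader struct {
	source  map[string]*timing // plays the LoadReader role
	db      map[string]*timing // plays the DataManager role
	timings map[string]*timing
	loadIDs map[string]int64
}

func (r *tpReader) loadTimings() {
	// built-in defaults come first (the removed code calls addDefaultTimings;
	// the IDs below are placeholders), user-defined timings override them
	r.timings = map[string]*timing{"*any": {ID: "*any"}, "*asap": {ID: "*asap"}}
	for id, t := range r.source {
		r.timings[id] = t
	}
}

func (r *tpReader) writeToDatabase() {
	for _, t := range r.timings {
		r.db[t.ID] = t
	}
	if len(r.timings) != 0 {
		r.loadIDs["*timings"] = time.Now().UnixNano() // record a load ID for the partition
	}
}

func main() {
	r := &tpReader{
		source:  map[string]*timing{"WORKDAYS": {ID: "WORKDAYS"}},
		db:      map[string]*timing{},
		loadIDs: map[string]int64{},
	}
	r.loadTimings()
	r.writeToDatabase()
	fmt.Println(len(r.db), r.loadIDs["*timings"] != 0)
}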
diff --git a/engine/version.go b/engine/version.go
index 22b75133a..19de6602b 100644
--- a/engine/version.go
+++ b/engine/version.go
@@ -146,7 +146,6 @@ func CurrentDataDBVersions() Versions {
utils.Thresholds: 4,
utils.Routes: 2,
utils.Attributes: 7,
- utils.Timing: 1,
utils.RQF: 5,
utils.Resource: 1,
utils.Subscribers: 1,
@@ -169,7 +168,6 @@ func CurrentStorDBVersions() Versions {
utils.TpRoutes: 1,
utils.TpStats: 1,
utils.TpResources: 1,
- utils.TpTiming: 1,
utils.TpResource: 1,
utils.TpChargers: 1,
utils.TpDispatchers: 1,
diff --git a/migrator/migrator.go b/migrator/migrator.go
index d08d4d548..a8e29016a 100644
--- a/migrator/migrator.go
+++ b/migrator/migrator.go
@@ -120,8 +120,6 @@ func (m *Migrator) Migrate(taskIDs []string) (err error, stats map[string]int) {
//only Move
case utils.MetaActionProfiles:
err = m.migrateActionProfiles()
- case utils.CacheTimings:
- err = m.migrateTimings()
case utils.MetaResources:
err = m.migrateResources()
case utils.MetaRateProfiles:
@@ -147,8 +145,6 @@ func (m *Migrator) Migrate(taskIDs []string) (err error, stats map[string]int) {
err = m.migrateTPActionProfiles()
case utils.MetaTpResources:
err = m.migrateTPresources()
- case utils.MetaTpTimings:
- err = m.migrateTpTimings()
case utils.MetaTpChargers:
err = m.migrateTPChargers()
case utils.MetaTpDispatchers:
@@ -169,9 +165,6 @@ func (m *Migrator) Migrate(taskIDs []string) (err error, stats map[string]int) {
if err := m.migrateAttributeProfile(); err != nil {
log.Print("ERROR: ", utils.MetaAttributes, " ", err)
}
- if err := m.migrateTimings(); err != nil {
- log.Print("ERROR: ", utils.CacheTimings, " ", err)
- }
if err := m.migrateFilters(); err != nil {
log.Print("ERROR: ", utils.MetaFilters, " ", err)
}
@@ -205,9 +198,6 @@ func (m *Migrator) Migrate(taskIDs []string) (err error, stats map[string]int) {
if err := m.migrateTPresources(); err != nil {
log.Print("ERROR: ", utils.MetaTpResources, " ", err)
}
- if err := m.migrateTpTimings(); err != nil {
- log.Print("ERROR: ", utils.MetaTpTimings, " ", err)
- }
if err := m.migrateTPChargers(); err != nil {
log.Print("ERROR: ", utils.MetaTpChargers, " ", err)
}
diff --git a/migrator/timings.go b/migrator/timings.go
deleted file mode 100644
index 0ec4513b8..000000000
--- a/migrator/timings.go
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
-Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
-Copyright (C) ITsysCOM GmbH
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program. If not, see
-*/
-
-package migrator
-
-import (
- "fmt"
- "strings"
-
- "github.com/cgrates/birpc/context"
- "github.com/cgrates/cgrates/engine"
- "github.com/cgrates/cgrates/utils"
-)
-
-func (m *Migrator) migrateCurrentTiming() (err error) {
- var ids []string
- ids, err = m.dmIN.DataManager().DataDB().GetKeysForPrefix(context.TODO(), utils.TimingsPrefix)
- if err != nil {
- return err
- }
- for _, id := range ids {
- idg := strings.TrimPrefix(id, utils.TimingsPrefix)
- tm, err := m.dmIN.DataManager().GetTiming(idg, true, utils.NonTransactional)
- if err != nil {
- return err
- }
- if tm == nil || m.dryRun {
- continue
- }
- if err := m.dmOut.DataManager().SetTiming(context.TODO(), tm); err != nil {
- return err
- }
- m.stats[utils.Timing]++
- }
- return
-}
-
-func (m *Migrator) migrateTimings() (err error) {
- var vrs engine.Versions
- current := engine.CurrentDataDBVersions()
- if vrs, err = m.getVersions(utils.Timing); err != nil {
- return
- }
- switch version := vrs[utils.Timing]; version {
- default:
- return fmt.Errorf("Unsupported version %v", version)
- case current[utils.Timing]:
- if m.sameDataDB {
- break
- }
- if err = m.migrateCurrentTiming(); err != nil {
- return err
- }
- }
- return m.ensureIndexesDataDB(engine.ColTmg)
-}
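The deleted migrator files follow the project's version-gated move pattern: read the stored data version, migrate only when it matches the current code version and the source and target DBs differ, then re-ensure indexes on the target. A compact, self-contained sketch of that gate with hypothetical types (no real DataDB or index handling):

package main

import (
	"fmt"
)

type record struct{ ID string }

type migrator struct {
	in, out    map[string]*record
	versions   map[string]int
	sameDataDB bool
	dryRun     bool
	stats      map[string]int
}

// migrateCurrent moves every record from the source to the target handle.
func (m *migrator) migrateCurrent(item string) error {
	for id, rec := range m.in {
		if rec == nil || m.dryRun {
			continue
		}
		m.out[id] = rec
		m.stats[item]++
	}
	return nil
}

// migrate gates the move on the stored data version.
func (m *migrator) migrate(item string, currentVersion int) error {
	switch v := m.versions[item]; v {
	case currentVersion:
		if m.sameDataDB {
			break // nothing to move when both handles point at the same DB
		}
		if err := m.migrateCurrent(item); err != nil {
			return err
		}
	default:
		return fmt.Errorf("unsupported version %v", v)
	}
	return nil // the real code also re-ensures indexes here
}

func main() {
	m := &migrator{
		in:       map[string]*record{"WORKDAYS": {ID: "WORKDAYS"}},
		out:      map[string]*record{},
		versions: map[string]int{"Timing": 1},
		stats:    map[string]int{},
	}
	fmt.Println(m.migrate("Timing", 1), m.stats)
}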
diff --git a/migrator/tp_timings.go b/migrator/tp_timings.go
deleted file mode 100644
index aac74f3f4..000000000
--- a/migrator/tp_timings.go
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
-Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
-Copyright (C) ITsysCOM GmbH
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program. If not, see
-*/
-
-package migrator
-
-import (
- "github.com/cgrates/cgrates/engine"
- "github.com/cgrates/cgrates/utils"
-)
-
-func (m *Migrator) migrateCurrentTPTiming() (err error) {
- tpids, err := m.storDBIn.StorDB().GetTpIds(utils.TBLTPTimings)
- if err != nil {
- return err
- }
-
- for _, tpid := range tpids {
- ids, err := m.storDBIn.StorDB().GetTpTableIds(tpid, utils.TBLTPTimings,
- []string{"tag"}, map[string]string{}, nil)
- if err != nil {
- return err
- }
- for _, id := range ids {
- tm, err := m.storDBIn.StorDB().GetTPTimings(tpid, id)
- if err != nil {
- return err
- }
- if tm != nil {
- if m.dryRun != true {
- if err := m.storDBOut.StorDB().SetTPTimings(tm); err != nil {
- return err
- }
- for _, timing := range tm {
- if err := m.storDBIn.StorDB().RemTpData(utils.TBLTPTimings,
- timing.TPid, map[string]string{"tag": timing.ID}); err != nil {
- return err
- }
- }
- m.stats[utils.TpTiming] += 1
- }
- }
- }
- }
- return
-}
-
-func (m *Migrator) migrateTpTimings() (err error) {
- var vrs engine.Versions
- current := engine.CurrentStorDBVersions()
- if vrs, err = m.getVersions(utils.TpTiming); err != nil {
- return
- }
- switch vrs[utils.TpTiming] {
- case current[utils.TpTiming]:
- if m.sameStorDB {
- break
- }
- if err := m.migrateCurrentTPTiming(); err != nil {
- return err
- }
- }
- return m.ensureIndexesStorDB(utils.TBLTPTimings)
-}
diff --git a/migrator/tp_timings_it_test.go b/migrator/tp_timings_it_test.go
deleted file mode 100644
index bb23504c1..000000000
--- a/migrator/tp_timings_it_test.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// +build integration
-
-/*
-Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
-Copyright (C) ITsysCOM GmbH
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program. If not, see
-*/
-
-package migrator
-
-import (
- "log"
- "path"
- "reflect"
- "testing"
-
- "github.com/cgrates/cgrates/config"
- "github.com/cgrates/cgrates/engine"
- "github.com/cgrates/cgrates/utils"
-)
-
-var (
- tpTimPathIn string
- tpTimPathOut string
- tpTimCfgIn *config.CGRConfig
- tpTimCfgOut *config.CGRConfig
- tpTimMigrator *Migrator
- tpTimings []*utils.ApierTPTiming
-)
-
-var sTestsTpTimIT = []func(t *testing.T){
- testTpTimITConnect,
- testTpTimITFlush,
- testTpTimITPopulate,
- testTpTimITMove,
- testTpTimITCheckData,
-}
-
-func TestTpTimMove(t *testing.T) {
- for _, stest := range sTestsTpTimIT {
- t.Run("TestTpTimMove", stest)
- }
- tpTimMigrator.Close()
-}
-
-func testTpTimITConnect(t *testing.T) {
- var err error
- tpTimPathIn = path.Join(*dataDir, "conf", "samples", "tutmongo")
- tpTimCfgIn, err = config.NewCGRConfigFromPath(tpTimPathIn)
- if err != nil {
- t.Fatal(err)
- }
- tpTimPathOut = path.Join(*dataDir, "conf", "samples", "tutmysql")
- tpTimCfgOut, err = config.NewCGRConfigFromPath(tpTimPathOut)
- if err != nil {
- t.Fatal(err)
- }
- storDBIn, err := NewMigratorStorDB(tpTimCfgIn.StorDbCfg().Type,
- tpTimCfgIn.StorDbCfg().Host, tpTimCfgIn.StorDbCfg().Port,
- tpTimCfgIn.StorDbCfg().Name, tpTimCfgIn.StorDbCfg().User,
- tpTimCfgIn.StorDbCfg().Password, tpTimCfgIn.GeneralCfg().DBDataEncoding,
- tpTimCfgIn.StorDbCfg().StringIndexedFields, tpTimCfgIn.StorDbCfg().PrefixIndexedFields,
- tpTimCfgIn.StorDbCfg().Opts)
- if err != nil {
- log.Fatal(err)
- }
- storDBOut, err := NewMigratorStorDB(tpTimCfgOut.StorDbCfg().Type,
- tpTimCfgOut.StorDbCfg().Host, tpTimCfgOut.StorDbCfg().Port,
- tpTimCfgOut.StorDbCfg().Name, tpTimCfgOut.StorDbCfg().User,
- tpTimCfgOut.StorDbCfg().Password, tpTimCfgOut.GeneralCfg().DBDataEncoding,
- tpTimCfgIn.StorDbCfg().StringIndexedFields, tpTimCfgIn.StorDbCfg().PrefixIndexedFields,
- tpTimCfgOut.StorDbCfg().Opts)
- if err != nil {
- log.Fatal(err)
- }
- tpTimMigrator, err = NewMigrator(nil, nil, storDBIn, storDBOut, false, false, false, false)
- if err != nil {
- log.Fatal(err)
- }
-}
-
-func testTpTimITFlush(t *testing.T) {
- if err := tpTimMigrator.storDBIn.StorDB().Flush(
- path.Join(tpTimCfgIn.DataFolderPath, "storage", tpTimCfgIn.StorDbCfg().Type)); err != nil {
- t.Error(err)
- }
-
- if err := tpTimMigrator.storDBOut.StorDB().Flush(
- path.Join(tpTimCfgOut.DataFolderPath, "storage", tpTimCfgOut.StorDbCfg().Type)); err != nil {
- t.Error(err)
- }
-}
-
-func testTpTimITPopulate(t *testing.T) {
- tpTimings = []*utils.ApierTPTiming{
- {
- TPid: "TPT1",
- ID: "Timing",
- Years: "2017",
- Months: "05",
- MonthDays: "01",
- WeekDays: "1",
- Time: "15:00:00Z",
- },
- }
- if err := tpTimMigrator.storDBIn.StorDB().SetTPTimings(tpTimings); err != nil {
- t.Error("Error when setting TpTimings ", err.Error())
- }
- currentVersion := engine.CurrentStorDBVersions()
- err := tpTimMigrator.storDBIn.StorDB().SetVersions(currentVersion, false)
- if err != nil {
- t.Error("Error when setting version for TpTimings ", err.Error())
- }
-}
-
-func testTpTimITMove(t *testing.T) {
- err, _ := tpTimMigrator.Migrate([]string{utils.MetaTpTimings})
- if err != nil {
- t.Error("Error when migrating TpTimings ", err.Error())
- }
-}
-
-func testTpTimITCheckData(t *testing.T) {
- result, err := tpTimMigrator.storDBOut.StorDB().GetTPTimings(
- tpTimings[0].TPid, tpTimings[0].ID)
- if err != nil {
- t.Error("Error when getting TpTimings ", err.Error())
- }
- if !reflect.DeepEqual(tpTimings[0], result[0]) {
- t.Errorf("Expecting: %+v, received: %+v", tpTimings[0], result[0])
- }
- result, err = tpTimMigrator.storDBIn.StorDB().GetTPTimings(
- tpTimings[0].TPid, tpTimings[0].ID)
- if err != utils.ErrNotFound {
- t.Error(err)
- }
-}
diff --git a/utils/apitpdata.go b/utils/apitpdata.go
index 2386106b0..5b0c47874 100644
--- a/utils/apitpdata.go
+++ b/utils/apitpdata.go
@@ -802,7 +802,6 @@ func NewAttrReloadCacheWithOpts() *AttrReloadCacheWithAPIOpts {
DispatcherProfileIDs: {MetaAny},
DispatcherHostIDs: {MetaAny},
RateProfileIDs: {MetaAny},
- TimingIDs: {MetaAny},
AttributeFilterIndexIDs: {MetaAny},
ResourceFilterIndexIDs: {MetaAny},
StatFilterIndexIDs: {MetaAny},
diff --git a/utils/consts.go b/utils/consts.go
index 7827de174..0e7a73722 100644
--- a/utils/consts.go
+++ b/utils/consts.go
@@ -47,7 +47,7 @@ var (
CacheCDRIDs, CacheRPCConnections, CacheUCH, CacheSTIR, CacheEventCharges, MetaAPIBan,
CacheCapsEvents, CacheVersions, CacheReplicationHosts})
- dataDBPartition = NewStringSet([]string{CacheTimings,
+ dataDBPartition = NewStringSet([]string{
CacheResourceProfiles, CacheResources, CacheEventResources, CacheStatQueueProfiles, CacheStatQueues,
CacheThresholdProfiles, CacheThresholds, CacheFilters, CacheRouteProfiles, CacheAttributeProfiles,
CacheChargerProfiles, CacheActionProfiles, CacheDispatcherProfiles, CacheDispatcherHosts,
@@ -57,7 +57,7 @@ var (
CacheActionProfilesFilterIndexes, CacheAccountsFilterIndexes, CacheReverseFilterIndexes,
CacheAccounts})
- storDBPartition = NewStringSet([]string{CacheTBLTPTimings,
+ storDBPartition = NewStringSet([]string{
CacheTBLTPResources, CacheTBLTPStats, CacheTBLTPThresholds, CacheTBLTPFilters, CacheSessionCostsTBL, CacheCDRsTBL,
CacheTBLTPRoutes, CacheTBLTPAttributes, CacheTBLTPChargers, CacheTBLTPDispatchers,
CacheTBLTPDispatcherHosts, CacheTBLTPRateProfiles, CacheTBLTPActionProfiles, CacheTBLTPAccounts})
@@ -68,7 +68,6 @@ var (
CacheInstanceToPrefix = map[string]string{
CacheResourceProfiles: ResourceProfilesPrefix,
CacheResources: ResourcesPrefix,
- CacheTimings: TimingsPrefix,
CacheStatQueueProfiles: StatQueueProfilePrefix,
CacheStatQueues: StatQueuePrefix,
CacheThresholdProfiles: ThresholdProfilePrefix,
@@ -129,7 +128,6 @@ var (
}
CacheStorDBPartitions = map[string]string{
- TBLTPTimings: CacheTBLTPTimings,
TBLTPResources: CacheTBLTPResources,
TBLTPStats: CacheTBLTPStats,
TBLTPThresholds: CacheTBLTPThresholds,
@@ -165,7 +163,6 @@ var (
RateProfileIDs: RateProfilePrefix,
ActionProfileIDs: ActionProfilePrefix,
- TimingIDs: TimingsPrefix,
AttributeFilterIndexIDs: AttributeFilterIndexes,
ResourceFilterIndexIDs: ResourceFilterIndexes,
StatFilterIndexIDs: StatFilterIndexes,
@@ -197,7 +194,6 @@ var (
RateProfileIDs: CacheRateProfiles,
ActionProfileIDs: CacheActionProfiles,
- TimingIDs: CacheTimings,
AttributeFilterIndexIDs: CacheAttributeFilterIndexes,
ResourceFilterIndexIDs: CacheResourceFilterIndexes,
StatFilterIndexIDs: CacheStatFilterIndexes,
@@ -337,7 +333,6 @@ const (
ResourcesPrefix = "res_"
ResourceProfilesPrefix = "rsp_"
ThresholdPrefix = "thd_"
- TimingsPrefix = "tmg_"
FilterPrefix = "ftr_"
CDRsStatsPrefix = "cst_"
VersionPrefix = "ver_"
@@ -480,7 +475,6 @@ const (
BalanceMap = "BalanceMap"
UnitCounters = "UnitCounters"
UpdateTime = "UpdateTime"
- Timings = "Timings"
Rates = "Rates"
//DestinationRates = "DestinationRates"
RatingPlans = "RatingPlans"
@@ -550,7 +544,6 @@ const (
Action = "Action"
SessionSCosts = "SessionSCosts"
- Timing = "Timing"
RQF = "RQF"
Resource = "Resource"
User = "User"
@@ -611,7 +604,6 @@ const (
RateFixedFee = "RateFixedFee"
RateRecurrentFee = "RateRecurrentFee"
RateBlocker = "RateBlocker"
- TimingID = "TimingID"
RatesID = "RatesID"
RatingFiltersID = "RatingFiltersID"
AccountingID = "AccountingID"
@@ -833,7 +825,6 @@ const (
Loadid = "loadid"
ActionPlan = "ActionPlan"
ActionsId = "ActionsId"
- TimingId = "TimingId"
Prefixes = "Prefixes"
RateSlots = "RateSlots"
RatingPlanBindings = "RatingPlanBindings"
@@ -994,7 +985,6 @@ const (
MetaTpActionProfiles = "*tp_action_profiles"
MetaTpRateProfiles = "*tp_rate_profiles"
MetaTpResources = "*tp_resources"
- MetaTpTimings = "*tp_timings"
MetaTpChargers = "*tp_chargers"
MetaTpDispatchers = "*tp_dispatchers"
MetaDurationSeconds = "*duration_seconds"
@@ -1015,7 +1005,6 @@ const (
TpAttributes = "TpAttributes"
TpStats = "TpStats"
TpResources = "TpResources"
- TpTiming = "TpTiming"
TpResource = "TpResource"
TpChargers = "TpChargers"
TpDispatchers = "TpDispatchers"
@@ -1092,7 +1081,6 @@ const (
ReplicatorSv1GetThreshold = "ReplicatorSv1.GetThreshold"
ReplicatorSv1GetThresholdProfile = "ReplicatorSv1.GetThresholdProfile"
ReplicatorSv1GetStatQueueProfile = "ReplicatorSv1.GetStatQueueProfile"
- ReplicatorSv1GetTiming = "ReplicatorSv1.GetTiming"
ReplicatorSv1GetResource = "ReplicatorSv1.GetResource"
ReplicatorSv1GetResourceProfile = "ReplicatorSv1.GetResourceProfile"
ReplicatorSv1GetActions = "ReplicatorSv1.GetActions"
@@ -1110,7 +1098,6 @@ const (
ReplicatorSv1SetStatQueue = "ReplicatorSv1.SetStatQueue"
ReplicatorSv1SetFilter = "ReplicatorSv1.SetFilter"
ReplicatorSv1SetStatQueueProfile = "ReplicatorSv1.SetStatQueueProfile"
- ReplicatorSv1SetTiming = "ReplicatorSv1.SetTiming"
ReplicatorSv1SetResource = "ReplicatorSv1.SetResource"
ReplicatorSv1SetResourceProfile = "ReplicatorSv1.SetResourceProfile"
ReplicatorSv1SetActions = "ReplicatorSv1.SetActions"
@@ -1129,7 +1116,6 @@ const (
ReplicatorSv1RemoveFilter = "ReplicatorSv1.RemoveFilter"
ReplicatorSv1RemoveThresholdProfile = "ReplicatorSv1.RemoveThresholdProfile"
ReplicatorSv1RemoveStatQueueProfile = "ReplicatorSv1.RemoveStatQueueProfile"
- ReplicatorSv1RemoveTiming = "ReplicatorSv1.RemoveTiming"
ReplicatorSv1RemoveResource = "ReplicatorSv1.RemoveResource"
ReplicatorSv1RemoveResourceProfile = "ReplicatorSv1.RemoveResourceProfile"
ReplicatorSv1RemoveActions = "ReplicatorSv1.RemoveActions"
@@ -1257,10 +1243,6 @@ const (
// APIerSv1 TP APIs
const (
- APIerSv1SetTPTiming = "APIerSv1.SetTPTiming"
- APIerSv1GetTPTiming = "APIerSv1.GetTPTiming"
- APIerSv1RemoveTPTiming = "APIerSv1.RemoveTPTiming"
- APIerSv1GetTPTimingIds = "APIerSv1.GetTPTimingIds"
APIerSv1LoadTariffPlanFromStorDb = "APIerSv1.LoadTariffPlanFromStorDb"
APIerSv1RemoveTPFromFolder = "APIerSv1.RemoveTPFromFolder"
)
@@ -1552,8 +1534,6 @@ const (
//CSV file name
const (
- TimingsCsv = "Timings.csv"
-
ResourcesCsv = "Resources.csv"
StatsCsv = "Stats.csv"
ThresholdsCsv = "Thresholds.csv"
@@ -1570,8 +1550,6 @@ const (
// Table Name
const (
- TBLTPTimings = "tp_timings"
-
TBLTPResources = "tp_resources"
TBLTPStats = "tp_stats"
TBLTPThresholds = "tp_thresholds"
@@ -1594,7 +1572,6 @@ const (
const (
CacheResources = "*resources"
CacheResourceProfiles = "*resource_profiles"
- CacheTimings = "*timings"
CacheEventResources = "*event_resources"
CacheStatQueueProfiles = "*statqueue_profiles"
CacheStatQueues = "*statqueues"
@@ -1640,7 +1617,6 @@ const (
CacheReplicationHosts = "*replication_hosts"
// storDB
- CacheTBLTPTimings = "*tp_timings"
CacheTBLTPResources = "*tp_resources"
CacheTBLTPStats = "*tp_stats"
@@ -2264,7 +2240,6 @@ const (
DispatcherRoutesIDs = "DispatcherRoutesIDs"
RateProfileIDs = "RateProfileIDs"
ActionProfileIDs = "ActionProfileIDs"
- TimingIDs = "TimingIDs"
AttributeFilterIndexIDs = "AttributeFilterIndexIDs"
ResourceFilterIndexIDs = "ResourceFilterIndexIDs"
StatFilterIndexIDs = "StatFilterIndexIDs"