diff --git a/admins/accounts.go b/admins/accounts.go
index 4b3ee6884..918a6aef9 100644
--- a/admins/accounts.go
+++ b/admins/accounts.go
@@ -55,8 +55,12 @@ func (admS *AdminS) V1GetAccountIDs(ctx *context.Context, args *utils.ArgsItemID
prfx := utils.AccountPrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaAccounts)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -104,8 +108,12 @@ func (admS *AdminS) V1GetAccountsCount(ctx *context.Context, args *utils.ArgsIte
tnt = admS.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.AccountPrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaAccounts)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/admins/actions.go b/admins/actions.go
index 6660d3967..b56b1e832 100644
--- a/admins/actions.go
+++ b/admins/actions.go
@@ -55,8 +55,12 @@ func (admS *AdminS) V1GetActionProfileIDs(ctx *context.Context, args *utils.Args
prfx := utils.ActionProfilePrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaActionProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -104,8 +108,12 @@ func (admS *AdminS) V1GetActionProfilesCount(ctx *context.Context, args *utils.A
tnt = admS.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.ActionProfilePrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaActionProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/admins/admins.go b/admins/admins.go
index a9bab29bc..5e2fafa0d 100644
--- a/admins/admins.go
+++ b/admins/admins.go
@@ -23,12 +23,10 @@ import (
"github.com/cgrates/cgrates/engine"
)
-func NewAdminS(cfg *config.CGRConfig, dm *engine.DataManager, connMgr *engine.ConnManager, fltrS *engine.FilterS,
- storDB engine.StorDB) *AdminS {
+func NewAdminS(cfg *config.CGRConfig, dm *engine.DataManager, connMgr *engine.ConnManager, fltrS *engine.FilterS) *AdminS {
return &AdminS{
cfg: cfg,
dm: dm,
- storDB: storDB,
connMgr: connMgr,
fltrS: fltrS,
}
@@ -37,7 +35,6 @@ func NewAdminS(cfg *config.CGRConfig, dm *engine.DataManager, connMgr *engine.Co
type AdminS struct {
cfg *config.CGRConfig
dm *engine.DataManager
- storDB engine.StorDB
connMgr *engine.ConnManager
fltrS *engine.FilterS
}
diff --git a/admins/attributes.go b/admins/attributes.go
index 0e7ce9ab9..6f3c56087 100644
--- a/admins/attributes.go
+++ b/admins/attributes.go
@@ -53,8 +53,12 @@ func (admS *AdminS) V1GetAttributeProfileIDs(ctx *context.Context, args *utils.A
prfx := utils.AttributeProfilePrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaAttributeProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -103,8 +107,12 @@ func (admS *AdminS) V1GetAttributeProfilesCount(ctx *context.Context, args *util
tnt = admS.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.AttributeProfilePrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaAttributeProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/admins/cdrs.go b/admins/cdrs.go
index 60d9f27c8..158289a96 100644
--- a/admins/cdrs.go
+++ b/admins/cdrs.go
@@ -36,7 +36,7 @@ func (admS AdminS) V1GetCDRs(ctx *context.Context, args *utils.CDRFilters, reply
if err != nil {
return fmt.Errorf("preparing filters failed: %w", err)
}
- cdrs, err := admS.storDB.GetCDRs(ctx, fltrs, args.APIOpts)
+ cdrs, err := admS.dm.GetCDRs(ctx, fltrs, args.APIOpts)
if err != nil {
return fmt.Errorf("retrieving CDRs failed: %w", err)
}
@@ -53,7 +53,7 @@ func (admS AdminS) V1RemoveCDRs(ctx *context.Context, args *utils.CDRFilters, re
if err != nil {
return fmt.Errorf("preparing filters failed: %w", err)
}
- if err := admS.storDB.RemoveCDRs(ctx, fltrs); err != nil {
+ if err := admS.dm.RemoveCDRs(ctx, fltrs); err != nil {
return fmt.Errorf("removing CDRs failed: %w", err)
}
*reply = utils.OK
diff --git a/admins/chargers.go b/admins/chargers.go
index d2e7de9de..1e0ffac02 100644
--- a/admins/chargers.go
+++ b/admins/chargers.go
@@ -53,8 +53,12 @@ func (adms *AdminS) V1GetChargerProfileIDs(ctx *context.Context, args *utils.Arg
prfx := utils.ChargerProfilePrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := adms.dm.DBConns().GetConn(utils.MetaChargerProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = adms.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -102,8 +106,12 @@ func (admS *AdminS) V1GetChargerProfilesCount(ctx *context.Context, args *utils.
tnt = admS.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.ChargerProfilePrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaChargerProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/admins/filters.go b/admins/filters.go
index 1b276e35e..411314fdc 100644
--- a/admins/filters.go
+++ b/admins/filters.go
@@ -133,8 +133,12 @@ func (adms *AdminS) V1GetFilterIDs(ctx *context.Context, args *utils.ArgsItemIDs
prfx := utils.FilterPrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := adms.dm.DBConns().GetConn(utils.MetaFilters)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = adms.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -192,8 +196,12 @@ func (admS *AdminS) V1GetFiltersCount(ctx *context.Context, args *utils.ArgsItem
tnt = admS.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.FilterPrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaFilters)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/admins/ips.go b/admins/ips.go
index 601cd25ec..84c7214e4 100644
--- a/admins/ips.go
+++ b/admins/ips.go
@@ -52,7 +52,11 @@ func (s *AdminS) V1GetIPProfileIDs(ctx *context.Context, args *utils.ArgsItemIDs
prfx := utils.IPProfilesPrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
- keys, err := s.dm.DataDB().GetKeysForPrefix(ctx, prfx)
+ dataDB, _, err := s.dm.DBConns().GetConn(utils.MetaIPProfiles)
+ if err != nil {
+ return err
+ }
+ keys, err := dataDB.GetKeysForPrefix(ctx, prfx)
if err != nil {
return err
}
@@ -100,7 +104,11 @@ func (s *AdminS) V1GetIPProfilesCount(ctx *context.Context, args *utils.ArgsItem
tnt = s.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.IPProfilesPrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
- keys, err := s.dm.DataDB().GetKeysForPrefix(ctx, prfx)
+ dataDB, _, err := s.dm.DBConns().GetConn(utils.MetaIPProfiles)
+ if err != nil {
+ return err
+ }
+ keys, err := dataDB.GetKeysForPrefix(ctx, prfx)
if err != nil {
return err
}
diff --git a/admins/rankings.go b/admins/rankings.go
index 11c093556..5b97c5121 100644
--- a/admins/rankings.go
+++ b/admins/rankings.go
@@ -53,8 +53,12 @@ func (a *AdminS) V1GetRankingProfileIDs(ctx *context.Context, args *utils.ArgsIt
prfx := utils.RankingProfilePrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := a.dm.DBConns().GetConn(utils.MetaRankingProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = a.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -102,8 +106,12 @@ func (a *AdminS) V1GetRankingProfilesCount(ctx *context.Context, args *utils.Arg
tnt = a.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.RankingProfilePrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := a.dm.DBConns().GetConn(utils.MetaRankingProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = a.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/admins/rates.go b/admins/rates.go
index 21ce3131e..ef341737e 100644
--- a/admins/rates.go
+++ b/admins/rates.go
@@ -107,8 +107,12 @@ func (admS *AdminS) V1GetRateProfileIDs(ctx *context.Context, args *utils.ArgsIt
prfx := utils.RateProfilePrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaRateProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -180,8 +184,12 @@ func (admS *AdminS) V1GetRateProfilesCount(ctx *context.Context, args *utils.Arg
tnt = admS.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.RateProfilePrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaRateProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/admins/resources.go b/admins/resources.go
index f07428a75..1015b08ee 100644
--- a/admins/resources.go
+++ b/admins/resources.go
@@ -52,8 +52,12 @@ func (adms *AdminS) V1GetResourceProfileIDs(ctx *context.Context, args *utils.Ar
prfx := utils.ResourceProfilesPrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := adms.dm.DBConns().GetConn(utils.MetaResourceProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = adms.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -101,8 +105,12 @@ func (admS *AdminS) V1GetResourceProfilesCount(ctx *context.Context, args *utils
tnt = admS.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.ResourceProfilesPrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaResourceProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/admins/routes.go b/admins/routes.go
index a3bbb521a..3b3d937aa 100644
--- a/admins/routes.go
+++ b/admins/routes.go
@@ -52,8 +52,12 @@ func (adms *AdminS) V1GetRouteProfileIDs(ctx *context.Context, args *utils.ArgsI
prfx := utils.RouteProfilePrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := adms.dm.DBConns().GetConn(utils.MetaRouteProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = adms.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -101,8 +105,12 @@ func (adms *AdminS) V1GetRouteProfilesCount(ctx *context.Context, args *utils.Ar
tnt = adms.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.RouteProfilePrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := adms.dm.DBConns().GetConn(utils.MetaRouteProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = adms.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/admins/stats.go b/admins/stats.go
index e2ba3d03e..dbc2dee1a 100644
--- a/admins/stats.go
+++ b/admins/stats.go
@@ -53,8 +53,12 @@ func (adms *AdminS) V1GetStatQueueProfileIDs(ctx *context.Context, args *utils.A
prfx := utils.StatQueueProfilePrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := adms.dm.DBConns().GetConn(utils.MetaStatQueueProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = adms.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -102,8 +106,12 @@ func (admS *AdminS) V1GetStatQueueProfilesCount(ctx *context.Context, args *util
tnt = admS.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.StatQueueProfilePrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaStatQueueProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/admins/thresholds.go b/admins/thresholds.go
index 1e011546d..f3626108a 100644
--- a/admins/thresholds.go
+++ b/admins/thresholds.go
@@ -53,8 +53,12 @@ func (adms *AdminS) V1GetThresholdProfileIDs(ctx *context.Context, args *utils.A
prfx := utils.ThresholdProfilePrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := adms.dm.DBConns().GetConn(utils.MetaThresholdProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = adms.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -102,8 +106,12 @@ func (adms *AdminS) V1GetThresholdProfilesCount(ctx *context.Context, args *util
tnt = adms.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.ThresholdProfilePrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := adms.dm.DBConns().GetConn(utils.MetaThresholdProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = adms.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/admins/trends.go b/admins/trends.go
index 87a0aacfd..df0973c76 100644
--- a/admins/trends.go
+++ b/admins/trends.go
@@ -49,8 +49,12 @@ func (a *AdminS) V1GetTrendProfileIDs(ctx *context.Context, args *utils.ArgsItem
prfx := utils.TrendProfilePrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := a.dm.DBConns().GetConn(utils.MetaTrendProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = a.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -98,8 +102,12 @@ func (a *AdminS) V1GetTrendProfilesCount(ctx *context.Context, args *utils.ArgsI
tnt = a.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.TrendProfilePrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := a.dm.DBConns().GetConn(utils.MetaTrendProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = a.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/apis/accounts.go b/apis/accounts.go
index afeb22e6f..9acf5d449 100644
--- a/apis/accounts.go
+++ b/apis/accounts.go
@@ -56,8 +56,12 @@ func (admS *AdminSv1) GetAccountIDs(ctx *context.Context, args *utils.ArgsItemID
prfx := utils.AccountPrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaAccounts)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -105,8 +109,12 @@ func (admS *AdminSv1) GetAccountsCount(ctx *context.Context, args *utils.ArgsIte
tnt = admS.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.AccountPrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaAccounts)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/apis/actions.go b/apis/actions.go
index 14546e2fa..1d54849eb 100644
--- a/apis/actions.go
+++ b/apis/actions.go
@@ -56,8 +56,12 @@ func (admS *AdminSv1) GetActionProfileIDs(ctx *context.Context, args *utils.Args
prfx := utils.ActionProfilePrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaActionProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -105,8 +109,12 @@ func (admS *AdminSv1) GetActionProfilesCount(ctx *context.Context, args *utils.A
tnt = admS.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.ActionProfilePrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaActionProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/apis/admins.go b/apis/admins.go
index ec1801617..789e8c6dd 100644
--- a/apis/admins.go
+++ b/apis/admins.go
@@ -23,12 +23,10 @@ import (
"github.com/cgrates/cgrates/engine"
)
-func NewAdminSv1(cfg *config.CGRConfig, dm *engine.DataManager, connMgr *engine.ConnManager, fltrS *engine.FilterS,
- storDB engine.StorDB) *AdminSv1 {
+func NewAdminSv1(cfg *config.CGRConfig, dm *engine.DataManager, connMgr *engine.ConnManager, fltrS *engine.FilterS) *AdminSv1 {
return &AdminSv1{
cfg: cfg,
dm: dm,
- storDB: storDB,
connMgr: connMgr,
fltrS: fltrS,
}
@@ -37,7 +35,6 @@ func NewAdminSv1(cfg *config.CGRConfig, dm *engine.DataManager, connMgr *engine.
type AdminSv1 struct {
cfg *config.CGRConfig
dm *engine.DataManager
- storDB engine.StorDB
connMgr *engine.ConnManager
fltrS *engine.FilterS
ping
diff --git a/apis/attributes.go b/apis/attributes.go
index 8f9601987..cade41a2e 100644
--- a/apis/attributes.go
+++ b/apis/attributes.go
@@ -54,8 +54,12 @@ func (admS *AdminSv1) GetAttributeProfileIDs(ctx *context.Context, args *utils.A
prfx := utils.AttributeProfilePrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaAttributeProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -104,8 +108,12 @@ func (admS *AdminSv1) GetAttributeProfilesCount(ctx *context.Context, args *util
tnt = admS.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.AttributeProfilePrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaAttributeProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/apis/cdrs.go b/apis/cdrs.go
index 0313a87c4..25aa1bbc2 100644
--- a/apis/cdrs.go
+++ b/apis/cdrs.go
@@ -37,7 +37,7 @@ func (admS AdminSv1) GetCDRs(ctx *context.Context, args *utils.CDRFilters, reply
if err != nil {
return fmt.Errorf("preparing filters failed: %w", err)
}
- cdrs, err := admS.storDB.GetCDRs(ctx, fltrs, args.APIOpts)
+ cdrs, err := admS.dm.GetCDRs(ctx, fltrs, args.APIOpts)
if err != nil {
return fmt.Errorf("retrieving CDRs failed: %w", err)
}
@@ -54,7 +54,7 @@ func (admS AdminSv1) RemoveCDRs(ctx *context.Context, args *utils.CDRFilters, re
if err != nil {
return fmt.Errorf("preparing filters failed: %w", err)
}
- if err := admS.storDB.RemoveCDRs(ctx, fltrs); err != nil {
+ if err := admS.dm.RemoveCDRs(ctx, fltrs); err != nil {
return fmt.Errorf("removing CDRs failed: %w", err)
}
*reply = utils.OK
diff --git a/apis/chargers.go b/apis/chargers.go
index 7ed2acbbe..a67aa0f8c 100644
--- a/apis/chargers.go
+++ b/apis/chargers.go
@@ -54,8 +54,12 @@ func (adms *AdminSv1) GetChargerProfileIDs(ctx *context.Context, args *utils.Arg
prfx := utils.ChargerProfilePrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := adms.dm.DBConns().GetConn(utils.MetaChargerProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = adms.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -103,8 +107,12 @@ func (admS *AdminSv1) GetChargerProfilesCount(ctx *context.Context, args *utils.
tnt = admS.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.ChargerProfilePrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaChargerProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/apis/datadb.go b/apis/datadb.go
index 78fa8e7fd..cf9e63f42 100644
--- a/apis/datadb.go
+++ b/apis/datadb.go
@@ -24,7 +24,7 @@ import (
// DumpDataDB will dump all of datadb from memory to a file
func (adms *AdminSv1) DumpDataDB(ctx *context.Context, ignr *string, reply *string) (err error) {
- if err = adms.dm.DataDB().DumpDataDB(); err != nil {
+ if err = adms.dm.DumpDataDB(); err != nil {
return
}
*reply = utils.OK
@@ -33,7 +33,7 @@ func (adms *AdminSv1) DumpDataDB(ctx *context.Context, ignr *string, reply *stri
// Will rewrite every dump file of DataDB
func (adms *AdminSv1) RewriteDataDB(ctx *context.Context, ignr *string, reply *string) (err error) {
- if err = adms.dm.DataDB().RewriteDataDB(); err != nil {
+ if err = adms.dm.RewriteDataDB(); err != nil {
return
}
*reply = utils.OK
@@ -47,7 +47,7 @@ type DumpBackupParams struct {
// BackupDataDB will momentarely stop any dumping and rewriting in dataDB, until dump folder is backed up in folder path backupFolderPath. Making zip true will create a zip file in the path instead
func (adms *AdminSv1) BackupDataDB(ctx *context.Context, params DumpBackupParams, reply *string) (err error) {
- if err = adms.dm.DataDB().BackupDataDB(params.BackupFolderPath, params.Zip); err != nil {
+ if err = adms.dm.BackupDataDB(params.BackupFolderPath, params.Zip); err != nil {
return
}
*reply = utils.OK
diff --git a/apis/filters.go b/apis/filters.go
index 8131c820c..44b512bd8 100644
--- a/apis/filters.go
+++ b/apis/filters.go
@@ -128,8 +128,12 @@ func (adms *AdminSv1) GetFilterIDs(ctx *context.Context, args *utils.ArgsItemIDs
prfx := utils.FilterPrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := adms.dm.DBConns().GetConn(utils.MetaFilters)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = adms.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -182,8 +186,12 @@ func (admS *AdminSv1) GetFiltersCount(ctx *context.Context, args *utils.ArgsItem
tnt = admS.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.FilterPrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaFilters)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/apis/ips.go b/apis/ips.go
index d496c5919..f81dc506a 100644
--- a/apis/ips.go
+++ b/apis/ips.go
@@ -53,7 +53,11 @@ func (s *AdminSv1) GetIPProfileIDs(ctx *context.Context, args *utils.ArgsItemIDs
prfx := utils.IPProfilesPrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
- keys, err := s.dm.DataDB().GetKeysForPrefix(ctx, prfx)
+ dataDB, _, err := s.dm.DBConns().GetConn(utils.MetaIPProfiles)
+ if err != nil {
+ return err
+ }
+ keys, err := dataDB.GetKeysForPrefix(ctx, prfx)
if err != nil {
return err
}
@@ -101,7 +105,11 @@ func (s *AdminSv1) GetIPProfilesCount(ctx *context.Context, args *utils.ArgsItem
tnt = s.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.IPProfilesPrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
- keys, err := s.dm.DataDB().GetKeysForPrefix(ctx, prfx)
+ dataDB, _, err := s.dm.DBConns().GetConn(utils.MetaIPProfiles)
+ if err != nil {
+ return err
+ }
+ keys, err := dataDB.GetKeysForPrefix(ctx, prfx)
if err != nil {
return err
}
diff --git a/apis/rankings.go b/apis/rankings.go
index ec2cd452b..6a83dfb7d 100644
--- a/apis/rankings.go
+++ b/apis/rankings.go
@@ -54,8 +54,12 @@ func (adms *AdminSv1) GetRankingProfileIDs(ctx *context.Context, args *utils.Arg
prfx := utils.RankingProfilePrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := adms.dm.DBConns().GetConn(utils.MetaRankingProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = adms.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -103,8 +107,12 @@ func (admS *AdminSv1) GetRankingProfilesCount(ctx *context.Context, args *utils.
tnt = admS.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.RankingProfilePrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaRankingProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/apis/rates.go b/apis/rates.go
index 260c877cc..5997d9d7b 100644
--- a/apis/rates.go
+++ b/apis/rates.go
@@ -108,8 +108,12 @@ func (admS *AdminSv1) GetRateProfileIDs(ctx *context.Context, args *utils.ArgsIt
prfx := utils.RateProfilePrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaRateProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -181,8 +185,12 @@ func (admS *AdminSv1) GetRateProfilesCount(ctx *context.Context, args *utils.Arg
tnt = admS.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.RateProfilePrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaRateProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/apis/replicator.go b/apis/replicator.go
index ccab190bb..e94ac2834 100644
--- a/apis/replicator.go
+++ b/apis/replicator.go
@@ -45,7 +45,11 @@ type ReplicatorSv1 struct {
// GetAccount is the remote method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) GetAccount(ctx *context.Context, tntID *utils.TenantIDWithAPIOpts, reply *utils.Account) error {
engine.UpdateReplicationFilters(utils.AccountPrefix, tntID.TenantID.TenantID(), utils.IfaceAsString(tntID.APIOpts[utils.RemoteHostOpt]))
- rcv, err := rplSv1.dm.DataDB().GetAccountDrv(ctx, tntID.Tenant, tntID.ID)
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaAccounts)
+ if err != nil {
+ return err
+ }
+ rcv, err := dataDB.GetAccountDrv(ctx, tntID.Tenant, tntID.ID)
if err != nil {
return err
}
@@ -56,7 +60,11 @@ func (rplSv1 *ReplicatorSv1) GetAccount(ctx *context.Context, tntID *utils.Tenan
// GetStatQueue is the remote method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) GetStatQueue(ctx *context.Context, tntID *utils.TenantIDWithAPIOpts, reply *engine.StatQueue) error {
engine.UpdateReplicationFilters(utils.StatQueuePrefix, tntID.TenantID.TenantID(), utils.IfaceAsString(tntID.APIOpts[utils.RemoteHostOpt]))
- rcv, err := rplSv1.dm.DataDB().GetStatQueueDrv(ctx, tntID.Tenant, tntID.ID)
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaStatQueues)
+ if err != nil {
+ return err
+ }
+ rcv, err := dataDB.GetStatQueueDrv(ctx, tntID.Tenant, tntID.ID)
if err != nil {
return err
}
@@ -67,7 +75,11 @@ func (rplSv1 *ReplicatorSv1) GetStatQueue(ctx *context.Context, tntID *utils.Ten
// GetFilter is the remote method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) GetFilter(ctx *context.Context, tntID *utils.TenantIDWithAPIOpts, reply *engine.Filter) error {
engine.UpdateReplicationFilters(utils.FilterPrefix, tntID.TenantID.TenantID(), utils.IfaceAsString(tntID.APIOpts[utils.RemoteHostOpt]))
- rcv, err := rplSv1.dm.DataDB().GetFilterDrv(ctx, tntID.Tenant, tntID.ID)
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaFilters)
+ if err != nil {
+ return err
+ }
+ rcv, err := dataDB.GetFilterDrv(ctx, tntID.Tenant, tntID.ID)
if err != nil {
return err
}
@@ -78,7 +90,11 @@ func (rplSv1 *ReplicatorSv1) GetFilter(ctx *context.Context, tntID *utils.Tenant
// GetThreshold is the remote method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) GetThreshold(ctx *context.Context, tntID *utils.TenantIDWithAPIOpts, reply *engine.Threshold) error {
engine.UpdateReplicationFilters(utils.ThresholdPrefix, tntID.TenantID.TenantID(), utils.IfaceAsString(tntID.APIOpts[utils.RemoteHostOpt]))
- rcv, err := rplSv1.dm.DataDB().GetThresholdDrv(ctx, tntID.Tenant, tntID.ID)
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaThresholds)
+ if err != nil {
+ return err
+ }
+ rcv, err := dataDB.GetThresholdDrv(ctx, tntID.Tenant, tntID.ID)
if err != nil {
return err
}
@@ -89,7 +105,11 @@ func (rplSv1 *ReplicatorSv1) GetThreshold(ctx *context.Context, tntID *utils.Ten
// GetThresholdProfile is the remote method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) GetThresholdProfile(ctx *context.Context, tntID *utils.TenantIDWithAPIOpts, reply *engine.ThresholdProfile) error {
engine.UpdateReplicationFilters(utils.ThresholdProfilePrefix, tntID.TenantID.TenantID(), utils.IfaceAsString(tntID.APIOpts[utils.RemoteHostOpt]))
- rcv, err := rplSv1.dm.DataDB().GetThresholdProfileDrv(ctx, tntID.Tenant, tntID.ID)
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaThresholdProfiles)
+ if err != nil {
+ return err
+ }
+ rcv, err := dataDB.GetThresholdProfileDrv(ctx, tntID.Tenant, tntID.ID)
if err != nil {
return err
}
@@ -100,7 +120,11 @@ func (rplSv1 *ReplicatorSv1) GetThresholdProfile(ctx *context.Context, tntID *ut
// GetStatQueueProfile is the remote method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) GetStatQueueProfile(ctx *context.Context, tntID *utils.TenantIDWithAPIOpts, reply *engine.StatQueueProfile) error {
engine.UpdateReplicationFilters(utils.StatQueueProfilePrefix, tntID.TenantID.TenantID(), utils.IfaceAsString(tntID.APIOpts[utils.RemoteHostOpt]))
- rcv, err := rplSv1.dm.DataDB().GetStatQueueProfileDrv(ctx, tntID.Tenant, tntID.ID)
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaStatQueueProfiles)
+ if err != nil {
+ return err
+ }
+ rcv, err := dataDB.GetStatQueueProfileDrv(ctx, tntID.Tenant, tntID.ID)
if err != nil {
return err
}
@@ -111,7 +135,11 @@ func (rplSv1 *ReplicatorSv1) GetStatQueueProfile(ctx *context.Context, tntID *ut
// GetTrend is the remote method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) GetTrend(ctx *context.Context, tntID *utils.TenantIDWithAPIOpts, reply *utils.Trend) error {
engine.UpdateReplicationFilters(utils.TrendPrefix, tntID.TenantID.TenantID(), utils.IfaceAsString(tntID.APIOpts[utils.RemoteHostOpt]))
- rcv, err := rplSv1.dm.DataDB().GetTrendDrv(ctx, tntID.Tenant, tntID.ID)
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaTrends)
+ if err != nil {
+ return err
+ }
+ rcv, err := dataDB.GetTrendDrv(ctx, tntID.Tenant, tntID.ID)
if err != nil {
return err
}
@@ -126,7 +154,11 @@ func (rplSv1 *ReplicatorSv1) GetTrend(ctx *context.Context, tntID *utils.TenantI
// GetTrendProfile is the remote method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) GetTrendProfile(ctx *context.Context, tntID *utils.TenantIDWithAPIOpts, reply *utils.TrendProfile) error {
engine.UpdateReplicationFilters(utils.TrendProfilePrefix, tntID.TenantID.TenantID(), utils.IfaceAsString(tntID.APIOpts[utils.RemoteHostOpt]))
- rcv, err := rplSv1.dm.DataDB().GetTrendProfileDrv(ctx, tntID.Tenant, tntID.ID)
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaTrendProfiles)
+ if err != nil {
+ return err
+ }
+ rcv, err := dataDB.GetTrendProfileDrv(ctx, tntID.Tenant, tntID.ID)
if err != nil {
return err
}
@@ -137,7 +169,11 @@ func (rplSv1 *ReplicatorSv1) GetTrendProfile(ctx *context.Context, tntID *utils.
// GetResource is the remote method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) GetResource(ctx *context.Context, tntID *utils.TenantIDWithAPIOpts, reply *utils.Resource) error {
engine.UpdateReplicationFilters(utils.ResourcesPrefix, tntID.TenantID.TenantID(), utils.IfaceAsString(tntID.APIOpts[utils.RemoteHostOpt]))
- rcv, err := rplSv1.dm.DataDB().GetResourceDrv(ctx, tntID.Tenant, tntID.ID)
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaResources)
+ if err != nil {
+ return err
+ }
+ rcv, err := dataDB.GetResourceDrv(ctx, tntID.Tenant, tntID.ID)
if err != nil {
return err
}
@@ -148,7 +184,11 @@ func (rplSv1 *ReplicatorSv1) GetResource(ctx *context.Context, tntID *utils.Tena
// GetResourceProfile is the remote method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) GetResourceProfile(ctx *context.Context, tntID *utils.TenantIDWithAPIOpts, reply *utils.ResourceProfile) error {
engine.UpdateReplicationFilters(utils.ResourceProfilesPrefix, tntID.TenantID.TenantID(), utils.IfaceAsString(tntID.APIOpts[utils.RemoteHostOpt]))
- rcv, err := rplSv1.dm.DataDB().GetResourceProfileDrv(ctx, tntID.Tenant, tntID.ID)
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaResourceProfiles)
+ if err != nil {
+ return err
+ }
+ rcv, err := dataDB.GetResourceProfileDrv(ctx, tntID.Tenant, tntID.ID)
if err != nil {
return err
}
@@ -159,7 +199,11 @@ func (rplSv1 *ReplicatorSv1) GetResourceProfile(ctx *context.Context, tntID *uti
// GetIP is the remote method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) GetIP(ctx *context.Context, tntID *utils.TenantIDWithAPIOpts, reply *utils.IPAllocations) error {
engine.UpdateReplicationFilters(utils.IPAllocationsPrefix, tntID.TenantID.TenantID(), utils.IfaceAsString(tntID.APIOpts[utils.RemoteHostOpt]))
- rcv, err := rplSv1.dm.DataDB().GetIPAllocationsDrv(ctx, tntID.Tenant, tntID.ID)
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaIPAllocations)
+ if err != nil {
+ return err
+ }
+ rcv, err := dataDB.GetIPAllocationsDrv(ctx, tntID.Tenant, tntID.ID)
if err != nil {
return err
}
@@ -170,7 +214,11 @@ func (rplSv1 *ReplicatorSv1) GetIP(ctx *context.Context, tntID *utils.TenantIDWi
// GetIPProfile is the remote method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) GetIPProfile(ctx *context.Context, tntID *utils.TenantIDWithAPIOpts, reply *utils.IPProfile) error {
engine.UpdateReplicationFilters(utils.IPProfilesPrefix, tntID.TenantID.TenantID(), utils.IfaceAsString(tntID.APIOpts[utils.RemoteHostOpt]))
- rcv, err := rplSv1.dm.DataDB().GetIPProfileDrv(ctx, tntID.Tenant, tntID.ID)
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaIPProfiles)
+ if err != nil {
+ return err
+ }
+ rcv, err := dataDB.GetIPProfileDrv(ctx, tntID.Tenant, tntID.ID)
if err != nil {
return err
}
@@ -181,7 +229,11 @@ func (rplSv1 *ReplicatorSv1) GetIPProfile(ctx *context.Context, tntID *utils.Ten
// GetRouteProfile is the remote method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) GetRouteProfile(ctx *context.Context, tntID *utils.TenantIDWithAPIOpts, reply *utils.RouteProfile) error {
engine.UpdateReplicationFilters(utils.RouteProfilePrefix, tntID.TenantID.TenantID(), utils.IfaceAsString(tntID.APIOpts[utils.RemoteHostOpt]))
- rcv, err := rplSv1.dm.DataDB().GetRouteProfileDrv(ctx, tntID.Tenant, tntID.ID)
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaRouteProfiles)
+ if err != nil {
+ return err
+ }
+ rcv, err := dataDB.GetRouteProfileDrv(ctx, tntID.Tenant, tntID.ID)
if err != nil {
return err
}
@@ -192,7 +244,11 @@ func (rplSv1 *ReplicatorSv1) GetRouteProfile(ctx *context.Context, tntID *utils.
// GetAttributeProfile is the remote method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) GetAttributeProfile(ctx *context.Context, tntID *utils.TenantIDWithAPIOpts, reply *utils.AttributeProfile) error {
engine.UpdateReplicationFilters(utils.AttributeProfilePrefix, tntID.TenantID.TenantID(), utils.IfaceAsString(tntID.APIOpts[utils.RemoteHostOpt]))
- rcv, err := rplSv1.dm.DataDB().GetAttributeProfileDrv(ctx, tntID.Tenant, tntID.ID)
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaAttributeProfiles)
+ if err != nil {
+ return err
+ }
+ rcv, err := dataDB.GetAttributeProfileDrv(ctx, tntID.Tenant, tntID.ID)
if err != nil {
return err
}
@@ -203,7 +259,11 @@ func (rplSv1 *ReplicatorSv1) GetAttributeProfile(ctx *context.Context, tntID *ut
// GetChargerProfile is the remote method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) GetChargerProfile(ctx *context.Context, tntID *utils.TenantIDWithAPIOpts, reply *utils.ChargerProfile) error {
engine.UpdateReplicationFilters(utils.ChargerProfilePrefix, tntID.TenantID.TenantID(), utils.IfaceAsString(tntID.APIOpts[utils.RemoteHostOpt]))
- rcv, err := rplSv1.dm.DataDB().GetChargerProfileDrv(ctx, tntID.Tenant, tntID.ID)
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaChargerProfiles)
+ if err != nil {
+ return err
+ }
+ rcv, err := dataDB.GetChargerProfileDrv(ctx, tntID.Tenant, tntID.ID)
if err != nil {
return err
}
@@ -214,7 +274,11 @@ func (rplSv1 *ReplicatorSv1) GetChargerProfile(ctx *context.Context, tntID *util
// GetItemLoadIDs is the remote method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) GetItemLoadIDs(ctx *context.Context, itemID *utils.StringWithAPIOpts, reply *map[string]int64) error {
engine.UpdateReplicationFilters(utils.LoadIDPrefix, itemID.Arg, utils.IfaceAsString(itemID.APIOpts[utils.RemoteHostOpt]))
- rcv, err := rplSv1.dm.DataDB().GetItemLoadIDsDrv(ctx, itemID.Arg)
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaLoadIDs)
+ if err != nil {
+ return err
+ }
+ rcv, err := dataDB.GetItemLoadIDsDrv(ctx, itemID.Arg)
if err != nil {
return err
}
@@ -225,7 +289,11 @@ func (rplSv1 *ReplicatorSv1) GetItemLoadIDs(ctx *context.Context, itemID *utils.
// GetIndexes is the remote method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) GetIndexes(ctx *context.Context, args *utils.GetIndexesArg, reply *map[string]utils.StringSet) error {
engine.UpdateReplicationFilters(utils.CacheInstanceToPrefix[args.IdxItmType], args.TntCtx, utils.IfaceAsString(args.APIOpts[utils.RemoteHostOpt]))
- indx, err := rplSv1.dm.DataDB().GetIndexesDrv(ctx, args.IdxItmType, args.TntCtx, args.IdxKey, utils.NonTransactional)
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(args.IdxItmType)
+ if err != nil {
+ return err
+ }
+ indx, err := dataDB.GetIndexesDrv(ctx, args.IdxItmType, args.TntCtx, args.IdxKey, utils.NonTransactional)
if err != nil {
return err
}
@@ -235,7 +303,11 @@ func (rplSv1 *ReplicatorSv1) GetIndexes(ctx *context.Context, args *utils.GetInd
// SetAccount is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) SetAccount(ctx *context.Context, acc *utils.AccountWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().SetAccountDrv(ctx, acc.Account); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaAccounts)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetAccountDrv(ctx, acc.Account); err != nil {
return
}
// the account doesn't have cache
@@ -245,7 +317,11 @@ func (rplSv1 *ReplicatorSv1) SetAccount(ctx *context.Context, acc *utils.Account
// SetThresholdProfile is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) SetThresholdProfile(ctx *context.Context, th *engine.ThresholdProfileWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().SetThresholdProfileDrv(ctx, th.ThresholdProfile); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaThresholdProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetThresholdProfileDrv(ctx, th.ThresholdProfile); err != nil {
return
}
// delay if needed before cache call
@@ -263,7 +339,11 @@ func (rplSv1 *ReplicatorSv1) SetThresholdProfile(ctx *context.Context, th *engin
// SetThreshold is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) SetThreshold(ctx *context.Context, th *engine.ThresholdWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().SetThresholdDrv(ctx, th.Threshold); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaThresholds)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetThresholdDrv(ctx, th.Threshold); err != nil {
return
}
// delay if needed before cache call
@@ -281,7 +361,11 @@ func (rplSv1 *ReplicatorSv1) SetThreshold(ctx *context.Context, th *engine.Thres
// SetTrendProfile is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) SetTrendProfile(ctx *context.Context, trp *utils.TrendProfileWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().SetTrendProfileDrv(ctx, trp.TrendProfile); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaTrendProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetTrendProfileDrv(ctx, trp.TrendProfile); err != nil {
return
}
// delay if needed before cache call
@@ -299,7 +383,11 @@ func (rplSv1 *ReplicatorSv1) SetTrendProfile(ctx *context.Context, trp *utils.Tr
// SetTrend is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) SetTrend(ctx *context.Context, tr *utils.TrendWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().SetTrendDrv(ctx, tr.Trend); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaTrends)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetTrendDrv(ctx, tr.Trend); err != nil {
return
}
// delay if needed before cache call
@@ -317,7 +405,11 @@ func (rplSv1 *ReplicatorSv1) SetTrend(ctx *context.Context, tr *utils.TrendWithA
// SetStatQueueProfile is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) SetStatQueueProfile(ctx *context.Context, sq *engine.StatQueueProfileWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().SetStatQueueProfileDrv(ctx, sq.StatQueueProfile); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaStatQueueProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetStatQueueProfileDrv(ctx, sq.StatQueueProfile); err != nil {
return
}
// delay if needed before cache call
@@ -335,7 +427,11 @@ func (rplSv1 *ReplicatorSv1) SetStatQueueProfile(ctx *context.Context, sq *engin
// SetStatQueue is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) SetStatQueue(ctx *context.Context, sq *engine.StatQueueWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().SetStatQueueDrv(ctx, nil, sq.StatQueue); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaStatQueues)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetStatQueueDrv(ctx, nil, sq.StatQueue); err != nil {
return
}
// delay if needed before cache call
@@ -353,7 +449,11 @@ func (rplSv1 *ReplicatorSv1) SetStatQueue(ctx *context.Context, sq *engine.StatQ
// SetFilter is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) SetFilter(ctx *context.Context, fltr *engine.FilterWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().SetFilterDrv(ctx, fltr.Filter); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaFilters)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetFilterDrv(ctx, fltr.Filter); err != nil {
return
}
// delay if needed before cache call
@@ -371,7 +471,11 @@ func (rplSv1 *ReplicatorSv1) SetFilter(ctx *context.Context, fltr *engine.Filter
// SetResourceProfile is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) SetResourceProfile(ctx *context.Context, rs *utils.ResourceProfileWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().SetResourceProfileDrv(ctx, rs.ResourceProfile); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaResourceProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetResourceProfileDrv(ctx, rs.ResourceProfile); err != nil {
return
}
// delay if needed before cache call
@@ -389,7 +493,11 @@ func (rplSv1 *ReplicatorSv1) SetResourceProfile(ctx *context.Context, rs *utils.
// SetResource is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) SetResource(ctx *context.Context, rs *utils.ResourceWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().SetResourceDrv(ctx, rs.Resource); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaResources)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetResourceDrv(ctx, rs.Resource); err != nil {
return
}
// delay if needed before cache call
@@ -407,7 +515,11 @@ func (rplSv1 *ReplicatorSv1) SetResource(ctx *context.Context, rs *utils.Resourc
// SetIPProfile is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) SetIPProfile(ctx *context.Context, ipp *utils.IPProfileWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().SetIPProfileDrv(ctx, ipp.IPProfile); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaIPProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetIPProfileDrv(ctx, ipp.IPProfile); err != nil {
return
}
// delay if needed before cache call
@@ -425,7 +537,11 @@ func (rplSv1 *ReplicatorSv1) SetIPProfile(ctx *context.Context, ipp *utils.IPPro
// SetIP is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) SetIP(ctx *context.Context, ip *utils.IPAllocationsWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().SetIPAllocationsDrv(ctx, ip.IPAllocations); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaIPAllocations)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetIPAllocationsDrv(ctx, ip.IPAllocations); err != nil {
return
}
// delay if needed before cache call
@@ -443,7 +559,11 @@ func (rplSv1 *ReplicatorSv1) SetIP(ctx *context.Context, ip *utils.IPAllocations
// SetRouteProfile is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) SetRouteProfile(ctx *context.Context, sp *utils.RouteProfileWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().SetRouteProfileDrv(ctx, sp.RouteProfile); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaRouteProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetRouteProfileDrv(ctx, sp.RouteProfile); err != nil {
return
}
// delay if needed before cache call
@@ -461,7 +581,11 @@ func (rplSv1 *ReplicatorSv1) SetRouteProfile(ctx *context.Context, sp *utils.Rou
// SetAttributeProfile is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) SetAttributeProfile(ctx *context.Context, ap *utils.AttributeProfileWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().SetAttributeProfileDrv(ctx, ap.AttributeProfile); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaAttributeProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetAttributeProfileDrv(ctx, ap.AttributeProfile); err != nil {
return
}
// delay if needed before cache call
@@ -479,7 +603,11 @@ func (rplSv1 *ReplicatorSv1) SetAttributeProfile(ctx *context.Context, ap *utils
// SetChargerProfile is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) SetChargerProfile(ctx *context.Context, cp *utils.ChargerProfileWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().SetChargerProfileDrv(ctx, cp.ChargerProfile); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaChargerProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetChargerProfileDrv(ctx, cp.ChargerProfile); err != nil {
return
}
// delay if needed before cache call
@@ -497,7 +625,11 @@ func (rplSv1 *ReplicatorSv1) SetChargerProfile(ctx *context.Context, cp *utils.C
// SetLoadIDs is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) SetLoadIDs(ctx *context.Context, args *utils.LoadIDsWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().SetLoadIDsDrv(ctx, args.LoadIDs); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaLoadIDs)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetLoadIDsDrv(ctx, args.LoadIDs); err != nil {
return
}
lIDs := make([]string, 0, len(args.LoadIDs))
@@ -514,7 +646,11 @@ func (rplSv1 *ReplicatorSv1) SetLoadIDs(ctx *context.Context, args *utils.LoadID
// SetIndexes is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) SetIndexes(ctx *context.Context, args *utils.SetIndexesArg, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().SetIndexesDrv(ctx, args.IdxItmType, args.TntCtx, args.Indexes, true, utils.NonTransactional); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(args.IdxItmType)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetIndexesDrv(ctx, args.IdxItmType, args.TntCtx, args.Indexes, true, utils.NonTransactional); err != nil {
return
}
cIDs := make([]string, 0, len(args.Indexes))
@@ -531,7 +667,11 @@ func (rplSv1 *ReplicatorSv1) SetIndexes(ctx *context.Context, args *utils.SetInd
// RemoveTrend is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) RemoveTrend(ctx *context.Context, args *utils.TenantIDWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().RemoveTrendDrv(ctx, args.Tenant, args.ID); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaTrends)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveTrendDrv(ctx, args.Tenant, args.ID); err != nil {
return
}
// delay if needed before cache call
@@ -549,7 +689,11 @@ func (rplSv1 *ReplicatorSv1) RemoveTrend(ctx *context.Context, args *utils.Tenan
// RemoveTrendProfile is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) RemoveTrendProfile(ctx *context.Context, args *utils.TenantIDWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().RemTrendProfileDrv(ctx, args.Tenant, args.ID); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaTrendProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemTrendProfileDrv(ctx, args.Tenant, args.ID); err != nil {
return
}
// delay if needed before cache call
@@ -567,7 +711,11 @@ func (rplSv1 *ReplicatorSv1) RemoveTrendProfile(ctx *context.Context, args *util
// RemoveThreshold is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) RemoveThreshold(ctx *context.Context, args *utils.TenantIDWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().RemoveThresholdDrv(ctx, args.Tenant, args.ID); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaThresholds)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveThresholdDrv(ctx, args.Tenant, args.ID); err != nil {
return
}
// delay if needed before cache call
@@ -585,7 +733,11 @@ func (rplSv1 *ReplicatorSv1) RemoveThreshold(ctx *context.Context, args *utils.T
// RemoveAccount is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) RemoveAccount(ctx *context.Context, args *utils.TenantIDWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().RemoveAccountDrv(ctx, args.Tenant, args.ID); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaAccounts)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveAccountDrv(ctx, args.Tenant, args.ID); err != nil {
return
}
// the account doesn't have cache
@@ -595,7 +747,11 @@ func (rplSv1 *ReplicatorSv1) RemoveAccount(ctx *context.Context, args *utils.Ten
// RemoveStatQueue is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) RemoveStatQueue(ctx *context.Context, args *utils.TenantIDWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().RemStatQueueDrv(ctx, args.Tenant, args.ID); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaStatQueues)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemStatQueueDrv(ctx, args.Tenant, args.ID); err != nil {
return
}
// delay if needed before cache call
@@ -613,7 +769,11 @@ func (rplSv1 *ReplicatorSv1) RemoveStatQueue(ctx *context.Context, args *utils.T
// RemoveFilter is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) RemoveFilter(ctx *context.Context, args *utils.TenantIDWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().RemoveFilterDrv(ctx, args.Tenant, args.ID); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaFilters)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveFilterDrv(ctx, args.Tenant, args.ID); err != nil {
return
}
// delay if needed before cache call
@@ -631,7 +791,11 @@ func (rplSv1 *ReplicatorSv1) RemoveFilter(ctx *context.Context, args *utils.Tena
// RemoveThresholdProfile is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) RemoveThresholdProfile(ctx *context.Context, args *utils.TenantIDWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().RemThresholdProfileDrv(ctx, args.Tenant, args.ID); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaThresholdProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemThresholdProfileDrv(ctx, args.Tenant, args.ID); err != nil {
return
}
// delay if needed before cache call
@@ -649,7 +813,11 @@ func (rplSv1 *ReplicatorSv1) RemoveThresholdProfile(ctx *context.Context, args *
// RemoveStatQueueProfile is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) RemoveStatQueueProfile(ctx *context.Context, args *utils.TenantIDWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().RemStatQueueProfileDrv(ctx, args.Tenant, args.ID); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaStatQueueProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemStatQueueProfileDrv(ctx, args.Tenant, args.ID); err != nil {
return
}
// delay if needed before cache call
@@ -667,7 +835,11 @@ func (rplSv1 *ReplicatorSv1) RemoveStatQueueProfile(ctx *context.Context, args *
// RemoveResource is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) RemoveResource(ctx *context.Context, args *utils.TenantIDWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().RemoveResourceDrv(ctx, args.Tenant, args.ID); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaResources)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveResourceDrv(ctx, args.Tenant, args.ID); err != nil {
return
}
// delay if needed before cache call
@@ -685,7 +857,11 @@ func (rplSv1 *ReplicatorSv1) RemoveResource(ctx *context.Context, args *utils.Te
// RemoveResourceProfile is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) RemoveResourceProfile(ctx *context.Context, args *utils.TenantIDWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().RemoveResourceProfileDrv(ctx, args.Tenant, args.ID); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaResourceProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveResourceProfileDrv(ctx, args.Tenant, args.ID); err != nil {
return
}
// delay if needed before cache call
@@ -703,7 +879,11 @@ func (rplSv1 *ReplicatorSv1) RemoveResourceProfile(ctx *context.Context, args *u
// RemoveIP is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) RemoveIP(ctx *context.Context, args *utils.TenantIDWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().RemoveIPAllocationsDrv(ctx, args.Tenant, args.ID); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaIPAllocations)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveIPAllocationsDrv(ctx, args.Tenant, args.ID); err != nil {
return
}
// delay if needed before cache call
@@ -721,7 +901,11 @@ func (rplSv1 *ReplicatorSv1) RemoveIP(ctx *context.Context, args *utils.TenantID
// RemoveIPProfile is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) RemoveIPProfile(ctx *context.Context, args *utils.TenantIDWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().RemoveIPProfileDrv(ctx, args.Tenant, args.ID); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaIPProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveIPProfileDrv(ctx, args.Tenant, args.ID); err != nil {
return
}
// delay if needed before cache call
@@ -739,7 +923,11 @@ func (rplSv1 *ReplicatorSv1) RemoveIPProfile(ctx *context.Context, args *utils.T
// RemoveRouteProfile is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) RemoveRouteProfile(ctx *context.Context, args *utils.TenantIDWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().RemoveRouteProfileDrv(ctx, args.Tenant, args.ID); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaRouteProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveRouteProfileDrv(ctx, args.Tenant, args.ID); err != nil {
return
}
// delay if needed before cache call
@@ -757,7 +945,11 @@ func (rplSv1 *ReplicatorSv1) RemoveRouteProfile(ctx *context.Context, args *util
// RemoveAttributeProfile is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) RemoveAttributeProfile(ctx *context.Context, args *utils.TenantIDWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().RemoveAttributeProfileDrv(ctx, args.Tenant, args.ID); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaAttributeProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveAttributeProfileDrv(ctx, args.Tenant, args.ID); err != nil {
return
}
// delay if needed before cache call
@@ -775,7 +967,11 @@ func (rplSv1 *ReplicatorSv1) RemoveAttributeProfile(ctx *context.Context, args *
// RemoveChargerProfile is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) RemoveChargerProfile(ctx *context.Context, args *utils.TenantIDWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().RemoveChargerProfileDrv(ctx, args.Tenant, args.ID); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaChargerProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveChargerProfileDrv(ctx, args.Tenant, args.ID); err != nil {
return
}
// delay if needed before cache call
@@ -793,7 +989,11 @@ func (rplSv1 *ReplicatorSv1) RemoveChargerProfile(ctx *context.Context, args *ut
// RemoveIndexes is the replication method coresponding to the dataDb driver method
func (rplSv1 *ReplicatorSv1) RemoveIndexes(ctx *context.Context, args *utils.GetIndexesArg, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().RemoveIndexesDrv(ctx, args.IdxItmType, args.TntCtx, args.IdxKey); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(args.IdxItmType)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveIndexesDrv(ctx, args.IdxItmType, args.TntCtx, args.IdxKey); err != nil {
return
}
// delay if needed before cache call
@@ -811,7 +1011,11 @@ func (rplSv1 *ReplicatorSv1) RemoveIndexes(ctx *context.Context, args *utils.Get
func (rplSv1 *ReplicatorSv1) GetRateProfile(ctx *context.Context, tntID *utils.TenantIDWithAPIOpts, reply *utils.RateProfile) error {
engine.UpdateReplicationFilters(utils.RateProfilePrefix, tntID.TenantID.TenantID(), utils.IfaceAsString(tntID.APIOpts[utils.RemoteHostOpt]))
- rcv, err := rplSv1.dm.DataDB().GetRateProfileDrv(ctx, tntID.Tenant, tntID.ID)
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaRateProfiles)
+ if err != nil {
+ return err
+ }
+ rcv, err := dataDB.GetRateProfileDrv(ctx, tntID.Tenant, tntID.ID)
if err != nil {
return err
}
@@ -820,7 +1024,11 @@ func (rplSv1 *ReplicatorSv1) GetRateProfile(ctx *context.Context, tntID *utils.T
}
func (rplSv1 *ReplicatorSv1) GetActionProfile(ctx *context.Context, tntID *utils.TenantIDWithAPIOpts, reply *utils.ActionProfile) error {
engine.UpdateReplicationFilters(utils.ActionProfilePrefix, tntID.TenantID.TenantID(), utils.IfaceAsString(tntID.APIOpts[utils.RemoteHostOpt]))
- rcv, err := rplSv1.dm.DataDB().GetActionProfileDrv(ctx, tntID.Tenant, tntID.ID)
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaActionProfiles)
+ if err != nil {
+ return err
+ }
+ rcv, err := dataDB.GetActionProfileDrv(ctx, tntID.Tenant, tntID.ID)
if err != nil {
return err
}
@@ -837,7 +1045,11 @@ func (rplSv1 *ReplicatorSv1) SetRateProfile(ctx *context.Context, sp *utils.Rate
return
}
}
- if err = rplSv1.dm.DataDB().SetRateProfileDrv(ctx, sp.RateProfile, optOverwrite); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaRateProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetRateProfileDrv(ctx, sp.RateProfile, optOverwrite); err != nil {
return
}
// delay if needed before cache call
@@ -853,7 +1065,11 @@ func (rplSv1 *ReplicatorSv1) SetRateProfile(ctx *context.Context, sp *utils.Rate
return
}
func (rplSv1 *ReplicatorSv1) SetActionProfile(ctx *context.Context, sp *utils.ActionProfileWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().SetActionProfileDrv(ctx, sp.ActionProfile); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaActionProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetActionProfileDrv(ctx, sp.ActionProfile); err != nil {
return
}
// delay if needed before cache call
@@ -870,7 +1086,11 @@ func (rplSv1 *ReplicatorSv1) SetActionProfile(ctx *context.Context, sp *utils.Ac
}
func (rplSv1 *ReplicatorSv1) RemoveRateProfile(ctx *context.Context, args *utils.TenantIDWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().RemoveRateProfileDrv(ctx, args.Tenant, args.ID, nil); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaRateProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveRateProfileDrv(ctx, args.Tenant, args.ID, nil); err != nil {
return
}
// delay if needed before cache call
@@ -887,7 +1107,11 @@ func (rplSv1 *ReplicatorSv1) RemoveRateProfile(ctx *context.Context, args *utils
}
func (rplSv1 *ReplicatorSv1) RemoveActionProfile(ctx *context.Context, args *utils.TenantIDWithAPIOpts, reply *string) (err error) {
- if err = rplSv1.dm.DataDB().RemoveActionProfileDrv(ctx, args.Tenant, args.ID); err != nil {
+ dataDB, _, err := rplSv1.dm.DBConns().GetConn(utils.MetaActionProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveActionProfileDrv(ctx, args.Tenant, args.ID); err != nil {
return
}
// delay if needed before cache call
diff --git a/apis/resources.go b/apis/resources.go
index b6cb20f2a..1119c38a3 100644
--- a/apis/resources.go
+++ b/apis/resources.go
@@ -53,8 +53,12 @@ func (adms *AdminSv1) GetResourceProfileIDs(ctx *context.Context, args *utils.Ar
prfx := utils.ResourceProfilesPrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := adms.dm.DBConns().GetConn(utils.MetaResourceProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = adms.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -102,8 +106,12 @@ func (admS *AdminSv1) GetResourceProfilesCount(ctx *context.Context, args *utils
tnt = admS.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.ResourceProfilesPrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaResourceProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/apis/routes.go b/apis/routes.go
index 05d6c5cba..edf2c76c9 100644
--- a/apis/routes.go
+++ b/apis/routes.go
@@ -53,8 +53,12 @@ func (adms *AdminSv1) GetRouteProfileIDs(ctx *context.Context, args *utils.ArgsI
prfx := utils.RouteProfilePrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := adms.dm.DBConns().GetConn(utils.MetaRouteProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = adms.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -102,8 +106,12 @@ func (adms *AdminSv1) GetRouteProfilesCount(ctx *context.Context, args *utils.Ar
tnt = adms.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.RouteProfilePrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := adms.dm.DBConns().GetConn(utils.MetaRouteProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = adms.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/apis/stats.go b/apis/stats.go
index 86ad0cdcd..a8d77e518 100644
--- a/apis/stats.go
+++ b/apis/stats.go
@@ -53,8 +53,12 @@ func (adms *AdminSv1) GetStatQueueProfileIDs(ctx *context.Context, args *utils.A
prfx := utils.StatQueueProfilePrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := adms.dm.DBConns().GetConn(utils.MetaStatQueueProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = adms.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -102,8 +106,12 @@ func (admS *AdminSv1) GetStatQueueProfilesCount(ctx *context.Context, args *util
tnt = admS.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.StatQueueProfilePrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaStatQueueProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/apis/stordb.go b/apis/stordb.go
deleted file mode 100644
index deafd81c0..000000000
--- a/apis/stordb.go
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
-Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
-Copyright (C) ITsysCOM GmbH
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see
-*/
-package apis
-
-import (
- "github.com/cgrates/birpc/context"
- "github.com/cgrates/cgrates/utils"
-)
-
-// DumpStorDB will dump all of stordb from memory to a file
-func (adms *AdminSv1) DumpStorDB(ctx *context.Context, ignr *string, reply *string) (err error) {
- if err = adms.storDB.DumpStorDB(); err != nil {
- return
- }
- *reply = utils.OK
- return
-}
-
-// Will rewrite every dump file of StorDB
-func (adms *AdminSv1) RewriteStorDB(ctx *context.Context, ignr *string, reply *string) (err error) {
- if err = adms.storDB.RewriteStorDB(); err != nil {
- return
- }
- *reply = utils.OK
- return
-}
-
-// BackupStorDB will momentarely stop any dumping and rewriting in storDB, until dump folder is backed up in folder path backupFolderPath. Making zip true will create a zip file in the path instead
-func (adms *AdminSv1) BackupStorDB(ctx *context.Context, params DumpBackupParams, reply *string) (err error) {
- if err = adms.storDB.BackupStorDB(params.BackupFolderPath, params.Zip); err != nil {
- return
- }
- *reply = utils.OK
- return
-}
diff --git a/apis/thresholds.go b/apis/thresholds.go
index 2ca52f581..ce4db40bf 100644
--- a/apis/thresholds.go
+++ b/apis/thresholds.go
@@ -53,8 +53,12 @@ func (adms *AdminSv1) GetThresholdProfileIDs(ctx *context.Context, args *utils.A
prfx := utils.ThresholdProfilePrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := adms.dm.DBConns().GetConn(utils.MetaThresholdProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = adms.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -102,8 +106,12 @@ func (adms *AdminSv1) GetThresholdProfilesCount(ctx *context.Context, args *util
tnt = adms.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.ThresholdProfilePrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := adms.dm.DBConns().GetConn(utils.MetaThresholdProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = adms.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/apis/trends.go b/apis/trends.go
index 9c30f3250..e7c341aa3 100644
--- a/apis/trends.go
+++ b/apis/trends.go
@@ -50,8 +50,12 @@ func (adms *AdminSv1) GetTrendProfileIDs(ctx *context.Context, args *utils.ArgsI
prfx := utils.TrendProfilePrefix + tnt + utils.ConcatenatedKeySep
lenPrfx := len(prfx)
prfx += args.ItemsPrefix
+ dataDB, _, err := adms.dm.DBConns().GetConn(utils.MetaTrendProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = adms.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return
}
if len(keys) == 0 {
@@ -99,8 +103,12 @@ func (admS *AdminSv1) GetTrendProfilesCount(ctx *context.Context, args *utils.Ar
tnt = admS.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.TrendProfilePrefix + tnt + utils.ConcatenatedKeySep + args.ItemsPrefix
+ dataDB, _, err := admS.dm.DBConns().GetConn(utils.MetaTrendProfiles)
+ if err != nil {
+ return err
+ }
var keys []string
- if keys, err = admS.dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if keys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return err
}
if len(keys) == 0 {
diff --git a/cdrs/apis.go b/cdrs/apis.go
index a0e54388c..002d1d0bf 100644
--- a/cdrs/apis.go
+++ b/cdrs/apis.go
@@ -131,7 +131,7 @@ func (cdrS *CDRServer) V1ProcessStoredEvents(ctx *context.Context, args *utils.C
if err != nil {
return fmt.Errorf("preparing filters failed: %w", err)
}
- cdrs, err := cdrS.db.GetCDRs(ctx, fltrs, args.APIOpts)
+ cdrs, err := cdrS.dm.GetCDRs(ctx, fltrs, args.APIOpts)
if err != nil {
return fmt.Errorf("retrieving CDRs failed: %w", err)
}
diff --git a/cdrs/cdrs.go b/cdrs/cdrs.go
index 230305696..f1d48e212 100644
--- a/cdrs/cdrs.go
+++ b/cdrs/cdrs.go
@@ -46,12 +46,10 @@ func newMapEventFromReqForm(r *http.Request) (mp engine.MapEvent, err error) {
}
// NewCDRServer is a constructor for CDRServer
-func NewCDRServer(cfg *config.CGRConfig, dm *engine.DataManager, filterS *engine.FilterS, connMgr *engine.ConnManager,
- storDB engine.StorDB) *CDRServer {
+func NewCDRServer(cfg *config.CGRConfig, dm *engine.DataManager, filterS *engine.FilterS, connMgr *engine.ConnManager) *CDRServer {
return &CDRServer{
cfg: cfg,
dm: dm,
- db: storDB,
guard: guardian.Guardian,
fltrS: filterS,
connMgr: connMgr,
@@ -62,7 +60,6 @@ func NewCDRServer(cfg *config.CGRConfig, dm *engine.DataManager, filterS *engine
type CDRServer struct {
cfg *config.CGRConfig
dm *engine.DataManager
- db engine.StorDB
guard *guardian.GuardianLocker
fltrS *engine.FilterS
connMgr *engine.ConnManager
@@ -324,13 +321,13 @@ func (cdrS *CDRServer) processEvents(ctx *context.Context, evs []*utils.CGREvent
cgrEv.APIOpts[utils.MetaCDRID] = utils.GetUniqueCDRID(cgrEv)
}
- if err := cdrS.db.SetCDR(ctx, cgrEv, false); err != nil {
+ if err := cdrS.dm.SetCDR(ctx, cgrEv, false); err != nil {
if err != utils.ErrExists || !rerate {
// ToDo: add refund logic
return nil, fmt.Errorf("storing CDR %s failed: %w", utils.ToJSON(cgrEv), err)
}
- if err = cdrS.db.SetCDR(ctx, cgrEv, true); err != nil {
+ if err = cdrS.dm.SetCDR(ctx, cgrEv, true); err != nil {
utils.Logger.Warning(
fmt.Sprintf("<%s> error: <%s> updating CDR %+v",
utils.CDRs, err.Error(), utils.ToJSON(cgrEv)))
diff --git a/cmd/cgr-engine/cgr-engine.go b/cmd/cgr-engine/cgr-engine.go
index 1fc206795..90c1bd867 100644
--- a/cmd/cgr-engine/cgr-engine.go
+++ b/cmd/cgr-engine/cgr-engine.go
@@ -211,7 +211,6 @@ func runCGREngine(fs []string) (err error) {
services.NewConnManagerService(cfg),
services.NewLoggerService(cfg, flags.logger.typ),
services.NewDataDBService(cfg, flags.data.setVersions),
- services.NewStorDBService(cfg, flags.data.setVersions),
services.NewConfigService(cfg),
services.NewGuardianService(cfg),
coreS,
@@ -329,7 +328,7 @@ func initConfigFromPath(ctx *context.Context, path, nodeID, logType string, logL
if d, err = engine.NewDataDBConn(cfg.ConfigDBCfg().Type,
cfg.ConfigDBCfg().Host, cfg.ConfigDBCfg().Port,
cfg.ConfigDBCfg().Name, cfg.ConfigDBCfg().User,
- cfg.ConfigDBCfg().Password, cfg.GeneralCfg().DBDataEncoding,
+ cfg.ConfigDBCfg().Password, cfg.GeneralCfg().DBDataEncoding, nil, nil,
cfg.ConfigDBCfg().Opts, nil); err != nil { // Cannot configure getter database, show stopper
err = fmt.Errorf("could not configure configDB: <%s>", err)
return
diff --git a/cmd/cgr-loader/cgr-loader.go b/cmd/cgr-loader/cgr-loader.go
index fece0cca8..da342f943 100755
--- a/cmd/cgr-loader/cgr-loader.go
+++ b/cmd/cgr-loader/cgr-loader.go
@@ -42,49 +42,49 @@ var (
cfgPath = cgrLoaderFlags.String(utils.CfgPathCgr, utils.EmptyString,
"Configuration directory path.")
printConfig = cgrLoaderFlags.Bool(utils.PrintCfgCgr, false, "Print the configuration object in JSON format")
- dataDBType = cgrLoaderFlags.String(utils.DataDBTypeCgr, dfltCfg.DataDbCfg().Type,
+ dataDBType = cgrLoaderFlags.String(utils.DataDBTypeCgr, dfltCfg.DbCfg().DBConns[utils.MetaDefault].Type,
"The type of the DataDB database <*redis|*mongo>")
- dataDBHost = cgrLoaderFlags.String(utils.DataDBHostCgr, dfltCfg.DataDbCfg().Host,
+ dataDBHost = cgrLoaderFlags.String(utils.DataDBHostCgr, dfltCfg.DbCfg().DBConns[utils.MetaDefault].Host,
"The DataDb host to connect to.")
- dataDBPort = cgrLoaderFlags.String(utils.DataDBPortCgr, dfltCfg.DataDbCfg().Port,
+ dataDBPort = cgrLoaderFlags.String(utils.DataDBPortCgr, dfltCfg.DbCfg().DBConns[utils.MetaDefault].Port,
"The DataDb port to bind to.")
- dataDBName = cgrLoaderFlags.String(utils.DataDBNameCgr, dfltCfg.DataDbCfg().Name,
+ dataDBName = cgrLoaderFlags.String(utils.DataDBNameCgr, dfltCfg.DbCfg().DBConns[utils.MetaDefault].Name,
"The name/number of the DataDb to connect to.")
- dataDBUser = cgrLoaderFlags.String(utils.DataDBUserCgr, dfltCfg.DataDbCfg().User,
+ dataDBUser = cgrLoaderFlags.String(utils.DataDBUserCgr, dfltCfg.DbCfg().DBConns[utils.MetaDefault].User,
"The DataDb user to sign in as.")
- dataDBPasswd = cgrLoaderFlags.String(utils.DataDBPasswdCgr, dfltCfg.DataDbCfg().Password,
+ dataDBPasswd = cgrLoaderFlags.String(utils.DataDBPasswdCgr, dfltCfg.DbCfg().DBConns[utils.MetaDefault].Password,
"The DataDb user's password.")
dbDataEncoding = cgrLoaderFlags.String(utils.DBDataEncodingCfg, dfltCfg.GeneralCfg().DBDataEncoding,
"The encoding used to store object data in strings")
- dbRedisMaxConns = cgrLoaderFlags.Int(utils.RedisMaxConnsCfg, dfltCfg.DataDbCfg().Opts.RedisMaxConns,
+ dbRedisMaxConns = cgrLoaderFlags.Int(utils.RedisMaxConnsCfg, dfltCfg.DbCfg().Opts.RedisMaxConns,
"The connection pool size")
- dbRedisConnectAttempts = cgrLoaderFlags.Int(utils.RedisConnectAttemptsCfg, dfltCfg.DataDbCfg().Opts.RedisConnectAttempts,
+ dbRedisConnectAttempts = cgrLoaderFlags.Int(utils.RedisConnectAttemptsCfg, dfltCfg.DbCfg().Opts.RedisConnectAttempts,
"The maximum amount of dial attempts")
- dbRedisSentinel = cgrLoaderFlags.String(utils.RedisSentinelNameCfg, dfltCfg.DataDbCfg().Opts.RedisSentinel,
+ dbRedisSentinel = cgrLoaderFlags.String(utils.RedisSentinelNameCfg, dfltCfg.DbCfg().Opts.RedisSentinel,
"The name of redis sentinel")
dbRedisCluster = cgrLoaderFlags.Bool(utils.RedisClusterCfg, false,
"Is the redis datadb a cluster")
- dbRedisClusterSync = cgrLoaderFlags.Duration(utils.RedisClusterSyncCfg, dfltCfg.DataDbCfg().Opts.RedisClusterSync,
+ dbRedisClusterSync = cgrLoaderFlags.Duration(utils.RedisClusterSyncCfg, dfltCfg.DbCfg().Opts.RedisClusterSync,
"The sync interval for the redis cluster")
- dbRedisClusterDownDelay = cgrLoaderFlags.Duration(utils.RedisClusterOnDownDelayCfg, dfltCfg.DataDbCfg().Opts.RedisClusterOndownDelay,
+ dbRedisClusterDownDelay = cgrLoaderFlags.Duration(utils.RedisClusterOnDownDelayCfg, dfltCfg.DbCfg().Opts.RedisClusterOndownDelay,
"The delay before executing the commands if the redis cluster is in the CLUSTERDOWN state")
- dbRedisConnectTimeout = cgrLoaderFlags.Duration(utils.RedisConnectTimeoutCfg, dfltCfg.DataDbCfg().Opts.RedisConnectTimeout,
+ dbRedisConnectTimeout = cgrLoaderFlags.Duration(utils.RedisConnectTimeoutCfg, dfltCfg.DbCfg().Opts.RedisConnectTimeout,
"The amount of wait time until timeout for a connection attempt")
- dbRedisReadTimeout = cgrLoaderFlags.Duration(utils.RedisReadTimeoutCfg, dfltCfg.DataDbCfg().Opts.RedisReadTimeout,
+ dbRedisReadTimeout = cgrLoaderFlags.Duration(utils.RedisReadTimeoutCfg, dfltCfg.DbCfg().Opts.RedisReadTimeout,
"The amount of wait time until timeout for reading operations")
- dbRedisWriteTimeout = cgrLoaderFlags.Duration(utils.RedisWriteTimeoutCfg, dfltCfg.DataDbCfg().Opts.RedisWriteTimeout,
+ dbRedisWriteTimeout = cgrLoaderFlags.Duration(utils.RedisWriteTimeoutCfg, dfltCfg.DbCfg().Opts.RedisWriteTimeout,
"The amount of wait time until timeout for writing operations")
- dbRedisPoolPipelineWindow = cgrLoaderFlags.Duration(utils.RedisPoolPipelineWindowCfg, dfltCfg.DataDbCfg().Opts.RedisPoolPipelineWindow,
+ dbRedisPoolPipelineWindow = cgrLoaderFlags.Duration(utils.RedisPoolPipelineWindowCfg, dfltCfg.DbCfg().Opts.RedisPoolPipelineWindow,
"Duration after which internal pipelines are flushed. Zero disables implicit pipelining.")
- dbRedisPoolPipelineLimit = cgrLoaderFlags.Int(utils.RedisPoolPipelineLimitCfg, dfltCfg.DataDbCfg().Opts.RedisPoolPipelineLimit,
+ dbRedisPoolPipelineLimit = cgrLoaderFlags.Int(utils.RedisPoolPipelineLimitCfg, dfltCfg.DbCfg().Opts.RedisPoolPipelineLimit,
"Maximum number of commands that can be pipelined before flushing. Zero means no limit.")
dbRedisTls = cgrLoaderFlags.Bool(utils.RedisTLSCfg, false, "Enable TLS when connecting to Redis")
dbRedisClientCertificate = cgrLoaderFlags.String(utils.RedisClientCertificateCfg, utils.EmptyString, "Path to the client certificate")
dbRedisClientKey = cgrLoaderFlags.String(utils.RedisClientKeyCfg, utils.EmptyString, "Path to the client key")
dbRedisCACertificate = cgrLoaderFlags.String(utils.RedisCACertificateCfg, utils.EmptyString, "Path to the CA certificate")
- dbQueryTimeout = cgrLoaderFlags.Duration(utils.MongoQueryTimeoutCfg, dfltCfg.DataDbCfg().Opts.MongoQueryTimeout,
+ dbQueryTimeout = cgrLoaderFlags.Duration(utils.MongoQueryTimeoutCfg, dfltCfg.DbCfg().Opts.MongoQueryTimeout,
"The timeout for queries")
- dbMongoConnScheme = cgrLoaderFlags.String(utils.MongoConnSchemeCfg, dfltCfg.DataDbCfg().Opts.MongoConnScheme,
+ dbMongoConnScheme = cgrLoaderFlags.String(utils.MongoConnSchemeCfg, dfltCfg.DbCfg().Opts.MongoConnScheme,
"Scheme for MongoDB connection ")
cachingArg = cgrLoaderFlags.String(utils.CachingArgCgr, utils.EmptyString,
@@ -128,7 +128,7 @@ func loadConfig() (ldrCfg *config.CGRConfig) {
d, err := engine.NewDataDBConn(ldrCfg.ConfigDBCfg().Type,
ldrCfg.ConfigDBCfg().Host, ldrCfg.ConfigDBCfg().Port,
ldrCfg.ConfigDBCfg().Name, ldrCfg.ConfigDBCfg().User,
- ldrCfg.ConfigDBCfg().Password, ldrCfg.GeneralCfg().DBDataEncoding,
+ ldrCfg.ConfigDBCfg().Password, ldrCfg.GeneralCfg().DBDataEncoding, nil, nil,
ldrCfg.ConfigDBCfg().Opts, nil)
if err != nil { // Cannot configure getter database, show stopper
utils.Logger.Crit(fmt.Sprintf("Could not configure configDB: %s exiting!", err))
@@ -142,80 +142,80 @@ func loadConfig() (ldrCfg *config.CGRConfig) {
config.SetCgrConfig(ldrCfg)
}
// Data for DataDB
- if *dataDBType != dfltCfg.DataDbCfg().Type {
- ldrCfg.DataDbCfg().Type = *dataDBType
+ if *dataDBType != dfltCfg.DbCfg().DBConns[utils.MetaDefault].Type {
+ ldrCfg.DbCfg().DBConns[utils.MetaDefault].Type = *dataDBType
}
- if *dataDBHost != dfltCfg.DataDbCfg().Host {
- ldrCfg.DataDbCfg().Host = *dataDBHost
+ if *dataDBHost != dfltCfg.DbCfg().DBConns[utils.MetaDefault].Host {
+ ldrCfg.DbCfg().DBConns[utils.MetaDefault].Host = *dataDBHost
}
- if *dataDBPort != dfltCfg.DataDbCfg().Port {
- ldrCfg.DataDbCfg().Port = *dataDBPort
+ if *dataDBPort != dfltCfg.DbCfg().DBConns[utils.MetaDefault].Port {
+ ldrCfg.DbCfg().DBConns[utils.MetaDefault].Port = *dataDBPort
}
- if *dataDBName != dfltCfg.DataDbCfg().Name {
- ldrCfg.DataDbCfg().Name = *dataDBName
+ if *dataDBName != dfltCfg.DbCfg().DBConns[utils.MetaDefault].Name {
+ ldrCfg.DbCfg().DBConns[utils.MetaDefault].Name = *dataDBName
}
- if *dataDBUser != dfltCfg.DataDbCfg().User {
- ldrCfg.DataDbCfg().User = *dataDBUser
+ if *dataDBUser != dfltCfg.DbCfg().DBConns[utils.MetaDefault].User {
+ ldrCfg.DbCfg().DBConns[utils.MetaDefault].User = *dataDBUser
}
- if *dataDBPasswd != dfltCfg.DataDbCfg().Password {
- ldrCfg.DataDbCfg().Password = *dataDBPasswd
+ if *dataDBPasswd != dfltCfg.DbCfg().DBConns[utils.MetaDefault].Password {
+ ldrCfg.DbCfg().DBConns[utils.MetaDefault].Password = *dataDBPasswd
}
- if *dbRedisMaxConns != dfltCfg.DataDbCfg().Opts.RedisMaxConns {
- ldrCfg.DataDbCfg().Opts.RedisMaxConns = *dbRedisMaxConns
+ if *dbRedisMaxConns != dfltCfg.DbCfg().Opts.RedisMaxConns {
+ ldrCfg.DbCfg().Opts.RedisMaxConns = *dbRedisMaxConns
}
- if *dbRedisConnectAttempts != dfltCfg.DataDbCfg().Opts.RedisConnectAttempts {
- ldrCfg.DataDbCfg().Opts.RedisConnectAttempts = *dbRedisConnectAttempts
+ if *dbRedisConnectAttempts != dfltCfg.DbCfg().Opts.RedisConnectAttempts {
+ ldrCfg.DbCfg().Opts.RedisConnectAttempts = *dbRedisConnectAttempts
}
- if *dbRedisSentinel != dfltCfg.DataDbCfg().Opts.RedisSentinel {
- ldrCfg.DataDbCfg().Opts.RedisSentinel = *dbRedisSentinel
+ if *dbRedisSentinel != dfltCfg.DbCfg().Opts.RedisSentinel {
+ ldrCfg.DbCfg().Opts.RedisSentinel = *dbRedisSentinel
}
- if *dbRedisCluster != dfltCfg.DataDbCfg().Opts.RedisCluster {
- ldrCfg.DataDbCfg().Opts.RedisCluster = *dbRedisCluster
+ if *dbRedisCluster != dfltCfg.DbCfg().Opts.RedisCluster {
+ ldrCfg.DbCfg().Opts.RedisCluster = *dbRedisCluster
}
- if *dbRedisClusterSync != dfltCfg.DataDbCfg().Opts.RedisClusterSync {
- ldrCfg.DataDbCfg().Opts.RedisClusterSync = *dbRedisClusterSync
+ if *dbRedisClusterSync != dfltCfg.DbCfg().Opts.RedisClusterSync {
+ ldrCfg.DbCfg().Opts.RedisClusterSync = *dbRedisClusterSync
}
- if *dbRedisClusterDownDelay != dfltCfg.DataDbCfg().Opts.RedisClusterOndownDelay {
- ldrCfg.DataDbCfg().Opts.RedisClusterOndownDelay = *dbRedisClusterDownDelay
+ if *dbRedisClusterDownDelay != dfltCfg.DbCfg().Opts.RedisClusterOndownDelay {
+ ldrCfg.DbCfg().Opts.RedisClusterOndownDelay = *dbRedisClusterDownDelay
}
- if *dbRedisConnectTimeout != dfltCfg.DataDbCfg().Opts.RedisConnectTimeout {
- ldrCfg.DataDbCfg().Opts.RedisConnectTimeout = *dbRedisConnectTimeout
+ if *dbRedisConnectTimeout != dfltCfg.DbCfg().Opts.RedisConnectTimeout {
+ ldrCfg.DbCfg().Opts.RedisConnectTimeout = *dbRedisConnectTimeout
}
- if *dbRedisReadTimeout != dfltCfg.DataDbCfg().Opts.RedisReadTimeout {
- ldrCfg.DataDbCfg().Opts.RedisReadTimeout = *dbRedisReadTimeout
+ if *dbRedisReadTimeout != dfltCfg.DbCfg().Opts.RedisReadTimeout {
+ ldrCfg.DbCfg().Opts.RedisReadTimeout = *dbRedisReadTimeout
}
- if *dbRedisWriteTimeout != dfltCfg.DataDbCfg().Opts.RedisWriteTimeout {
- ldrCfg.DataDbCfg().Opts.RedisWriteTimeout = *dbRedisWriteTimeout
+ if *dbRedisWriteTimeout != dfltCfg.DbCfg().Opts.RedisWriteTimeout {
+ ldrCfg.DbCfg().Opts.RedisWriteTimeout = *dbRedisWriteTimeout
}
- if *dbRedisPoolPipelineWindow != dfltCfg.DataDbCfg().Opts.RedisPoolPipelineWindow {
- ldrCfg.DataDbCfg().Opts.RedisPoolPipelineWindow = *dbRedisPoolPipelineWindow
+ if *dbRedisPoolPipelineWindow != dfltCfg.DbCfg().Opts.RedisPoolPipelineWindow {
+ ldrCfg.DbCfg().Opts.RedisPoolPipelineWindow = *dbRedisPoolPipelineWindow
}
- if *dbRedisPoolPipelineLimit != dfltCfg.DataDbCfg().Opts.RedisPoolPipelineLimit {
- ldrCfg.DataDbCfg().Opts.RedisPoolPipelineLimit = *dbRedisPoolPipelineLimit
+ if *dbRedisPoolPipelineLimit != dfltCfg.DbCfg().Opts.RedisPoolPipelineLimit {
+ ldrCfg.DbCfg().Opts.RedisPoolPipelineLimit = *dbRedisPoolPipelineLimit
}
- if *dbQueryTimeout != dfltCfg.DataDbCfg().Opts.MongoQueryTimeout {
- ldrCfg.DataDbCfg().Opts.MongoQueryTimeout = *dbQueryTimeout
+ if *dbQueryTimeout != dfltCfg.DbCfg().Opts.MongoQueryTimeout {
+ ldrCfg.DbCfg().Opts.MongoQueryTimeout = *dbQueryTimeout
}
- if *dbMongoConnScheme != dfltCfg.DataDbCfg().Opts.MongoConnScheme {
- ldrCfg.DataDbCfg().Opts.MongoConnScheme = *dbMongoConnScheme
+ if *dbMongoConnScheme != dfltCfg.DbCfg().Opts.MongoConnScheme {
+ ldrCfg.DbCfg().Opts.MongoConnScheme = *dbMongoConnScheme
}
- if *dbRedisTls != dfltCfg.DataDbCfg().Opts.RedisTLS {
- ldrCfg.DataDbCfg().Opts.RedisTLS = *dbRedisTls
+ if *dbRedisTls != dfltCfg.DbCfg().Opts.RedisTLS {
+ ldrCfg.DbCfg().Opts.RedisTLS = *dbRedisTls
}
- if *dbRedisClientCertificate != dfltCfg.DataDbCfg().Opts.RedisClientCertificate {
- ldrCfg.DataDbCfg().Opts.RedisClientCertificate = *dbRedisClientCertificate
+ if *dbRedisClientCertificate != dfltCfg.DbCfg().Opts.RedisClientCertificate {
+ ldrCfg.DbCfg().Opts.RedisClientCertificate = *dbRedisClientCertificate
}
- if *dbRedisClientKey != dfltCfg.DataDbCfg().Opts.RedisClientKey {
- ldrCfg.DataDbCfg().Opts.RedisClientKey = *dbRedisClientKey
+ if *dbRedisClientKey != dfltCfg.DbCfg().Opts.RedisClientKey {
+ ldrCfg.DbCfg().Opts.RedisClientKey = *dbRedisClientKey
}
- if *dbRedisCACertificate != dfltCfg.DataDbCfg().Opts.RedisCACertificate {
- ldrCfg.DataDbCfg().Opts.RedisCACertificate = *dbRedisCACertificate
+ if *dbRedisCACertificate != dfltCfg.DbCfg().Opts.RedisCACertificate {
+ ldrCfg.DbCfg().Opts.RedisCACertificate = *dbRedisCACertificate
}
if *dbDataEncoding != dfltCfg.GeneralCfg().DBDataEncoding {
@@ -317,21 +317,27 @@ func main() {
// we initialize connManager here with nil for InternalChannels
engine.NewConnManager(ldrCfg)
- if dataDB, err = engine.NewDataDBConn(ldrCfg.DataDbCfg().Type,
- ldrCfg.DataDbCfg().Host, ldrCfg.DataDbCfg().Port,
- ldrCfg.DataDbCfg().Name, ldrCfg.DataDbCfg().User,
- ldrCfg.DataDbCfg().Password, ldrCfg.GeneralCfg().DBDataEncoding,
- ldrCfg.DataDbCfg().Opts, ldrCfg.DataDbCfg().Items); err != nil {
+ if dataDB, err = engine.NewDataDBConn(ldrCfg.DbCfg().DBConns[utils.MetaDefault].Type,
+ ldrCfg.DbCfg().DBConns[utils.MetaDefault].Host,
+ ldrCfg.DbCfg().DBConns[utils.MetaDefault].Port,
+ ldrCfg.DbCfg().DBConns[utils.MetaDefault].Name,
+ ldrCfg.DbCfg().DBConns[utils.MetaDefault].User,
+ ldrCfg.DbCfg().DBConns[utils.MetaDefault].Password,
+ ldrCfg.GeneralCfg().DBDataEncoding,
+ ldrCfg.DbCfg().DBConns[utils.MetaDefault].StringIndexedFields,
+ ldrCfg.DbCfg().DBConns[utils.MetaDefault].PrefixIndexedFields, ldrCfg.DbCfg().Opts,
+ ldrCfg.DbCfg().Items); err != nil {
log.Fatalf("Coud not open dataDB connection: %s", err.Error())
}
defer dataDB.Close()
-
var loader engine.LoadReader
if loader, err = getLoader(ldrCfg); err != nil {
log.Fatal(err)
}
+ dbcManager := engine.NewDBConnManager(map[string]engine.DataDB{
+ utils.MetaDefault: dataDB}, ldrCfg.DbCfg())
var tpReader *engine.TpReader
- if tpReader, err = engine.NewTpReader(dataDB, loader,
+ if tpReader, err = engine.NewTpReader(dbcManager, loader,
ldrCfg.LoaderCgrCfg().TpID, ldrCfg.GeneralCfg().DefaultTimezone,
ldrCfg.LoaderCgrCfg().CachesConns,
ldrCfg.LoaderCgrCfg().ActionSConns); err != nil {
diff --git a/cmd/cgr-migrator/cgr-migrator.go b/cmd/cgr-migrator/cgr-migrator.go
index 201734cf6..4bea642b9 100755
--- a/cmd/cgr-migrator/cgr-migrator.go
+++ b/cmd/cgr-migrator/cgr-migrator.go
@@ -23,6 +23,7 @@ import (
"fmt"
"log"
"os"
+ "slices"
"strings"
"github.com/cgrates/birpc/context"
@@ -36,8 +37,8 @@ var (
cgrMigratorFlags = flag.NewFlagSet(utils.CgrMigrator, flag.ContinueOnError)
sameDataDB bool
- dmIN migrator.MigratorDataDB
- dmOUT migrator.MigratorDataDB
+ dmFrom = make(map[string]migrator.MigratorDataDB)
+ dmTo = make(map[string]migrator.MigratorDataDB)
err error
dfltCfg = config.NewDefaultCGRConfig()
cfgPath = cgrMigratorFlags.String(utils.CfgPathCgr, utils.EmptyString,
@@ -47,66 +48,40 @@ var (
"<*set_versions|*cost_details|*accounts|*actions|*action_triggers|*action_plans|*shared_groups|*filters|*datadb>")
version = cgrMigratorFlags.Bool(utils.VersionCgr, false, "prints the application version")
- inDataDBType = cgrMigratorFlags.String(utils.DataDBTypeCgr, dfltCfg.DataDbCfg().Type,
- "the type of the DataDB Database <*redis|*mongo>")
- inDataDBHost = cgrMigratorFlags.String(utils.DataDBHostCgr, dfltCfg.DataDbCfg().Host,
- "the DataDB host")
- inDataDBPort = cgrMigratorFlags.String(utils.DataDBPortCgr, dfltCfg.DataDbCfg().Port,
- "the DataDB port")
- inDataDBName = cgrMigratorFlags.String(utils.DataDBNameCgr, dfltCfg.DataDbCfg().Name,
- "the name/number of the DataDB")
- inDataDBUser = cgrMigratorFlags.String(utils.DataDBUserCgr, dfltCfg.DataDbCfg().User,
- "the DataDB user")
- inDataDBPass = cgrMigratorFlags.String(utils.DataDBPasswdCgr, dfltCfg.DataDbCfg().Password,
- "the DataDB password")
inDBDataEncoding = cgrMigratorFlags.String(utils.DBDataEncodingCfg, dfltCfg.GeneralCfg().DBDataEncoding,
"the encoding used to store object Data in strings")
- dbRedisMaxConns = cgrMigratorFlags.Int(utils.RedisMaxConnsCfg, dfltCfg.DataDbCfg().Opts.RedisMaxConns,
+ dbRedisMaxConns = cgrMigratorFlags.Int(utils.RedisMaxConnsCfg, dfltCfg.DbCfg().Opts.RedisMaxConns,
"The connection pool size")
- dbRedisConnectAttempts = cgrMigratorFlags.Int(utils.RedisConnectAttemptsCfg, dfltCfg.DataDbCfg().Opts.RedisConnectAttempts,
+ dbRedisConnectAttempts = cgrMigratorFlags.Int(utils.RedisConnectAttemptsCfg, dfltCfg.DbCfg().Opts.RedisConnectAttempts,
"The maximum amount of dial attempts")
- inDataDBRedisSentinel = cgrMigratorFlags.String(utils.RedisSentinelNameCfg, dfltCfg.DataDbCfg().Opts.RedisSentinel,
+ inDataDBRedisSentinel = cgrMigratorFlags.String(utils.RedisSentinelNameCfg, dfltCfg.DbCfg().Opts.RedisSentinel,
"the name of redis sentinel")
dbRedisCluster = cgrMigratorFlags.Bool(utils.RedisClusterCfg, false,
"Is the redis datadb a cluster")
- dbRedisClusterSync = cgrMigratorFlags.Duration(utils.RedisClusterSyncCfg, dfltCfg.DataDbCfg().Opts.RedisClusterSync,
+ dbRedisClusterSync = cgrMigratorFlags.Duration(utils.RedisClusterSyncCfg, dfltCfg.DbCfg().Opts.RedisClusterSync,
"The sync interval for the redis cluster")
- dbRedisClusterDownDelay = cgrMigratorFlags.Duration(utils.RedisClusterOnDownDelayCfg, dfltCfg.DataDbCfg().Opts.RedisClusterOndownDelay,
+ dbRedisClusterDownDelay = cgrMigratorFlags.Duration(utils.RedisClusterOnDownDelayCfg, dfltCfg.DbCfg().Opts.RedisClusterOndownDelay,
"The delay before executing the commands if the redis cluster is in the CLUSTERDOWN state")
- dbRedisConnectTimeout = cgrMigratorFlags.Duration(utils.RedisConnectTimeoutCfg, dfltCfg.DataDbCfg().Opts.RedisConnectTimeout,
+ dbRedisConnectTimeout = cgrMigratorFlags.Duration(utils.RedisConnectTimeoutCfg, dfltCfg.DbCfg().Opts.RedisConnectTimeout,
"The amount of wait time until timeout for a connection attempt")
- dbRedisReadTimeout = cgrMigratorFlags.Duration(utils.RedisReadTimeoutCfg, dfltCfg.DataDbCfg().Opts.RedisReadTimeout,
+ dbRedisReadTimeout = cgrMigratorFlags.Duration(utils.RedisReadTimeoutCfg, dfltCfg.DbCfg().Opts.RedisReadTimeout,
"The amount of wait time until timeout for reading operations")
- dbRedisWriteTimeout = cgrMigratorFlags.Duration(utils.RedisWriteTimeoutCfg, dfltCfg.DataDbCfg().Opts.RedisWriteTimeout,
+ dbRedisWriteTimeout = cgrMigratorFlags.Duration(utils.RedisWriteTimeoutCfg, dfltCfg.DbCfg().Opts.RedisWriteTimeout,
"The amount of wait time until timeout for writing operations")
- dbRedisPoolPipelineWindow = cgrMigratorFlags.Duration(utils.RedisPoolPipelineWindowCfg, dfltCfg.DataDbCfg().Opts.RedisPoolPipelineWindow,
+ dbRedisPoolPipelineWindow = cgrMigratorFlags.Duration(utils.RedisPoolPipelineWindowCfg, dfltCfg.DbCfg().Opts.RedisPoolPipelineWindow,
"Duration after which internal pipelines are flushed. Zero disables implicit pipelining.")
- dbRedisPoolPipelineLimit = cgrMigratorFlags.Int(utils.RedisPoolPipelineLimitCfg, dfltCfg.DataDbCfg().Opts.RedisPoolPipelineLimit,
+ dbRedisPoolPipelineLimit = cgrMigratorFlags.Int(utils.RedisPoolPipelineLimitCfg, dfltCfg.DbCfg().Opts.RedisPoolPipelineLimit,
"Maximum number of commands that can be pipelined before flushing. Zero means no limit.")
dbRedisTls = cgrMigratorFlags.Bool(utils.RedisTLSCfg, false, "Enable TLS when connecting to Redis")
dbRedisClientCertificate = cgrMigratorFlags.String(utils.RedisClientCertificateCfg, utils.EmptyString, "Path to the client certificate")
dbRedisClientKey = cgrMigratorFlags.String(utils.RedisClientKeyCfg, utils.EmptyString, "Path to the client key")
dbRedisCACertificate = cgrMigratorFlags.String(utils.RedisCACertificateCfg, utils.EmptyString, "Path to the CA certificate")
- dbQueryTimeout = cgrMigratorFlags.Duration(utils.MongoQueryTimeoutCfg, dfltCfg.DataDbCfg().Opts.MongoQueryTimeout,
+ dbQueryTimeout = cgrMigratorFlags.Duration(utils.MongoQueryTimeoutCfg, dfltCfg.DbCfg().Opts.MongoQueryTimeout,
"The timeout for queries")
- dbMongoConnScheme = cgrMigratorFlags.String(utils.MongoConnSchemeCfg, dfltCfg.DataDbCfg().Opts.MongoConnScheme,
+ dbMongoConnScheme = cgrMigratorFlags.String(utils.MongoConnSchemeCfg, dfltCfg.DbCfg().Opts.MongoConnScheme,
"Scheme for MongoDB connection ")
- outDataDBType = cgrMigratorFlags.String(utils.OutDataDBTypeCfg, utils.MetaDataDB,
- "output DataDB type <*redis|*mongo>")
- outDataDBHost = cgrMigratorFlags.String(utils.OutDataDBHostCfg, utils.MetaDataDB,
- "output DataDB host to connect to")
- outDataDBPort = cgrMigratorFlags.String(utils.OutDataDBPortCfg, utils.MetaDataDB,
- "output DataDB port")
- outDataDBName = cgrMigratorFlags.String(utils.OutDataDBNameCfg, utils.MetaDataDB,
- "output DataDB name/number")
- outDataDBUser = cgrMigratorFlags.String(utils.OutDataDBUserCfg, utils.MetaDataDB,
- "output DataDB user")
- outDataDBPass = cgrMigratorFlags.String(utils.OutDataDBPasswordCfg, utils.MetaDataDB,
- "output DataDB password")
- outDBDataEncoding = cgrMigratorFlags.String(utils.OutDataDBEncodingCfg, utils.MetaDataDB,
- "the encoding used to store object Data in strings in move mode")
- outDataDBRedisSentinel = cgrMigratorFlags.String(utils.OutDataDBRedisSentinel, utils.MetaDataDB,
+ outDataDBRedisSentinel = cgrMigratorFlags.String(utils.OutDBRedisSentinel, utils.MetaDataDB,
"the name of redis sentinel")
dryRun = cgrMigratorFlags.Bool(utils.DryRunCfg, false,
"parse loaded data for consistency and errors, without storing it")
@@ -135,7 +110,7 @@ func main() {
d, err := engine.NewDataDBConn(mgrCfg.ConfigDBCfg().Type,
mgrCfg.ConfigDBCfg().Host, mgrCfg.ConfigDBCfg().Port,
mgrCfg.ConfigDBCfg().Name, mgrCfg.ConfigDBCfg().User,
- mgrCfg.ConfigDBCfg().Password, mgrCfg.GeneralCfg().DBDataEncoding,
+ mgrCfg.ConfigDBCfg().Password, mgrCfg.GeneralCfg().DBDataEncoding, nil, nil,
mgrCfg.ConfigDBCfg().Opts, nil)
if err != nil { // Cannot configure getter database, show stopper
utils.Logger.Crit(fmt.Sprintf("Could not configure configDB: %s exiting!", err))
@@ -149,178 +124,112 @@ func main() {
config.SetCgrConfig(mgrCfg)
}
- // inDataDB
- if *inDataDBType != dfltCfg.DataDbCfg().Type {
- mgrCfg.DataDbCfg().Type = *inDataDBType
- }
- if *inDataDBHost != dfltCfg.DataDbCfg().Host {
- mgrCfg.DataDbCfg().Host = *inDataDBHost
- }
- if *inDataDBPort != dfltCfg.DataDbCfg().Port {
- mgrCfg.DataDbCfg().Port = *inDataDBPort
- }
- if *inDataDBName != dfltCfg.DataDbCfg().Name {
- mgrCfg.DataDbCfg().Name = *inDataDBName
- }
- if *inDataDBUser != dfltCfg.DataDbCfg().User {
- mgrCfg.DataDbCfg().User = *inDataDBUser
- }
- if *inDataDBPass != dfltCfg.DataDbCfg().Password {
- mgrCfg.DataDbCfg().Password = *inDataDBPass
- }
if *inDBDataEncoding != dfltCfg.GeneralCfg().DBDataEncoding {
mgrCfg.GeneralCfg().DBDataEncoding = *inDBDataEncoding
}
- if *dbRedisMaxConns != dfltCfg.DataDbCfg().Opts.RedisMaxConns {
- mgrCfg.DataDbCfg().Opts.RedisMaxConns = *dbRedisMaxConns
+ if *dbRedisMaxConns != dfltCfg.DbCfg().Opts.RedisMaxConns {
+ mgrCfg.DbCfg().Opts.RedisMaxConns = *dbRedisMaxConns
}
- if *dbRedisConnectAttempts != dfltCfg.DataDbCfg().Opts.RedisConnectAttempts {
- mgrCfg.DataDbCfg().Opts.RedisConnectAttempts = *dbRedisConnectAttempts
+ if *dbRedisConnectAttempts != dfltCfg.DbCfg().Opts.RedisConnectAttempts {
+ mgrCfg.DbCfg().Opts.RedisConnectAttempts = *dbRedisConnectAttempts
}
- if *inDataDBRedisSentinel != dfltCfg.DataDbCfg().Opts.RedisSentinel {
- mgrCfg.DataDbCfg().Opts.RedisSentinel = *inDataDBRedisSentinel
+ if *inDataDBRedisSentinel != dfltCfg.DbCfg().Opts.RedisSentinel {
+ mgrCfg.DbCfg().Opts.RedisSentinel = *inDataDBRedisSentinel
}
- if *dbRedisCluster != dfltCfg.DataDbCfg().Opts.RedisCluster {
- mgrCfg.DataDbCfg().Opts.RedisCluster = *dbRedisCluster
+ if *dbRedisCluster != dfltCfg.DbCfg().Opts.RedisCluster {
+ mgrCfg.DbCfg().Opts.RedisCluster = *dbRedisCluster
}
- if *dbRedisClusterSync != dfltCfg.DataDbCfg().Opts.RedisClusterSync {
- mgrCfg.DataDbCfg().Opts.RedisClusterSync = *dbRedisClusterSync
+ if *dbRedisClusterSync != dfltCfg.DbCfg().Opts.RedisClusterSync {
+ mgrCfg.DbCfg().Opts.RedisClusterSync = *dbRedisClusterSync
}
- if *dbRedisClusterDownDelay != dfltCfg.DataDbCfg().Opts.RedisClusterOndownDelay {
- mgrCfg.DataDbCfg().Opts.RedisClusterOndownDelay = *dbRedisClusterDownDelay
+ if *dbRedisClusterDownDelay != dfltCfg.DbCfg().Opts.RedisClusterOndownDelay {
+ mgrCfg.DbCfg().Opts.RedisClusterOndownDelay = *dbRedisClusterDownDelay
}
- if *dbRedisConnectTimeout != dfltCfg.DataDbCfg().Opts.RedisConnectTimeout {
- mgrCfg.DataDbCfg().Opts.RedisConnectTimeout = *dbRedisConnectTimeout
+ if *dbRedisConnectTimeout != dfltCfg.DbCfg().Opts.RedisConnectTimeout {
+ mgrCfg.DbCfg().Opts.RedisConnectTimeout = *dbRedisConnectTimeout
}
- if *dbRedisReadTimeout != dfltCfg.DataDbCfg().Opts.RedisReadTimeout {
- mgrCfg.DataDbCfg().Opts.RedisReadTimeout = *dbRedisReadTimeout
+ if *dbRedisReadTimeout != dfltCfg.DbCfg().Opts.RedisReadTimeout {
+ mgrCfg.DbCfg().Opts.RedisReadTimeout = *dbRedisReadTimeout
}
- if *dbRedisWriteTimeout != dfltCfg.DataDbCfg().Opts.RedisWriteTimeout {
- mgrCfg.DataDbCfg().Opts.RedisWriteTimeout = *dbRedisWriteTimeout
+ if *dbRedisWriteTimeout != dfltCfg.DbCfg().Opts.RedisWriteTimeout {
+ mgrCfg.DbCfg().Opts.RedisWriteTimeout = *dbRedisWriteTimeout
}
- if *dbRedisPoolPipelineWindow != dfltCfg.DataDbCfg().Opts.RedisPoolPipelineWindow {
- mgrCfg.DataDbCfg().Opts.RedisPoolPipelineWindow = *dbRedisPoolPipelineWindow
+ if *dbRedisPoolPipelineWindow != dfltCfg.DbCfg().Opts.RedisPoolPipelineWindow {
+ mgrCfg.DbCfg().Opts.RedisPoolPipelineWindow = *dbRedisPoolPipelineWindow
}
- if *dbRedisPoolPipelineLimit != dfltCfg.DataDbCfg().Opts.RedisPoolPipelineLimit {
- mgrCfg.DataDbCfg().Opts.RedisPoolPipelineLimit = *dbRedisPoolPipelineLimit
+ if *dbRedisPoolPipelineLimit != dfltCfg.DbCfg().Opts.RedisPoolPipelineLimit {
+ mgrCfg.DbCfg().Opts.RedisPoolPipelineLimit = *dbRedisPoolPipelineLimit
}
- if *dbRedisTls != dfltCfg.DataDbCfg().Opts.RedisTLS {
- mgrCfg.DataDbCfg().Opts.RedisTLS = *dbRedisTls
+ if *dbRedisTls != dfltCfg.DbCfg().Opts.RedisTLS {
+ mgrCfg.DbCfg().Opts.RedisTLS = *dbRedisTls
}
- if *dbRedisClientCertificate != dfltCfg.DataDbCfg().Opts.RedisClientCertificate {
- mgrCfg.DataDbCfg().Opts.RedisClientCertificate = *dbRedisClientCertificate
+ if *dbRedisClientCertificate != dfltCfg.DbCfg().Opts.RedisClientCertificate {
+ mgrCfg.DbCfg().Opts.RedisClientCertificate = *dbRedisClientCertificate
}
- if *dbRedisClientKey != dfltCfg.DataDbCfg().Opts.RedisClientKey {
- mgrCfg.DataDbCfg().Opts.RedisClientKey = *dbRedisClientKey
+ if *dbRedisClientKey != dfltCfg.DbCfg().Opts.RedisClientKey {
+ mgrCfg.DbCfg().Opts.RedisClientKey = *dbRedisClientKey
}
- if *dbRedisCACertificate != dfltCfg.DataDbCfg().Opts.RedisCACertificate {
- mgrCfg.DataDbCfg().Opts.RedisCACertificate = *dbRedisCACertificate
+ if *dbRedisCACertificate != dfltCfg.DbCfg().Opts.RedisCACertificate {
+ mgrCfg.DbCfg().Opts.RedisCACertificate = *dbRedisCACertificate
}
- if *dbQueryTimeout != dfltCfg.DataDbCfg().Opts.MongoQueryTimeout {
- mgrCfg.DataDbCfg().Opts.MongoQueryTimeout = *dbQueryTimeout
+ if *dbQueryTimeout != dfltCfg.DbCfg().Opts.MongoQueryTimeout {
+ mgrCfg.DbCfg().Opts.MongoQueryTimeout = *dbQueryTimeout
}
- if *dbMongoConnScheme != dfltCfg.DataDbCfg().Opts.MongoConnScheme {
- mgrCfg.DataDbCfg().Opts.MongoConnScheme = *dbMongoConnScheme
+ if *dbMongoConnScheme != dfltCfg.DbCfg().Opts.MongoConnScheme {
+ mgrCfg.DbCfg().Opts.MongoConnScheme = *dbMongoConnScheme
}
- // outDataDB
- if *outDataDBType == utils.MetaDataDB {
- if dfltCfg.MigratorCgrCfg().OutDataDBType == mgrCfg.MigratorCgrCfg().OutDataDBType {
- mgrCfg.MigratorCgrCfg().OutDataDBType = mgrCfg.DataDbCfg().Type
- }
- } else {
- mgrCfg.MigratorCgrCfg().OutDataDBType = *outDataDBType
- }
-
- if *outDataDBHost == utils.MetaDataDB {
- if dfltCfg.MigratorCgrCfg().OutDataDBHost == mgrCfg.MigratorCgrCfg().OutDataDBHost {
- mgrCfg.MigratorCgrCfg().OutDataDBHost = mgrCfg.DataDbCfg().Host
- }
- } else {
- mgrCfg.MigratorCgrCfg().OutDataDBHost = *outDataDBHost
- }
- if *outDataDBPort == utils.MetaDataDB {
- if dfltCfg.MigratorCgrCfg().OutDataDBPort == mgrCfg.MigratorCgrCfg().OutDataDBPort {
- mgrCfg.MigratorCgrCfg().OutDataDBPort = mgrCfg.DataDbCfg().Port
- }
- } else {
- mgrCfg.MigratorCgrCfg().OutDataDBPort = *outDataDBPort
- }
- if *outDataDBName == utils.MetaDataDB {
- if dfltCfg.MigratorCgrCfg().OutDataDBName == mgrCfg.MigratorCgrCfg().OutDataDBName {
- mgrCfg.MigratorCgrCfg().OutDataDBName = mgrCfg.DataDbCfg().Name
- }
- } else {
- mgrCfg.MigratorCgrCfg().OutDataDBName = *outDataDBName
- }
- if *outDataDBUser == utils.MetaDataDB {
- if dfltCfg.MigratorCgrCfg().OutDataDBUser == mgrCfg.MigratorCgrCfg().OutDataDBUser {
- mgrCfg.MigratorCgrCfg().OutDataDBUser = mgrCfg.DataDbCfg().User
- }
- } else {
- mgrCfg.MigratorCgrCfg().OutDataDBUser = *outDataDBUser
- }
- if *outDataDBPass == utils.MetaDataDB {
- if dfltCfg.MigratorCgrCfg().OutDataDBPassword == mgrCfg.MigratorCgrCfg().OutDataDBPassword {
- mgrCfg.MigratorCgrCfg().OutDataDBPassword = mgrCfg.DataDbCfg().Password
- }
- } else {
- mgrCfg.MigratorCgrCfg().OutDataDBPassword = *outDataDBPass
- }
- if *outDBDataEncoding == utils.MetaDataDB {
- if dfltCfg.MigratorCgrCfg().OutDataDBEncoding == mgrCfg.MigratorCgrCfg().OutDataDBEncoding {
- mgrCfg.MigratorCgrCfg().OutDataDBEncoding = mgrCfg.GeneralCfg().DBDataEncoding
- }
- } else {
- mgrCfg.MigratorCgrCfg().OutDataDBEncoding = *outDBDataEncoding
- }
if *outDataDBRedisSentinel == utils.MetaDataDB {
- if dfltCfg.MigratorCgrCfg().OutDataDBOpts.RedisSentinel == mgrCfg.MigratorCgrCfg().OutDataDBOpts.RedisSentinel {
- mgrCfg.MigratorCgrCfg().OutDataDBOpts.RedisSentinel = dfltCfg.DataDbCfg().Opts.RedisSentinel
+ if dfltCfg.MigratorCgrCfg().OutDBOpts.RedisSentinel == mgrCfg.MigratorCgrCfg().OutDBOpts.RedisSentinel {
+ mgrCfg.MigratorCgrCfg().OutDBOpts.RedisSentinel = dfltCfg.DbCfg().Opts.RedisSentinel
}
} else {
- mgrCfg.MigratorCgrCfg().OutDataDBOpts.RedisSentinel = *outDataDBRedisSentinel
+ mgrCfg.MigratorCgrCfg().OutDBOpts.RedisSentinel = *outDataDBRedisSentinel
}
- sameDataDB = mgrCfg.MigratorCgrCfg().OutDataDBType == mgrCfg.DataDbCfg().Type &&
- mgrCfg.MigratorCgrCfg().OutDataDBHost == mgrCfg.DataDbCfg().Host &&
- mgrCfg.MigratorCgrCfg().OutDataDBPort == mgrCfg.DataDbCfg().Port &&
- mgrCfg.MigratorCgrCfg().OutDataDBName == mgrCfg.DataDbCfg().Name &&
- mgrCfg.MigratorCgrCfg().OutDataDBEncoding == mgrCfg.GeneralCfg().DBDataEncoding
+ toDBIDsList := []string{} // collect all DBConns of Items in data_db config
+ for _, item := range mgrCfg.DbCfg().Items {
+ if !slices.Contains(toDBIDsList, item.DBConn) {
+ toDBIDsList = append(toDBIDsList, item.DBConn)
+ }
+ }
- if dmIN, err = migrator.NewMigratorDataDB(mgrCfg.DataDbCfg().Type,
- mgrCfg.DataDbCfg().Host, mgrCfg.DataDbCfg().Port,
- mgrCfg.DataDbCfg().Name, mgrCfg.DataDbCfg().User,
- mgrCfg.DataDbCfg().Password, mgrCfg.GeneralCfg().DBDataEncoding,
- mgrCfg, mgrCfg.DataDbCfg().Opts, mgrCfg.DataDbCfg().Items); err != nil {
+ fromDBIDsList := []string{} // collect all DBConns of MigratorFromItems in migrator config
+ for _, item := range mgrCfg.MigratorCgrCfg().FromItems {
+ if !slices.Contains(fromDBIDsList, item.DBConn) {
+ fromDBIDsList = append(fromDBIDsList, item.DBConn)
+ }
+ }
+
+ // compare the two DBConn ID lists (order-insensitive); if they match, both sides will use the same DB connections
+ sameDataDB = utils.EqualUnorderedStringSlices(fromDBIDsList, toDBIDsList)
+
+ if dmFrom, err = migrator.NewMigratorDataDBs(fromDBIDsList, mgrCfg.GeneralCfg().DBDataEncoding, mgrCfg); err != nil {
log.Fatal(err)
}
+
if *printConfig {
cfgJSON := utils.ToIJSON(mgrCfg.AsMapInterface())
log.Printf("Configuration loaded from %q:\n%s", *cfgPath, cfgJSON)
}
if sameDataDB {
- dmOUT = dmIN
- } else if dmOUT, err = migrator.NewMigratorDataDB(mgrCfg.MigratorCgrCfg().OutDataDBType,
- mgrCfg.MigratorCgrCfg().OutDataDBHost, mgrCfg.MigratorCgrCfg().OutDataDBPort,
- mgrCfg.MigratorCgrCfg().OutDataDBName, mgrCfg.MigratorCgrCfg().OutDataDBUser,
- mgrCfg.MigratorCgrCfg().OutDataDBPassword, mgrCfg.MigratorCgrCfg().OutDataDBEncoding,
- mgrCfg, mgrCfg.MigratorCgrCfg().OutDataDBOpts, mgrCfg.DataDbCfg().Items); err != nil {
- log.Fatal(err)
+ dmTo = dmFrom
+ } else {
+ if dmTo, err = migrator.NewMigratorDataDBs(toDBIDsList, mgrCfg.GeneralCfg().DBDataEncoding, mgrCfg); err != nil {
+ log.Fatal(err)
+ }
}
- m, err := migrator.NewMigrator(dmIN, dmOUT,
- *dryRun, sameDataDB)
+ m, err := migrator.NewMigrator(mgrCfg.DbCfg(), dmFrom, dmTo, *dryRun, sameDataDB)
if err != nil {
log.Fatal(err)
}
defer m.Close()
config.SetCgrConfig(mgrCfg)
if exec != nil && *exec != utils.EmptyString { // Run migrator
- migrstats := make(map[string]int)
mig := strings.Split(*exec, utils.FieldsSep)
- err, migrstats = m.Migrate(mig)
+ err, migrstats := m.Migrate(mig)
if err != nil {
log.Fatal(err)
}
diff --git a/config/apis.go b/config/apis.go
index 1f75c7a34..fb97bfa51 100644
--- a/config/apis.go
+++ b/config/apis.go
@@ -384,18 +384,12 @@ func storeDiffSection(ctx *context.Context, section string, db ConfigDB, v1, v2
return
}
return db.SetSection(ctx, section, diffHTTPJsonCfg(jsn, v1.HTTPCfg(), v2.HTTPCfg()))
- case DataDBJSON:
+ case DBJSON:
jsn := new(DbJsonCfg)
if err = db.GetSection(ctx, section, jsn); err != nil {
return
}
- return db.SetSection(ctx, section, diffDataDBJsonCfg(jsn, v1.DataDbCfg(), v2.DataDbCfg()))
- case StorDBJSON:
- jsn := new(DbJsonCfg)
- if err = db.GetSection(ctx, section, jsn); err != nil {
- return
- }
- return db.SetSection(ctx, section, diffStorDBJsonCfg(jsn, v1.StorDbCfg(), v2.StorDbCfg()))
+ return db.SetSection(ctx, section, diffDataDBJsonCfg(jsn, v1.DbCfg(), v2.DbCfg()))
case FilterSJSON:
jsn := new(FilterSJsonCfg)
if err = db.GetSection(ctx, section, jsn); err != nil {
diff --git a/config/config.go b/config/config.go
index 555253317..80d0ce14a 100644
--- a/config/config.go
+++ b/config/config.go
@@ -106,13 +106,10 @@ func newCGRConfig(config []byte) (cfg *CGRConfig, err error) {
loggerCfg: &LoggerCfg{
Opts: new(LoggerOptsCfg),
},
- dataDbCfg: &DataDbCfg{
- Items: make(map[string]*ItemOpts),
- Opts: &DataDBOpts{},
- },
- storDbCfg: &StorDbCfg{
- Items: make(map[string]*ItemOpts),
- Opts: &StorDBOpts{},
+ dbCfg: &DbCfg{
+ DBConns: make(DBConns),
+ Items: make(map[string]*ItemOpts),
+ Opts: &DBOpts{},
},
tlsCfg: new(TLSCfg),
cacheCfg: &CacheCfg{Partitions: make(map[string]*CacheParamCfg)},
@@ -237,7 +234,8 @@ func newCGRConfig(config []byte) (cfg *CGRConfig, err error) {
},
loaderCgrCfg: new(LoaderCgrCfg),
migratorCgrCfg: &MigratorCgrCfg{
- OutDataDBOpts: &DataDBOpts{},
+ FromItems: make(map[string]*MigratorFromItem),
+ OutDBOpts: &DBOpts{},
},
loaderCfg: make(LoaderSCfgs, 0),
httpAgentCfg: make(HTTPAgentCfgs, 0),
@@ -268,7 +266,7 @@ func newCGRConfig(config []byte) (cfg *CGRConfig, err error) {
ProfileIgnoreFilters: []*DynamicBoolOpt{{value: AccountsProfileIgnoreFiltersDftOpt}},
}},
configDBCfg: &ConfigDBCfg{
- Opts: &DataDBOpts{},
+ Opts: &DBOpts{},
},
rldCh: make(chan string, 1),
@@ -344,8 +342,7 @@ type CGRConfig struct {
generalCfg *GeneralCfg // General config
loggerCfg *LoggerCfg // Logger config
- dataDbCfg *DataDbCfg // Database config
- storDbCfg *StorDbCfg // StorDb config
+ dbCfg *DbCfg // Database config
tlsCfg *TLSCfg // TLS config
cacheCfg *CacheCfg // Cache config
listenCfg *ListenCfg // Listen config
@@ -431,15 +428,6 @@ func (cfg *CGRConfig) GetAllSectionIDs() (s []string) {
return
}
-// loadConfigDBCfg loads the ConfigDB section of the configuration
-func (cfg *CGRConfig) loadConfigDBCfg(ctx *context.Context, jsnCfg ConfigDB) (err error) {
- jsnDBCfg := new(DbJsonCfg)
- if err = jsnCfg.GetSection(ctx, ConfigDBJSON, jsnDBCfg); err != nil {
- return
- }
- return cfg.configDBCfg.loadFromJSONCfg(jsnDBCfg)
-}
-
// SureTaxCfg use locking to retrieve the configuration, possibility later for runtime reload
func (cfg *CGRConfig) SureTaxCfg() *SureTaxCfg {
cfg.lks[SureTaxJSON].Lock()
@@ -623,17 +611,10 @@ func (cfg *CGRConfig) MigratorCgrCfg() *MigratorCgrCfg {
}
// DataDbCfg returns the config for DataDb
-func (cfg *CGRConfig) DataDbCfg() *DataDbCfg {
- cfg.lks[DataDBJSON].Lock()
- defer cfg.lks[DataDBJSON].Unlock()
- return cfg.dataDbCfg
-}
-
-// StorDbCfg returns the config for StorDb
-func (cfg *CGRConfig) StorDbCfg() *StorDbCfg {
- cfg.lks[StorDBJSON].Lock()
- defer cfg.lks[StorDBJSON].Unlock()
- return cfg.storDbCfg
+func (cfg *CGRConfig) DbCfg() *DbCfg {
+ cfg.lks[DBJSON].Lock()
+ defer cfg.lks[DBJSON].Unlock()
+ return cfg.dbCfg
}
// GeneralCfg returns the General config section
@@ -1016,32 +997,25 @@ func (cfg *CGRConfig) initChanels() {
// reloadSections sends a signal to the reload channel for the needed sections
// the list of sections should be always valid because we load the config first with this list
func (cfg *CGRConfig) reloadSections(sections ...string) {
- subsystemsThatNeedDataDB := utils.NewStringSet([]string{DataDBJSON,
+ subsystemsThatNeedDB := utils.NewStringSet([]string{DBJSON,
CDRsJSON, SessionSJSON, AttributeSJSON,
ChargerSJSON, ResourceSJSON, IPsJSON, StatSJSON, ThresholdSJSON,
RouteSJSON, LoaderSJSON, RateSJSON, AdminSJSON, AccountSJSON,
- ActionSJSON})
- subsystemsThatNeedStorDB := utils.NewStringSet([]string{StorDBJSON, CDRsJSON})
- needsDataDB := false
- needsStorDB := false
+ ActionSJSON, CDRsJSON})
+ needsDB := false
for _, section := range sections {
- if !needsDataDB && subsystemsThatNeedDataDB.Has(section) {
- needsDataDB = true
- cfg.rldCh <- SectionToService[DataDBJSON] // reload datadb before
+ if !needsDB && subsystemsThatNeedDB.Has(section) {
+ needsDB = true
+ cfg.rldCh <- SectionToService[DBJSON] // reload datadb before
}
- if !needsStorDB && subsystemsThatNeedStorDB.Has(section) {
- needsStorDB = true
- cfg.rldCh <- SectionToService[StorDBJSON] // reload stordb before
- }
- if needsDataDB && needsStorDB {
+ if needsDB {
break
}
}
runtime.Gosched()
for _, section := range sections {
if srv := SectionToService[section]; srv != utils.EmptyString &&
- section != DataDBJSON &&
- section != StorDBJSON {
+ section != DBJSON {
cfg.rldCh <- srv
}
}
@@ -1064,8 +1038,7 @@ func (cfg *CGRConfig) Clone() (cln *CGRConfig) {
templates: cfg.templates.Clone(),
generalCfg: cfg.generalCfg.Clone(),
loggerCfg: cfg.loggerCfg.Clone(),
- dataDbCfg: cfg.dataDbCfg.Clone(),
- storDbCfg: cfg.storDbCfg.Clone(),
+ dbCfg: cfg.dbCfg.Clone(),
tlsCfg: cfg.tlsCfg.Clone(),
cacheCfg: cfg.cacheCfg.Clone(),
listenCfg: cfg.listenCfg.Clone(),
diff --git a/config/config_defaults.go b/config/config_defaults.go
index e16520fa1..23c58f003 100644
--- a/config/config_defaults.go
+++ b/config/config_defaults.go
@@ -106,65 +106,75 @@ const CGRATES_CFG_JSON = `
// }
},
-"data_db": { // database used to store runtime data (eg: accounts)
- "db_type": "*redis", // data_db type: <*redis|*mongo>
- "db_host": "127.0.0.1", // data_db host address
- "db_port": 6379, // data_db port to reach the database
- "db_name": "10", // data_db database name to connect to
- "db_user": "cgrates", // username to use when connecting to data_db
- "db_password": "", // password to use when connecting to data_db
- "remote_conns":[], // the conns that are queried when the items are not found in local DB
- "remote_conn_id": "", // the ID to be sent to remote_conns to identify the connection
- "replication_conns":[], // the conns the items are replicated
- "replication_filtered": false, // if this is enabled the replication will be made only to the conns that received a get
- "replication_cache": "", // the caching action that is executed on the replication_conns when the items are replicated
+"db": { // database used to store data (eg: accounts)
+ "db_conns": {
+ "*default": { // The id of the DB connection
+ "db_type": "*internal", // db type: <*internal|*redis|*mysql|*mongo|*postgres>
+ "db_host": "", // db host address
+ "db_port": 0, // db port to reach the database
+ "db_name": "", // db database name to connect to
+ "db_user": "", // username to use when connecting to db
+ "db_password": "", // password to use when connecting to db
+ "string_indexed_fields": [], // indexes on cdrs table to speed up queries, used in case of *mongo and *internal
+ "prefix_indexed_fields": [], // prefix indexes on cdrs table to speed up queries, used in case of *internal
+ "remote_conns": [], // the conns that are queried when the items are not found in local DB
+ "remote_conn_id": "", // the ID to be sent to remote_conns to identify the connection
+ "replication_conns": [], // the conns the items are replicated
+ "replication_filtered": false, // if this is enabled the replication will be made only to the conns that received a get
+ "replication_cache": "", // the caching action that is executed on the replication_conns when the items are replicated
+ },
+ },
"items":{
- "*accounts": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false},
- "*actions": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false},
- "*resource_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false},
- "*resources": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false},
- "*ip_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false},
- "*ip_allocations": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false},
- "*statqueue_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false},
- "*statqueues": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false},
- "*threshold_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false},
- "*thresholds": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false},
- "*filters": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false},
- "*route_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false},
- "*attribute_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false},
- "*charger_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false},
- "*rate_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false},
- "*action_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false},
- "*load_ids": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false},
- "*versions": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false},
- "*resource_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false},
- "*ip_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false},
- "*stat_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false},
- "*threshold_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false},
- "*ranking_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false},
- "*rankings": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false},
- "*trend_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false},
- "*trends": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false},
- "*route_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false},
- "*attribute_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false},
- "*charger_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false},
- "*rate_profile_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false},
- "*rate_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false},
- "*action_profile_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false},
- "*account_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false},
- "*reverse_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false}
+ // compatible db types: <*internal|*redis|*mongo>
+ "*accounts": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false, "dbConn": "*default"},
+ "*actions": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false, "dbConn": "*default"},
+ "*resource_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false, "dbConn": "*default"},
+ "*resources": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false, "dbConn": "*default"},
+ "*ip_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false, "dbConn": "*default"},
+ "*ip_allocations": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false, "dbConn": "*default"},
+ "*statqueue_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false, "dbConn": "*default"},
+ "*statqueues": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false, "dbConn": "*default"},
+ "*threshold_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false, "dbConn": "*default"},
+ "*thresholds": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false, "dbConn": "*default"},
+ "*filters": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false, "dbConn": "*default"},
+ "*route_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false, "dbConn": "*default"},
+ "*attribute_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false, "dbConn": "*default"},
+ "*charger_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false, "dbConn": "*default"},
+ "*rate_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false, "dbConn": "*default"},
+ "*action_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false, "dbConn": "*default"},
+ "*load_ids": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false, "dbConn": "*default"},
+ "*versions": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false, "dbConn": "*default"},
+ "*resource_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false, "dbConn": "*default"},
+ "*ip_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false, "dbConn": "*default"},
+ "*stat_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false, "dbConn": "*default"},
+ "*threshold_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false, "dbConn": "*default"},
+ "*ranking_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false, "dbConn": "*default"},
+ "*rankings": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false, "dbConn": "*default"},
+ "*trend_profiles": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false, "dbConn": "*default"},
+ "*trends": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false, "dbConn": "*default"},
+ "*route_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false, "dbConn": "*default"},
+ "*attribute_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false, "dbConn": "*default"},
+ "*charger_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false, "dbConn": "*default"},
+ "*rate_profile_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false, "dbConn": "*default"},
+ "*rate_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false, "dbConn": "*default"},
+ "*action_profile_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false, "dbConn": "*default"},
+ "*account_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false, "dbConn": "*default"},
+ "*reverse_filter_indexes" : {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate": false, "dbConn": "*default"},
+
+ // compatible db types: <*internal|*mysql|*mongo|*postgres>
+ "*cdrs": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false, "dbConn": "*default"} // Compatible only for Internal, MySQL, Mongo and PostgreSQL databases
},
"opts":{
- "internalDBDumpPath": "/var/lib/cgrates/internal_db/datadb", // the path where datadb will be dumped
- "internalDBBackupPath": "/var/lib/cgrates/internal_db/backup/datadb", // default path taken by AdminSv1.BackupDataDB when "BackupFolderPath" is not provided
+ "internalDBDumpPath": "/var/lib/cgrates/internal_db/db", // the path where db will be dumped
+ "internalDBBackupPath": "/var/lib/cgrates/internal_db/backup/db", // default path taken by AdminSv1.BackupDB when "BackupFolderPath" is not provided
"internalDBStartTimeout": "5m", // the amount of wait time until timeout for DB startup
- "internalDBDumpInterval": "0s", // dump datadb regularly to a file: "0" - disables it; "-1" - dump on each set/remove; <""|$dur>
- "internalDBRewriteInterval": "0s", // rewrite dump files regularly: "0" - disables it; "-1" - rewrite on engine start; "-2" - rewrite on engine shutdown; <""|$dur>
+ "internalDBDumpInterval": "1m", // dump db regularly to a file: "0" - disables it; "-1" - dump on each set/remove; <""|$dur>
+ "internalDBRewriteInterval": "1h", // rewrite dump files regularly: "0" - disables it; "-1" - rewrite on engine start; "-2" - rewrite on engine shutdown; <""|$dur>
"internalDBFileSizeLimit": "1GB", // maximum size that can be written in a singular dump file
"redisMaxConns": 10, // the connection pool size
"redisConnectAttempts": 20, // the maximum amount of dial attempts
"redisSentinel": "", // the name of sentinel when used
- "redisCluster": false, // if enabled the datadb will try to connect to the redis cluster
+ "redisCluster": false, // if enabled the db will try to connect to the redis cluster
"redisClusterSync": "5s", // the sync interval for the redis cluster
"redisClusterOndownDelay": "0", // the delay before executing the commands if the redis cluster is in the CLUSTERDOWN state
"redisConnectTimeout": "0", // the amount of wait time until timeout for a connection attempt
@@ -176,27 +186,6 @@ const CGRATES_CFG_JSON = `
"redisClientCertificate": "", // path to client certificate
"redisClientKey": "", // path to client key
"redisCACertificate": "", // path to CA certificate (populate for self-signed certificate otherwise let it empty)
- "mongoQueryTimeout": "10s", // timeout for query when mongo is used
- "mongoConnScheme": "mongodb" // scheme for MongoDB connection
- }
-},
-
-"stor_db": { // database used to store offline tariff plans and CDRs
- "db_type": "*mysql", // stor database type to use: <*mongo|*mysql|*postgres|*internal>
- "db_host": "127.0.0.1", // the host to connect to
- "db_port": 3306, // the port to reach the stor_db
- "db_name": "cgrates", // stor database name
- "db_user": "cgrates", // username to use when connecting to stor_db
- "db_password": "CGRateS.org", // password to use when connecting to stor_db
- "string_indexed_fields": [], // indexes on cdrs table to speed up queries, used in case of *mongo and *internal
- "prefix_indexed_fields": [], // prefix indexes on cdrs table to speed up queries, used in case of *internal
- "opts": {
- "internalDBDumpPath": "/var/lib/cgrates/internal_db/stordb", // the path where stordb will be dumped
- "internalDBBackupPath": "/var/lib/cgrates/internal_db/backup/stordb", // default path taken by AdminSv1.BackupStorDB when "BackupFolderPath" is not provided
- "internalDBStartTimeout": "5m", // the amount of wait time until timeout for DB startup
- "internalDBDumpInterval": "0s", // dump datadb regularly to a file: "0" - disables it; "-1" - dump on each set/remove; <""|$dur>
- "internalDBRewriteInterval": "0s", // rewrite dump files regularly: "0" - disables it; "-1" - rewrite on engine start; "-2" - rewrite on engine shutdown; <""|$dur>
- "internalDBFileSizeLimit": "1GB", // maximum size that can be written in a singular dump file
"sqlMaxOpenConns": 100, // maximum database connections opened, not applying for mongo
"sqlMaxIdleConns": 10, // maximum database connections idle, not applying for mongo
"sqlLogLevel": 3, // sql logger verbosity: 1=Silent, 2=Error, 3=Warn, 4=Info
@@ -209,11 +198,8 @@ const CGRATES_CFG_JSON = `
//"pgSSLCertMode": "allow", // determines whether a client certificate may be sent to the server, and whether the server is required to request one
//"pgSSLRootCert": "", // name of a file containing SSL certificate authority (CA) certificate(s)
"mysqlLocation": "Local", // the location the time from mysql is retrived
- "mongoQueryTimeout":"10s", // timeout for query when mongo is used
- "mongoConnScheme": "mongodb" // scheme for MongoDB connection
- },
- "items":{
- "*cdrs": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false}
+ "mongoQueryTimeout": "10s", // timeout for query when mongo is used
+ "mongoConnScheme": "mongodb" // scheme for MongoDB connection
}
},
@@ -1169,7 +1155,7 @@ const CGRATES_CFG_JSON = `
"resources": {
"enabled": false, // starts ResourceLimiter service:
- "store_interval": "", // dump cache regularly to dataDB, 0 - dump at start/shutdown: <""|$dur>
+ "store_interval": "", // dump cache regularly to db, 0 - dump at start/shutdown: <""|$dur>
"thresholds_conns": [], // connections to ThresholdS for resource reporting, empty to disable thresholds functionality: <""|*internal|$rpc_conns_id>
"indexed_selects": true, // enable profile matching exclusively on indexes
//"string_indexed_fields": [], // query indexes based on these fields for faster processing
@@ -1205,7 +1191,7 @@ const CGRATES_CFG_JSON = `
"ips": {
"enabled": false, // enables the IPs service:
- "store_interval": "", // dump cache regularly to dataDB, 0 - dump at start/shutdown: <""|$dur>
+ "store_interval": "", // dump cache regularly to db, 0 - dump at start/shutdown: <""|$dur>
"indexed_selects": true, // enable profile matching exclusively on indexes
//"string_indexed_fields": [], // query indexes based on these fields for faster processing
"prefix_indexed_fields": [], // query indexes based on these fields for faster processing
@@ -1241,7 +1227,7 @@ const CGRATES_CFG_JSON = `
"stats": {
"enabled": false, // starts Stat service:
- "store_interval": "", // dump cache regularly to dataDB, 0 - dump at start/shutdown: <""|$dur>
+ "store_interval": "", // dump cache regularly to db, 0 - dump at start/shutdown: <""|$dur>
"store_uncompressed_limit": 0, // used to compress data
"thresholds_conns": [], // connections to ThresholdS for StatUpdates, empty to disable thresholds functionality: <""|*internal|$rpc_conns_id>
"ees_conns": [], // connections to EEs for StatUpdates, empty to disable export functionality: <""|*internal|$rpc_conns_id>
@@ -1281,7 +1267,7 @@ const CGRATES_CFG_JSON = `
"thresholds": {
"enabled": false, // starts ThresholdS service:
- "store_interval": "", // dump cache regularly to dataDB, 0 - dump at start/shutdown: <""|$dur>
+ "store_interval": "", // dump cache regularly to db, 0 - dump at start/shutdown: <""|$dur>
"indexed_selects": true, // enable profile matching exclusively on indexes
//"string_indexed_fields": [], // query indexes based on these fields for faster processing
"prefix_indexed_fields": [], // query indexes based on these fields for faster processing
@@ -1313,7 +1299,7 @@ const CGRATES_CFG_JSON = `
"trends":{
"enabled": false, // starts TrendS service: .
- "store_interval": "", // dump cache regularly to dataDB, 0 - dump at start/shutdown: <""|$dur>
+ "store_interval": "", // dump cache regularly to db, 0 - dump at start/shutdown: <""|$dur>
"store_uncompressed_limit": 0, // used to compress metrics
"stats_conns": [], // connections to StatS ,empty to disable stats functionality: <""|*internal|$rpc_conns_id>
"scheduled_ids": {}, // autoload these trend ids on start
@@ -1325,7 +1311,7 @@ const CGRATES_CFG_JSON = `
"rankings":{
"enabled": false, // starts RankingS service: .
- "store_interval": "", // dump cache regularly to dataDB, 0 - dump at start/shutdown: <""|$dur>
+ "store_interval": "", // dump cache regularly to db, 0 - dump at start/shutdown: <""|$dur>
"stats_conns": [], // connections to StatS ,empty to disable stats functionality: <""|*internal|$rpc_conns_id>
"scheduled_ids": {}, // autoload these ranking ids on start
"thresholds_conns": [], // connections to ThresholdS for rankingSummary reporting, empty to disable thresholds functionality: <""|*internal|$rpc_conns_id>
@@ -1732,15 +1718,16 @@ const CGRATES_CFG_JSON = `
"migrator": {
- "out_datadb_type": "redis",
- "out_datadb_host": "127.0.0.1",
- "out_datadb_port": "6379",
- "out_datadb_name": "10",
- "out_datadb_user": "cgrates",
- "out_datadb_password": "",
- "out_datadb_encoding" : "msgpack",
"users_filters":[],
- "out_datadb_opts":{
+ "fromItems":{
+ "*accounts": {"dbConn": "*default"},
+ "*statqueue_profiles": {"dbConn": "*default"},
+ "*filters": {"dbConn": "*default"},
+ "*charger_profiles": {"dbConn": "*default"},
+ "*load_ids": {"dbConn": "*default"},
+ "*versions": {"dbConn": "*default"}
+ },
+ "out_db_opts":{
"redisMaxConns": 10, // the connection pool size
"redisConnectAttempts": 20, // the maximum amount of dial attempts
"redisSentinel": "",
@@ -2143,12 +2130,12 @@ const CGRATES_CFG_JSON = `
"config_db": { // database used to store runtime data (eg: accounts)
- "db_type": "*internal", // data_db type: <*redis|*mongo>
- "db_host": "", // data_db host address
- "db_port": 0, // data_db port to reach the database
- "db_name": "", // data_db database name to connect to
- "db_user": "", // username to use when connecting to data_db
- "db_password": "", // password to use when connecting to data_db
+ "db_type": "*internal", // db type: <*internal|*redis|*mongo>
+ "db_host": "", // db host address
+ "db_port": 0, // db port to reach the database
+ "db_name": "", // db database name to connect to
+ "db_user": "", // username to use when connecting to db
+ "db_password": "", // password to use when connecting to db
"opts":{
"internalDBDumpPath": "/var/lib/cgrates/internal_db/configdb", // the path where configdb will be dumped
"internalDBBackupPath": "/var/lib/cgrates/internal_db/backup/configdb", // default path taken by ConfigSv1.BackupConfigDB when "BackupFolderPath" is not provided
diff --git a/config/config_json.go b/config/config_json.go
index ec57cdf51..16228eb31 100644
--- a/config/config_json.go
+++ b/config/config_json.go
@@ -31,7 +31,7 @@ const (
CacheJSON = "caches"
ListenJSON = "listen"
HTTPJSON = "http"
- DataDBJSON = "data_db"
+ DBJSON = "db"
StorDBJSON = "stor_db"
FilterSJSON = "filters"
CDRsJSON = "cdrs"
@@ -105,8 +105,7 @@ var (
PrometheusAgentJSON: utils.PrometheusAgent,
LoaderSJSON: utils.LoaderS,
AnalyzerSJSON: utils.AnalyzerS,
- DataDBJSON: utils.DataDB,
- StorDBJSON: utils.StorDB,
+ DBJSON: utils.DB,
EEsJSON: utils.EEs,
EFsJSON: utils.EFs,
RateSJSON: utils.RateS,
@@ -184,8 +183,7 @@ func newSections(cfg *CGRConfig) Sections {
cfg.loggerCfg,
cfg.efsCfg,
cfg.rpcConns,
- cfg.dataDbCfg,
- cfg.storDbCfg,
+ cfg.dbCfg,
cfg.listenCfg,
cfg.tlsCfg,
cfg.httpCfg,
diff --git a/config/configdbcfg.go b/config/configdbcfg.go
index e331cdb06..10ae50a88 100644
--- a/config/configdbcfg.go
+++ b/config/configdbcfg.go
@@ -35,12 +35,23 @@ type ConfigDBCfg struct {
Name string // The name of the database to connect to.
User string // The user to sign in as.
Password string // The user's password.
- Opts *DataDBOpts
+ Opts *DBOpts
+}
+
+// Database config for config_db
+type ConfigDbJsonCfg struct {
+ Db_type *string
+ Db_host *string
+ Db_port *int
+ Db_name *string
+ Db_user *string
+ Db_password *string
+ Opts *DBOptsJson
}
// loadConfigDBCfg loads the DataDB section of the configuration
func (dbcfg *ConfigDBCfg) Load(ctx *context.Context, jsnCfg ConfigDB, _ *CGRConfig) (err error) {
- jsnConfigDBCfg := new(DbJsonCfg)
+ jsnConfigDBCfg := new(ConfigDbJsonCfg)
if err = jsnCfg.GetSection(ctx, ConfigDBJSON, jsnConfigDBCfg); err != nil {
return
}
@@ -51,7 +62,7 @@ func (dbcfg *ConfigDBCfg) Load(ctx *context.Context, jsnCfg ConfigDB, _ *CGRConf
}
// loadFromJSONCfg loads Database config from JsonCfg
-func (dbcfg *ConfigDBCfg) loadFromJSONCfg(jsnDbCfg *DbJsonCfg) (err error) {
+func (dbcfg *ConfigDBCfg) loadFromJSONCfg(jsnDbCfg *ConfigDbJsonCfg) (err error) {
if jsnDbCfg == nil {
return nil
}
diff --git a/config/configsanity.go b/config/configsanity.go
index 55ecfe701..b7550161c 100644
--- a/config/configsanity.go
+++ b/config/configsanity.go
@@ -1024,70 +1024,88 @@ func (cfg *CGRConfig) checkConfigSanity() error {
}
}
- // StorDB sanity checks
- if cfg.storDbCfg.Type == utils.MetaInternal &&
- (cfg.storDbCfg.Opts.InternalDBDumpInterval != 0 ||
- cfg.storDbCfg.Opts.InternalDBRewriteInterval != 0) &&
- cfg.storDbCfg.Opts.InternalDBFileSizeLimit <= 0 {
- return fmt.Errorf("<%s> InternalDBFileSizeLimit field cannot be equal or smaller than 0: <%v>", utils.StorDB,
- cfg.storDbCfg.Opts.InternalDBFileSizeLimit)
- }
- if cfg.storDbCfg.Type == utils.MetaPostgres {
- if !slices.Contains([]string{utils.PgSSLModeDisable, utils.PgSSLModeAllow,
- utils.PgSSLModePrefer, utils.PgSSLModeRequire, utils.PgSSLModeVerifyCA,
- utils.PgSSLModeVerifyFull}, cfg.storDbCfg.Opts.PgSSLMode) {
- return fmt.Errorf("<%s> unsupported pgSSLMode (sslmode) in storDB configuration", utils.StorDB)
- }
- if !slices.Contains([]string{utils.PgSSLModeDisable, utils.PgSSLModeAllow, utils.PgSSLModeRequire,
- utils.EmptyString}, cfg.storDbCfg.Opts.PgSSLCertMode) {
- return fmt.Errorf("<%s> unsupported pgSSLCertMode (sslcertmode) in storDB configuration", utils.StorDB)
- }
- }
-
// DataDB sanity checks
- if cfg.dataDbCfg.Type == utils.MetaInternal {
- if (cfg.dataDbCfg.Opts.InternalDBDumpInterval != 0 ||
- cfg.dataDbCfg.Opts.InternalDBRewriteInterval != 0) &&
- cfg.dataDbCfg.Opts.InternalDBFileSizeLimit <= 0 {
- return fmt.Errorf("<%s> InternalDBFileSizeLimit field cannot be equal or smaller than 0: <%v>", utils.DataDB,
- cfg.dataDbCfg.Opts.InternalDBFileSizeLimit)
+ hasOneInternalDB := false // used to return an error in case more than one internal DB is found
+ for _, dbcfg := range cfg.dbCfg.DBConns {
+ if dbcfg.Type == utils.MetaInternal {
+ if hasOneInternalDB {
+ return fmt.Errorf("<%s> there can only be one internal DB", utils.DB)
+ }
+ if (cfg.dbCfg.Opts.InternalDBDumpInterval != 0 ||
+ cfg.dbCfg.Opts.InternalDBRewriteInterval != 0) &&
+ cfg.dbCfg.Opts.InternalDBFileSizeLimit <= 0 {
+ return fmt.Errorf("<%s> InternalDBFileSizeLimit field cannot be equal or smaller than 0: <%v>", utils.DB,
+ cfg.dbCfg.Opts.InternalDBFileSizeLimit)
+ }
+ hasOneInternalDB = true
}
- for key, config := range cfg.cacheCfg.Partitions {
- if utils.StatelessDataDBPartitions.Has(key) && config.Limit != 0 {
- return fmt.Errorf("<%s> %s needs to be 0 when DataBD is *internal, received : %d", utils.CacheS, key, config.Limit)
+ if dbcfg.Type == utils.MetaPostgres {
+ if !slices.Contains([]string{utils.PgSSLModeDisable, utils.PgSSLModeAllow,
+ utils.PgSSLModePrefer, utils.PgSSLModeRequire, utils.PgSSLModeVerifyCA,
+ utils.PgSSLModeVerifyFull}, cfg.dbCfg.Opts.PgSSLMode) {
+ return fmt.Errorf("<%s> unsupported pgSSLMode (sslmode) in DB configuration", utils.DB)
+ }
+ if !slices.Contains([]string{utils.PgSSLModeDisable, utils.PgSSLModeAllow, utils.PgSSLModeRequire,
+ utils.EmptyString}, cfg.dbCfg.Opts.PgSSLCertMode) {
+ return fmt.Errorf("<%s> unsupported pgSSLCertMode (sslcertmode) in DB configuration", utils.DB)
}
}
}
- for item, val := range cfg.dataDbCfg.Items {
- if val.Remote && len(cfg.dataDbCfg.RmtConns) == 0 {
+ for item, val := range cfg.dbCfg.Items {
+ if _, has := cfg.dbCfg.DBConns[val.DBConn]; !has {
+ return fmt.Errorf("item's <%s> dbConn <%v>, does not match any db_conns ID", item, val.DBConn)
+ }
+ storDBTypes := []string{utils.MetaInternal, utils.MetaMySQL, utils.MetaMongo,
+ utils.MetaPostgres, utils.Internal, utils.MySQL, utils.Mongo, utils.Postgres}
+ dataDBTypes := []string{utils.MetaInternal, utils.MetaRedis, utils.MetaMongo,
+ utils.Internal, utils.Redis, utils.Mongo}
+
+ // if *cdrs item db type is not supported, return error
+ if item == utils.MetaCDRs {
+ if !slices.Contains(storDBTypes, cfg.dbCfg.DBConns[val.DBConn].Type) {
+ return fmt.Errorf("<%s> db item can only be of types <%v>, got <%s>", item,
+ storDBTypes, cfg.dbCfg.DBConns[val.DBConn].Type)
+ }
+ } else {
+ if !slices.Contains(dataDBTypes, cfg.dbCfg.DBConns[val.DBConn].Type) {
+ return fmt.Errorf("<%s> db item can only be of types <%v>, got <%s>", item, dataDBTypes, cfg.dbCfg.DBConns[val.DBConn].Type)
+ }
+ }
+ found1RmtConns := false
+ found1RplConns := false
+ for _, dbcfg := range cfg.dbCfg.DBConns {
+ for _, connID := range dbcfg.RplConns {
+ conn, has := cfg.rpcConns[connID]
+ if !has {
+ return fmt.Errorf("<%s> connection with id: <%s> not defined", utils.DB, connID)
+ }
+ for _, rpc := range conn.Conns {
+ if rpc.Transport != utils.MetaGOB {
+ return fmt.Errorf("<%s> unsupported transport <%s> for connection with ID: <%s>", utils.DB, rpc.Transport, connID)
+ }
+ }
+ found1RplConns = true
+ }
+ for _, connID := range dbcfg.RmtConns {
+ conn, has := cfg.rpcConns[connID]
+ if !has {
+ return fmt.Errorf("<%s> connection with id: <%s> not defined", utils.DB, connID)
+ }
+ for _, rpc := range conn.Conns {
+ if rpc.Transport != utils.MetaGOB {
+ return fmt.Errorf("<%s> unsupported transport <%s> for connection with ID: <%s>", utils.DB, rpc.Transport, connID)
+ }
+ }
+ found1RmtConns = true
+ }
+ }
+ if val.Remote && !found1RmtConns {
return fmt.Errorf("remote connections required by: <%s>", item)
}
- if val.Replicate && len(cfg.dataDbCfg.RplConns) == 0 {
+ if val.Replicate && !found1RplConns {
return fmt.Errorf("replicate connections required by: <%s>", item)
}
}
- for _, connID := range cfg.dataDbCfg.RplConns {
- conn, has := cfg.rpcConns[connID]
- if !has {
- return fmt.Errorf("<%s> connection with id: <%s> not defined", utils.DataDB, connID)
- }
- for _, rpc := range conn.Conns {
- if rpc.Transport != utils.MetaGOB {
- return fmt.Errorf("<%s> unsupported transport <%s> for connection with ID: <%s>", utils.DataDB, rpc.Transport, connID)
- }
- }
- }
- for _, connID := range cfg.dataDbCfg.RmtConns {
- conn, has := cfg.rpcConns[connID]
- if !has {
- return fmt.Errorf("<%s> connection with id: <%s> not defined", utils.DataDB, connID)
- }
- for _, rpc := range conn.Conns {
- if rpc.Transport != utils.MetaGOB {
- return fmt.Errorf("<%s> unsupported transport <%s> for connection with ID: <%s>", utils.DataDB, rpc.Transport, connID)
- }
- }
- }
// APIer sanity checks
for _, connID := range cfg.admS.AttributeSConns {
if strings.HasPrefix(connID, utils.MetaInternal) && !cfg.attributeSCfg.Enabled {
diff --git a/config/datadbcfg.go b/config/datadbcfg.go
index 3014dc5af..df8e824ee 100644
--- a/config/datadbcfg.go
+++ b/config/datadbcfg.go
@@ -20,6 +20,7 @@ package config
import (
"fmt"
+ "reflect"
"slices"
"strconv"
"strings"
@@ -48,7 +49,7 @@ func defaultDBPort(dbType, port string) string {
return port
}
-type DataDBOpts struct {
+type DBOpts struct {
InternalDBDumpPath string // Path to the dump file
InternalDBBackupPath string // Path where db dump will backup
InternalDBStartTimeout time.Duration // Transcache recover from dump files timeout duration
@@ -72,48 +73,141 @@ type DataDBOpts struct {
RedisCACertificate string
MongoQueryTimeout time.Duration
MongoConnScheme string
+ SQLMaxOpenConns int
+ SQLMaxIdleConns int
+ SQLLogLevel int
+ SQLConnMaxLifetime time.Duration
+ SQLDSNParams map[string]string
+ PgSSLMode string
+ PgSSLCert string
+ PgSSLKey string
+ PgSSLPassword string
+ PgSSLCertMode string
+ PgSSLRootCert string
+ MySQLLocation string
}
-// DataDbCfg Database config
-type DataDbCfg struct {
- Type string
- Host string // The host to connect to. Values that start with / are for UNIX domain sockets.
- Port string // The port to bind to.
- Name string // The name of the database to connect to.
- User string // The user to sign in as.
- Password string // The user's password.
- RmtConns []string // Remote DataDB connIDs
- RmtConnID string
- RplConns []string // Replication connIDs
- RplFiltered bool
- RplCache string
- Items map[string]*ItemOpts
- Opts *DataDBOpts
+// DBConn the config to establish connection to DataDB
+type DBConn struct {
+ Type string
+ Host string // The host to connect to. Values that start with / are for UNIX domain sockets.
+ Port string // The port to bind to.
+ Name string // The name of the database to connect to.
+ User string // The user to sign in as.
+ Password string // The user's password.
+ StringIndexedFields []string
+ PrefixIndexedFields []string
+ RmtConns []string // Remote DataDB connIDs
+ RmtConnID string
+ RplConns []string // Replication connIDs
+ RplFiltered bool
+ RplCache string
+}
+
+// loadFromJSONCfg loads the DBConn config from its JSON counterpart
+func (dbC *DBConn) loadFromJSONCfg(jsnDbConnCfg *DbConnJson) (err error) {
+ if dbC == nil {
+ return
+ }
+ if jsnDbConnCfg.Db_type != nil {
+ if !strings.HasPrefix(*jsnDbConnCfg.Db_type, "*") {
+ dbC.Type = fmt.Sprintf("*%v", *jsnDbConnCfg.Db_type)
+ } else {
+ dbC.Type = *jsnDbConnCfg.Db_type
+ }
+ }
+ if jsnDbConnCfg.Db_host != nil {
+ dbC.Host = *jsnDbConnCfg.Db_host
+ }
+ if jsnDbConnCfg.Db_port != nil {
+ port := strconv.Itoa(*jsnDbConnCfg.Db_port)
+ if port == "-1" {
+ port = utils.MetaDynamic
+ }
+ dbC.Port = defaultDBPort(dbC.Type, port)
+ }
+ if jsnDbConnCfg.Db_name != nil {
+ dbC.Name = *jsnDbConnCfg.Db_name
+ }
+ if jsnDbConnCfg.Db_user != nil {
+ dbC.User = *jsnDbConnCfg.Db_user
+ }
+ if jsnDbConnCfg.Db_password != nil {
+ dbC.Password = *jsnDbConnCfg.Db_password
+ }
+ if jsnDbConnCfg.String_indexed_fields != nil {
+ dbC.StringIndexedFields = *jsnDbConnCfg.String_indexed_fields
+ }
+ if jsnDbConnCfg.Prefix_indexed_fields != nil {
+ dbC.PrefixIndexedFields = *jsnDbConnCfg.Prefix_indexed_fields
+ }
+ if jsnDbConnCfg.Remote_conns != nil {
+ dbC.RmtConns = make([]string, len(*jsnDbConnCfg.Remote_conns))
+ for idx, rmtConn := range *jsnDbConnCfg.Remote_conns {
+ if rmtConn == utils.MetaInternal {
+ return fmt.Errorf("remote connection ID needs to be different than <%s> ", utils.MetaInternal)
+ }
+ dbC.RmtConns[idx] = rmtConn
+ }
+ }
+ if jsnDbConnCfg.Replication_conns != nil {
+ dbC.RplConns = make([]string, len(*jsnDbConnCfg.Replication_conns))
+ for idx, rplConn := range *jsnDbConnCfg.Replication_conns {
+ if rplConn == utils.MetaInternal {
+ return fmt.Errorf("replication connection ID needs to be different than <%s> ", utils.MetaInternal)
+ }
+ dbC.RplConns[idx] = rplConn
+ }
+ }
+ if jsnDbConnCfg.Replication_filtered != nil {
+ dbC.RplFiltered = *jsnDbConnCfg.Replication_filtered
+ }
+ if jsnDbConnCfg.Remote_conn_id != nil {
+ dbC.RmtConnID = *jsnDbConnCfg.Remote_conn_id
+ }
+ if jsnDbConnCfg.Replication_cache != nil {
+ dbC.RplCache = *jsnDbConnCfg.Replication_cache
+ }
+ return
+}
+
+// DBConns the config for all DataDB connections
+type DBConns map[string]*DBConn
+
+// DbCfg Database config
+type DbCfg struct {
+ DBConns DBConns
+ Items map[string]*ItemOpts
+ Opts *DBOpts
}
// loadDataDBCfg loads the DataDB section of the configuration
-func (dbcfg *DataDbCfg) Load(ctx *context.Context, jsnCfg ConfigDB, cfg *CGRConfig) (err error) {
- jsnDataDbCfg := new(DbJsonCfg)
- if err = jsnCfg.GetSection(ctx, DataDBJSON, jsnDataDbCfg); err != nil {
+func (dbcfg *DbCfg) Load(ctx *context.Context, jsnCfg ConfigDB, cfg *CGRConfig) (err error) {
+ jsnDbCfg := new(DbJsonCfg)
+ if err = jsnCfg.GetSection(ctx, DBJSON, jsnDbCfg); err != nil {
return
}
- if err = dbcfg.loadFromJSONCfg(jsnDataDbCfg); err != nil {
+ if err = dbcfg.loadFromJSONCfg(jsnDbCfg); err != nil {
return
}
// in case of internalDB we need to disable the cache
// so we enforce it here
- if cfg.dataDbCfg.Type == utils.MetaInternal {
+ for _, dbCfg := range cfg.dbCfg.DBConns {
+ if dbCfg.Type != utils.MetaInternal {
+ continue
+ }
// overwrite only StatelessDataDBPartitions and leave other unmodified ( e.g. *diameter_messages, *closed_sessions, etc... )
for key := range utils.StatelessDataDBPartitions {
if _, has := cfg.cacheCfg.Partitions[key]; has {
cfg.cacheCfg.Partitions[key] = &CacheParamCfg{}
}
}
+ return // there is only 1 internalDB, stop searching for more
}
return
}
-func (dbOpts *DataDBOpts) loadFromJSONCfg(jsnCfg *DBOptsJson) (err error) {
+func (dbOpts *DBOpts) loadFromJSONCfg(jsnCfg *DBOptsJson) (err error) {
if jsnCfg == nil {
return
}
@@ -208,56 +302,63 @@ func (dbOpts *DataDBOpts) loadFromJSONCfg(jsnCfg *DBOptsJson) (err error) {
if jsnCfg.MongoConnScheme != nil {
dbOpts.MongoConnScheme = *jsnCfg.MongoConnScheme
}
+ if jsnCfg.SQLMaxOpenConns != nil {
+ dbOpts.SQLMaxOpenConns = *jsnCfg.SQLMaxOpenConns
+ }
+ if jsnCfg.SQLMaxIdleConns != nil {
+ dbOpts.SQLMaxIdleConns = *jsnCfg.SQLMaxIdleConns
+ }
+ if jsnCfg.SQLLogLevel != nil {
+ dbOpts.SQLLogLevel = *jsnCfg.SQLLogLevel
+ }
+ if jsnCfg.SQLConnMaxLifetime != nil {
+ if dbOpts.SQLConnMaxLifetime, err = utils.ParseDurationWithNanosecs(*jsnCfg.SQLConnMaxLifetime); err != nil {
+ return
+ }
+ }
+ if jsnCfg.MYSQLDSNParams != nil {
+ dbOpts.SQLDSNParams = make(map[string]string)
+ dbOpts.SQLDSNParams = jsnCfg.MYSQLDSNParams
+ }
+ if jsnCfg.PgSSLMode != nil {
+ dbOpts.PgSSLMode = *jsnCfg.PgSSLMode
+ }
+ if jsnCfg.PgSSLCert != nil {
+ dbOpts.PgSSLCert = *jsnCfg.PgSSLCert
+ }
+ if jsnCfg.PgSSLKey != nil {
+ dbOpts.PgSSLKey = *jsnCfg.PgSSLKey
+ }
+ if jsnCfg.PgSSLPassword != nil {
+ dbOpts.PgSSLPassword = *jsnCfg.PgSSLPassword
+ }
+ if jsnCfg.PgSSLCertMode != nil {
+ dbOpts.PgSSLCertMode = *jsnCfg.PgSSLCertMode
+ }
+ if jsnCfg.PgSSLRootCert != nil {
+ dbOpts.PgSSLRootCert = *jsnCfg.PgSSLRootCert
+ }
+ if jsnCfg.MySQLLocation != nil {
+ dbOpts.MySQLLocation = *jsnCfg.MySQLLocation
+ }
return
}
// loadFromJSONCfg loads Database config from JsonCfg
-func (dbcfg *DataDbCfg) loadFromJSONCfg(jsnDbCfg *DbJsonCfg) (err error) {
+func (dbcfg *DbCfg) loadFromJSONCfg(jsnDbCfg *DbJsonCfg) (err error) {
if jsnDbCfg == nil {
return nil
}
- if jsnDbCfg.Db_type != nil {
- if !strings.HasPrefix(*jsnDbCfg.Db_type, "*") {
- dbcfg.Type = fmt.Sprintf("*%v", *jsnDbCfg.Db_type)
- } else {
- dbcfg.Type = *jsnDbCfg.Db_type
+ if jsnDbCfg.Db_conns != nil {
+ // hardcoded *default connection to internal, can be overwritten
+ dbcfg.DBConns[utils.MetaDefault] = &DBConn{
+ Type: utils.MetaInternal,
}
- }
- if jsnDbCfg.Db_host != nil {
- dbcfg.Host = *jsnDbCfg.Db_host
- }
- if jsnDbCfg.Db_port != nil {
- port := strconv.Itoa(*jsnDbCfg.Db_port)
- if port == "-1" {
- port = utils.MetaDynamic
- }
- dbcfg.Port = defaultDBPort(dbcfg.Type, port)
- }
- if jsnDbCfg.Db_name != nil {
- dbcfg.Name = *jsnDbCfg.Db_name
- }
- if jsnDbCfg.Db_user != nil {
- dbcfg.User = *jsnDbCfg.Db_user
- }
- if jsnDbCfg.Db_password != nil {
- dbcfg.Password = *jsnDbCfg.Db_password
- }
- if jsnDbCfg.Remote_conns != nil {
- dbcfg.RmtConns = make([]string, len(*jsnDbCfg.Remote_conns))
- for idx, rmtConn := range *jsnDbCfg.Remote_conns {
- if rmtConn == utils.MetaInternal {
- return fmt.Errorf("Remote connection ID needs to be different than <%s> ", utils.MetaInternal)
+ for kJsn, vJsn := range jsnDbCfg.Db_conns {
+ dbcfg.DBConns[kJsn] = new(DBConn)
+ if err = dbcfg.DBConns[kJsn].loadFromJSONCfg(vJsn); err != nil {
+ return err
}
- dbcfg.RmtConns[idx] = rmtConn
- }
- }
- if jsnDbCfg.Replication_conns != nil {
- dbcfg.RplConns = make([]string, len(*jsnDbCfg.Replication_conns))
- for idx, rplConn := range *jsnDbCfg.Replication_conns {
- if rplConn == utils.MetaInternal {
- return fmt.Errorf("Remote connection ID needs to be different than <%s> ", utils.MetaInternal)
- }
- dbcfg.RplConns[idx] = rplConn
}
}
if jsnDbCfg.Items != nil {
@@ -272,26 +373,18 @@ func (dbcfg *DataDbCfg) loadFromJSONCfg(jsnDbCfg *DbJsonCfg) (err error) {
dbcfg.Items[kJsn] = val
}
}
- if jsnDbCfg.Replication_filtered != nil {
- dbcfg.RplFiltered = *jsnDbCfg.Replication_filtered
- }
- if jsnDbCfg.Remote_conn_id != nil {
- dbcfg.RmtConnID = *jsnDbCfg.Remote_conn_id
- }
- if jsnDbCfg.Replication_cache != nil {
- dbcfg.RplCache = *jsnDbCfg.Replication_cache
- }
+
if jsnDbCfg.Opts != nil {
err = dbcfg.Opts.loadFromJSONCfg(jsnDbCfg.Opts)
}
return
}
-func (DataDbCfg) SName() string { return DataDBJSON }
-func (dbcfg DataDbCfg) CloneSection() Section { return dbcfg.Clone() }
+func (DbCfg) SName() string { return DBJSON }
+func (dbcfg DbCfg) CloneSection() Section { return dbcfg.Clone() }
-func (dbOpts *DataDBOpts) Clone() *DataDBOpts {
- return &DataDBOpts{
+func (dbOpts *DBOpts) Clone() *DBOpts {
+ return &DBOpts{
InternalDBDumpPath: dbOpts.InternalDBDumpPath,
InternalDBBackupPath: dbOpts.InternalDBBackupPath,
InternalDBStartTimeout: dbOpts.InternalDBStartTimeout,
@@ -315,38 +408,67 @@ func (dbOpts *DataDBOpts) Clone() *DataDBOpts {
RedisCACertificate: dbOpts.RedisCACertificate,
MongoQueryTimeout: dbOpts.MongoQueryTimeout,
MongoConnScheme: dbOpts.MongoConnScheme,
+ SQLMaxOpenConns: dbOpts.SQLMaxOpenConns,
+ SQLMaxIdleConns: dbOpts.SQLMaxIdleConns,
+ SQLLogLevel: dbOpts.SQLLogLevel,
+ SQLConnMaxLifetime: dbOpts.SQLConnMaxLifetime,
+ SQLDSNParams: dbOpts.SQLDSNParams,
+ PgSSLMode: dbOpts.PgSSLMode,
+ PgSSLCert: dbOpts.PgSSLCert,
+ PgSSLKey: dbOpts.PgSSLKey,
+ PgSSLPassword: dbOpts.PgSSLPassword,
+ PgSSLCertMode: dbOpts.PgSSLCertMode,
+ PgSSLRootCert: dbOpts.PgSSLRootCert,
+ MySQLLocation: dbOpts.MySQLLocation,
}
}
// Clone returns the cloned object
-func (dbcfg DataDbCfg) Clone() (cln *DataDbCfg) {
- cln = &DataDbCfg{
- Type: dbcfg.Type,
- Host: dbcfg.Host,
- Port: dbcfg.Port,
- Name: dbcfg.Name,
- User: dbcfg.User,
- Password: dbcfg.Password,
- RplFiltered: dbcfg.RplFiltered,
- RplCache: dbcfg.RplCache,
- RmtConnID: dbcfg.RmtConnID,
- Items: make(map[string]*ItemOpts),
- Opts: dbcfg.Opts.Clone(),
+func (dbcfg DbCfg) Clone() (cln *DbCfg) {
+ cln = &DbCfg{
+ DBConns: make(DBConns),
+ Items: make(map[string]*ItemOpts),
+ Opts: dbcfg.Opts.Clone(),
+ }
+ for k, v := range dbcfg.DBConns {
+ cln.DBConns[k] = v.Clone()
}
for k, itm := range dbcfg.Items {
cln.Items[k] = itm.Clone()
}
- if dbcfg.RmtConns != nil {
- cln.RmtConns = slices.Clone(dbcfg.RmtConns)
+ return
+}
+
+// Clone returns the cloned object
+func (dbC *DBConn) Clone() (cln *DBConn) {
+ cln = &DBConn{
+ Type: dbC.Type,
+ Host: dbC.Host,
+ Port: dbC.Port,
+ Name: dbC.Name,
+ User: dbC.User,
+ Password: dbC.Password,
+ RplFiltered: dbC.RplFiltered,
+ RplCache: dbC.RplCache,
+ RmtConnID: dbC.RmtConnID,
}
- if dbcfg.RplConns != nil {
- cln.RplConns = slices.Clone(dbcfg.RplConns)
+ if dbC.StringIndexedFields != nil {
+ cln.StringIndexedFields = slices.Clone(dbC.StringIndexedFields)
+ }
+ if dbC.PrefixIndexedFields != nil {
+ cln.PrefixIndexedFields = slices.Clone(dbC.PrefixIndexedFields)
+ }
+ if dbC.RmtConns != nil {
+ cln.RmtConns = slices.Clone(dbC.RmtConns)
+ }
+ if dbC.RplConns != nil {
+ cln.RplConns = slices.Clone(dbC.RplConns)
}
return
}
// AsMapInterface returns the config as a map[string]any
-func (dbcfg DataDbCfg) AsMapInterface() any {
+func (dbcfg DbCfg) AsMapInterface() any {
opts := map[string]any{
utils.InternalDBDumpPathCfg: dbcfg.Opts.InternalDBDumpPath,
utils.InternalDBBackupPathCfg: dbcfg.Opts.InternalDBBackupPath,
@@ -371,19 +493,52 @@ func (dbcfg DataDbCfg) AsMapInterface() any {
utils.RedisCACertificateCfg: dbcfg.Opts.RedisCACertificate,
utils.MongoQueryTimeoutCfg: dbcfg.Opts.MongoQueryTimeout.String(),
utils.MongoConnSchemeCfg: dbcfg.Opts.MongoConnScheme,
+ utils.SQLMaxOpenConnsCfg: dbcfg.Opts.SQLMaxOpenConns,
+ utils.SQLMaxIdleConnsCfg: dbcfg.Opts.SQLMaxIdleConns,
+ utils.SQLLogLevelCfg: dbcfg.Opts.SQLLogLevel,
+ utils.SQLConnMaxLifetime: dbcfg.Opts.SQLConnMaxLifetime.String(),
+ utils.MYSQLDSNParams: dbcfg.Opts.SQLDSNParams,
+ utils.PgSSLModeCfg: dbcfg.Opts.PgSSLMode,
+ utils.MysqlLocation: dbcfg.Opts.MySQLLocation,
+ }
+ if dbcfg.Opts.PgSSLCert != "" {
+ opts[utils.PgSSLCertCfg] = dbcfg.Opts.PgSSLCert
+ }
+ if dbcfg.Opts.PgSSLKey != "" {
+ opts[utils.PgSSLKeyCfg] = dbcfg.Opts.PgSSLKey
+ }
+ if dbcfg.Opts.PgSSLPassword != "" {
+ opts[utils.PgSSLPasswordCfg] = dbcfg.Opts.PgSSLPassword
+ }
+ if dbcfg.Opts.PgSSLCertMode != "" {
+ opts[utils.PgSSLCertModeCfg] = dbcfg.Opts.PgSSLCertMode
+ }
+ if dbcfg.Opts.PgSSLRootCert != "" {
+ opts[utils.PgSSLRootCertCfg] = dbcfg.Opts.PgSSLRootCert
+ }
+ dbConns := make(map[string]map[string]any)
+ for k, dbc := range dbcfg.DBConns {
+ dbConns[k] = map[string]any{
+ utils.DataDbTypeCfg: dbc.Type,
+ utils.DataDbHostCfg: dbc.Host,
+ utils.DataDbNameCfg: dbc.Name,
+ utils.DataDbUserCfg: dbc.User,
+ utils.DataDbPassCfg: dbc.Password,
+ utils.StringIndexedFieldsCfg: dbc.StringIndexedFields,
+ utils.PrefixIndexedFieldsCfg: dbc.PrefixIndexedFields,
+ utils.RemoteConnsCfg: dbc.RmtConns,
+ utils.RemoteConnIDCfg: dbc.RmtConnID,
+ utils.ReplicationConnsCfg: dbc.RplConns,
+ utils.ReplicationFilteredCfg: dbc.RplFiltered,
+ utils.ReplicationCache: dbc.RplCache,
+ }
+ if dbc.Port != "" {
+ dbConns[k][utils.DataDbPortCfg], _ = strconv.Atoi(dbc.Port)
+ }
}
mp := map[string]any{
- utils.DataDbTypeCfg: dbcfg.Type,
- utils.DataDbHostCfg: dbcfg.Host,
- utils.DataDbNameCfg: dbcfg.Name,
- utils.DataDbUserCfg: dbcfg.User,
- utils.DataDbPassCfg: dbcfg.Password,
- utils.RemoteConnsCfg: dbcfg.RmtConns,
- utils.RemoteConnIDCfg: dbcfg.RmtConnID,
- utils.ReplicationConnsCfg: dbcfg.RplConns,
- utils.ReplicationFilteredCfg: dbcfg.RplFiltered,
- utils.ReplicationCache: dbcfg.RplCache,
- utils.OptsCfg: opts,
+ utils.DataDbConnsCfg: dbConns,
+ utils.OptsCfg: opts,
}
if dbcfg.Items != nil {
items := make(map[string]any)
@@ -392,9 +547,6 @@ func (dbcfg DataDbCfg) AsMapInterface() any {
}
mp[utils.ItemsCfg] = items
}
- if dbcfg.Port != "" {
- mp[utils.DataDbPortCfg], _ = strconv.Atoi(dbcfg.Port)
- }
return mp
}
@@ -405,82 +557,90 @@ type ItemOpts struct {
StaticTTL bool
Remote bool
Replicate bool
+ DBConn string // ID of the DB connection that this item belongs to
+
// used for ArgDispatcher in case we send this to a dispatcher engine
RouteID string
APIKey string
}
// AsMapInterface returns the config as a map[string]any
-func (itm *ItemOpts) AsMapInterface() (initialMP map[string]any) {
+func (iI *ItemOpts) AsMapInterface() (initialMP map[string]any) {
initialMP = map[string]any{
- utils.RemoteCfg: itm.Remote,
- utils.ReplicateCfg: itm.Replicate,
- utils.LimitCfg: itm.Limit,
- utils.StaticTTLCfg: itm.StaticTTL,
+ utils.RemoteCfg: iI.Remote,
+ utils.ReplicateCfg: iI.Replicate,
+ utils.LimitCfg: iI.Limit,
+ utils.StaticTTLCfg: iI.StaticTTL,
+ utils.DBConnCfg: iI.DBConn,
}
- if itm.APIKey != utils.EmptyString {
- initialMP[utils.APIKeyCfg] = itm.APIKey
+ if iI.APIKey != utils.EmptyString {
+ initialMP[utils.APIKeyCfg] = iI.APIKey
}
- if itm.RouteID != utils.EmptyString {
- initialMP[utils.RouteIDCfg] = itm.RouteID
+ if iI.RouteID != utils.EmptyString {
+ initialMP[utils.RouteIDCfg] = iI.RouteID
}
- if itm.TTL != 0 {
- initialMP[utils.TTLCfg] = itm.TTL.String()
+ if iI.TTL != 0 {
+ initialMP[utils.TTLCfg] = iI.TTL.String()
}
return
}
-func (itm *ItemOpts) loadFromJSONCfg(jsonItm *ItemOptsJson) (err error) {
+func (iI *ItemOpts) loadFromJSONCfg(jsonItm *ItemOptsJson) (err error) {
if jsonItm == nil {
return
}
if jsonItm.Limit != nil {
- itm.Limit = *jsonItm.Limit
+ iI.Limit = *jsonItm.Limit
}
if jsonItm.Static_ttl != nil {
- itm.StaticTTL = *jsonItm.Static_ttl
+ iI.StaticTTL = *jsonItm.Static_ttl
}
if jsonItm.Remote != nil {
- itm.Remote = *jsonItm.Remote
+ iI.Remote = *jsonItm.Remote
}
if jsonItm.Replicate != nil {
- itm.Replicate = *jsonItm.Replicate
+ iI.Replicate = *jsonItm.Replicate
+ }
+ if jsonItm.DbConn != nil {
+ iI.DBConn = *jsonItm.DbConn
}
if jsonItm.Route_id != nil {
- itm.RouteID = *jsonItm.Route_id
+ iI.RouteID = *jsonItm.Route_id
}
if jsonItm.Api_key != nil {
- itm.APIKey = *jsonItm.Api_key
+ iI.APIKey = *jsonItm.Api_key
}
if jsonItm.Ttl != nil {
- itm.TTL, err = utils.ParseDurationWithNanosecs(*jsonItm.Ttl)
+ iI.TTL, err = utils.ParseDurationWithNanosecs(*jsonItm.Ttl)
}
return
}
// Clone returns a deep copy of ItemOpt
-func (itm *ItemOpts) Clone() *ItemOpts {
+func (iI *ItemOpts) Clone() *ItemOpts {
return &ItemOpts{
- Limit: itm.Limit,
- TTL: itm.TTL,
- StaticTTL: itm.StaticTTL,
- Remote: itm.Remote,
- Replicate: itm.Replicate,
- APIKey: itm.APIKey,
- RouteID: itm.RouteID,
+ Limit: iI.Limit,
+ TTL: iI.TTL,
+ StaticTTL: iI.StaticTTL,
+ Remote: iI.Remote,
+ Replicate: iI.Replicate,
+ DBConn: iI.DBConn,
+ APIKey: iI.APIKey,
+ RouteID: iI.RouteID,
}
}
-func (itm *ItemOpts) Equals(itm2 *ItemOpts) bool {
- return itm == nil && itm2 == nil ||
- itm != nil && itm2 != nil &&
- itm.Remote == itm2.Remote &&
- itm.Replicate == itm2.Replicate &&
- itm.RouteID == itm2.RouteID &&
- itm.APIKey == itm2.APIKey &&
- itm.Limit == itm2.Limit &&
- itm.TTL == itm2.TTL &&
- itm.StaticTTL == itm2.StaticTTL
+func (iI *ItemOpts) Equals(itm2 *ItemOpts) bool {
+ return iI == nil && itm2 == nil ||
+ iI != nil && itm2 != nil &&
+ iI.Remote == itm2.Remote &&
+ iI.Replicate == itm2.Replicate &&
+ iI.RouteID == itm2.RouteID &&
+ iI.APIKey == itm2.APIKey &&
+ iI.Limit == itm2.Limit &&
+ iI.TTL == itm2.TTL &&
+ iI.DBConn == itm2.DBConn &&
+ iI.StaticTTL == itm2.StaticTTL
}
type ItemOptsJson struct {
@@ -489,6 +649,7 @@ type ItemOptsJson struct {
Static_ttl *bool
Remote *bool
Replicate *bool
+ DbConn *string
// used for ArgDispatcher in case we send this to a dispatcher engine
Route_id *string
Api_key *string
@@ -513,6 +674,9 @@ func diffItemOptJson(d *ItemOptsJson, v1, v2 *ItemOpts) *ItemOptsJson {
if v2.TTL != v1.TTL {
d.Ttl = utils.StringPointer(v2.TTL.String())
}
+ if v2.DBConn != v1.DBConn {
+ d.DbConn = utils.StringPointer(v2.DBConn)
+ }
if v2.RouteID != v1.RouteID {
d.Route_id = utils.StringPointer(v2.RouteID)
}
@@ -574,8 +738,7 @@ type DBOptsJson struct {
MySQLLocation *string `json:"mysqlLocation"`
}
-// Database config
-type DbJsonCfg struct {
+type DbConnJson struct {
Db_type *string
Db_host *string
Db_port *int
@@ -589,11 +752,18 @@ type DbJsonCfg struct {
Replication_conns *[]string
Replication_filtered *bool
Replication_cache *string
- Items map[string]*ItemOptsJson
- Opts *DBOptsJson
}
-func diffDataDBOptsJsonCfg(d *DBOptsJson, v1, v2 *DataDBOpts) *DBOptsJson {
+type DbConnsJson map[string]*DbConnJson
+
+// Database config
+type DbJsonCfg struct {
+ Db_conns DbConnsJson
+ Items map[string]*ItemOptsJson
+ Opts *DBOptsJson
+}
+
+func diffDataDBOptsJsonCfg(d *DBOptsJson, v1, v2 *DBOpts) *DBOptsJson {
if d == nil {
d = new(DBOptsJson)
}
@@ -666,12 +836,48 @@ func diffDataDBOptsJsonCfg(d *DBOptsJson, v1, v2 *DataDBOpts) *DBOptsJson {
if v1.MongoConnScheme != v2.MongoConnScheme {
d.MongoConnScheme = utils.StringPointer(v2.MongoConnScheme)
}
+ if v1.SQLMaxOpenConns != v2.SQLMaxOpenConns {
+ d.SQLMaxOpenConns = utils.IntPointer(v2.SQLMaxOpenConns)
+ }
+ if v1.SQLMaxIdleConns != v2.SQLMaxIdleConns {
+ d.SQLMaxIdleConns = utils.IntPointer(v2.SQLMaxIdleConns)
+ }
+ if v1.SQLLogLevel != v2.SQLLogLevel {
+ d.SQLLogLevel = utils.IntPointer(v2.SQLLogLevel)
+ }
+ if v1.SQLConnMaxLifetime != v2.SQLConnMaxLifetime {
+ d.SQLConnMaxLifetime = utils.StringPointer(v2.SQLConnMaxLifetime.String())
+ }
+ if !reflect.DeepEqual(v1.SQLDSNParams, v2.SQLDSNParams) {
+ d.MYSQLDSNParams = v2.SQLDSNParams
+ }
+ if v1.PgSSLMode != v2.PgSSLMode {
+ d.PgSSLMode = utils.StringPointer(v2.PgSSLMode)
+ }
+ if v1.PgSSLCert != v2.PgSSLCert {
+ d.PgSSLCert = utils.StringPointer(v2.PgSSLCert)
+ }
+ if v1.PgSSLKey != v2.PgSSLKey {
+ d.PgSSLKey = utils.StringPointer(v2.PgSSLKey)
+ }
+ if v1.PgSSLPassword != v2.PgSSLPassword {
+ d.PgSSLPassword = utils.StringPointer(v2.PgSSLPassword)
+ }
+ if v1.PgSSLCertMode != v2.PgSSLCertMode {
+ d.PgSSLCertMode = utils.StringPointer(v2.PgSSLCertMode)
+ }
+ if v1.PgSSLRootCert != v2.PgSSLRootCert {
+ d.PgSSLRootCert = utils.StringPointer(v2.PgSSLRootCert)
+ }
+ if v1.MySQLLocation != v2.MySQLLocation {
+ d.MySQLLocation = utils.StringPointer(v2.MySQLLocation)
+ }
return d
}
-func diffDataDBJsonCfg(d *DbJsonCfg, v1, v2 *DataDbCfg) *DbJsonCfg {
+func diffDataDBConnJsonCfg(d *DbConnJson, v1, v2 *DBConn) *DbConnJson {
if d == nil {
- d = new(DbJsonCfg)
+ d = new(DbConnJson)
}
if v1.Type != v2.Type {
d.Db_type = utils.StringPointer(v2.Type)
@@ -692,6 +898,12 @@ func diffDataDBJsonCfg(d *DbJsonCfg, v1, v2 *DataDbCfg) *DbJsonCfg {
if v1.Password != v2.Password {
d.Db_password = utils.StringPointer(v2.Password)
}
+ if !slices.Equal(v1.StringIndexedFields, v2.StringIndexedFields) {
+ d.String_indexed_fields = &v2.StringIndexedFields
+ }
+ if !slices.Equal(v1.PrefixIndexedFields, v2.PrefixIndexedFields) {
+ d.Prefix_indexed_fields = &v2.PrefixIndexedFields
+ }
if !slices.Equal(v1.RmtConns, v2.RmtConns) {
d.Remote_conns = &v2.RmtConns
}
@@ -707,13 +919,69 @@ func diffDataDBJsonCfg(d *DbJsonCfg, v1, v2 *DataDbCfg) *DbJsonCfg {
if v1.RplCache != v2.RplCache {
d.Replication_cache = utils.StringPointer(v2.RplCache)
}
+ return d
+}
+
+func diffDataDBConnsJsonCfg(d DbConnsJson, v1, v2 DBConns) DbConnsJson {
+ if d == nil {
+ d = make(DbConnsJson)
+ }
+ for key, val2 := range v2 {
+ if val1, has := v1[key]; !has {
+ d[key] = diffDataDBConnJsonCfg(d[key], new(DBConn), val2)
+ } else if !val1.Equals(val2) {
+ d[key] = diffDataDBConnJsonCfg(d[key], val1, val2)
+ }
+ }
+ return d
+}
+
+func (dbC *DBConn) Equals(dbC2 *DBConn) bool {
+ if dbC2 == nil {
+ return false
+ }
+ if dbC.Type != dbC2.Type ||
+ dbC.Host != dbC2.Host ||
+ dbC.Port != dbC2.Port ||
+ dbC.Name != dbC2.Name ||
+ dbC.User != dbC2.User ||
+ dbC.Password != dbC2.Password ||
+ dbC.RmtConnID != dbC2.RmtConnID ||
+ dbC.RplFiltered != dbC2.RplFiltered ||
+ dbC.RplCache != dbC2.RplCache {
+ return false
+ }
+ if len(dbC.RmtConns) != len(dbC2.RmtConns) {
+ return false
+ }
+ for i := range dbC.RmtConns {
+ if dbC.RmtConns[i] != dbC2.RmtConns[i] {
+ return false
+ }
+ }
+ if len(dbC.RplConns) != len(dbC2.RplConns) {
+ return false
+ }
+ for i := range dbC.RplConns {
+ if dbC.RplConns[i] != dbC2.RplConns[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func diffDataDBJsonCfg(d *DbJsonCfg, v1, v2 *DbCfg) *DbJsonCfg {
+ if d == nil {
+ d = new(DbJsonCfg)
+ }
+ d.Db_conns = diffDataDBConnsJsonCfg(d.Db_conns, v1.DBConns, v2.DBConns)
d.Items = diffMapItemOptJson(d.Items, v1.Items, v2.Items)
d.Opts = diffDataDBOptsJsonCfg(d.Opts, v1.Opts, v2.Opts)
return d
}
// ToTransCacheOpts returns to ltcache.TransCacheOpts from DataDBOpts
-func (d *DataDBOpts) ToTransCacheOpts() *ltcache.TransCacheOpts {
+func (d *DBOpts) ToTransCacheOpts() *ltcache.TransCacheOpts {
if d == nil {
return nil
}
diff --git a/config/migratorcfg.go b/config/migratorcfg.go
index f8b2322dc..1c7b41d85 100644
--- a/config/migratorcfg.go
+++ b/config/migratorcfg.go
@@ -19,9 +19,7 @@ along with this program. If not, see
package config
import (
- "fmt"
"slices"
- "strings"
"github.com/cgrates/birpc/context"
"github.com/cgrates/cgrates/utils"
@@ -29,15 +27,43 @@ import (
// MigratorCgrCfg the migrator config section
type MigratorCgrCfg struct {
- OutDataDBType string
- OutDataDBHost string
- OutDataDBPort string
- OutDataDBName string
- OutDataDBUser string
- OutDataDBPassword string
- OutDataDBEncoding string
- UsersFilters []string
- OutDataDBOpts *DataDBOpts
+ UsersFilters []string
+ FromItems map[string]*MigratorFromItem // contains the in items as the keys of the map, and the DataDB ids of each item in MigratorFromItems
+ OutDBOpts *DBOpts
+}
+
+// MigratorFromItem contains the DataDB id of the item
+type MigratorFromItem struct {
+ DBConn string // ID of the DB connection that this item belongs to
+}
+
+// loadFromJSONCfg loads Database config from JsonCfg
+func (mfi *MigratorFromItem) loadFromJSONCfg(jsonII *FromItemJson) (err error) {
+ if jsonII == nil {
+ return
+ }
+ if jsonII.DbConn != nil {
+ mfi.DBConn = *jsonII.DbConn
+ }
+ return
+}
+
+// Clone returns the cloned object
+func (mfi *MigratorFromItem) Clone() *MigratorFromItem {
+ if mfi == nil {
+ return nil
+ }
+ return &MigratorFromItem{
+ DBConn: mfi.DBConn,
+ }
+}
+
+// AsMapInterface returns the config as a map[string]any
+func (mfi *MigratorFromItem) AsMapInterface() (initialMP map[string]any) {
+ initialMP = map[string]any{
+ utils.DBConnCfg: mfi.DBConn,
+ }
+ return
}
// loadMigratorCgrCfg loads the Migrator section of the configuration
@@ -53,72 +79,59 @@ func (mg *MigratorCgrCfg) loadFromJSONCfg(jsnCfg *MigratorCfgJson) (err error) {
if jsnCfg == nil {
return
}
- if jsnCfg.Out_dataDB_type != nil {
-
- if !strings.HasPrefix(*jsnCfg.Out_dataDB_type, "*") {
- mg.OutDataDBType = fmt.Sprintf("*%v", *jsnCfg.Out_dataDB_type)
- } else {
- mg.OutDataDBType = *jsnCfg.Out_dataDB_type
- }
- }
- if jsnCfg.Out_dataDB_host != nil {
- mg.OutDataDBHost = *jsnCfg.Out_dataDB_host
- }
- if jsnCfg.Out_dataDB_port != nil {
- mg.OutDataDBPort = *jsnCfg.Out_dataDB_port
- }
- if jsnCfg.Out_dataDB_name != nil {
- mg.OutDataDBName = *jsnCfg.Out_dataDB_name
- }
- if jsnCfg.Out_dataDB_user != nil {
- mg.OutDataDBUser = *jsnCfg.Out_dataDB_user
- }
- if jsnCfg.Out_dataDB_password != nil {
- mg.OutDataDBPassword = *jsnCfg.Out_dataDB_password
- }
- if jsnCfg.Out_dataDB_encoding != nil {
- mg.OutDataDBEncoding = strings.TrimPrefix(*jsnCfg.Out_dataDB_encoding, "*")
- }
if jsnCfg.Users_filters != nil && len(*jsnCfg.Users_filters) != 0 {
mg.UsersFilters = slices.Clone(*jsnCfg.Users_filters)
}
- if jsnCfg.Out_dataDB_opts != nil {
- err = mg.OutDataDBOpts.loadFromJSONCfg(jsnCfg.Out_dataDB_opts)
+ if jsnCfg.FromItems != nil {
+ for kJsn, vJsn := range jsnCfg.FromItems {
+ val, has := mg.FromItems[kJsn]
+ if val == nil || !has {
+ val = new(MigratorFromItem)
+ }
+ if err = val.loadFromJSONCfg(vJsn); err != nil {
+ return
+ }
+ mg.FromItems[kJsn] = val
+ }
+ }
+ if jsnCfg.Out_db_opts != nil {
+ err = mg.OutDBOpts.loadFromJSONCfg(jsnCfg.Out_db_opts)
}
return
}
// AsMapInterface returns the config as a map[string]any
func (mg MigratorCgrCfg) AsMapInterface() any {
- outDataDBOpts := map[string]any{
- utils.RedisMaxConnsCfg: mg.OutDataDBOpts.RedisMaxConns,
- utils.RedisConnectAttemptsCfg: mg.OutDataDBOpts.RedisConnectAttempts,
- utils.RedisSentinelNameCfg: mg.OutDataDBOpts.RedisSentinel,
- utils.RedisClusterCfg: mg.OutDataDBOpts.RedisCluster,
- utils.RedisClusterSyncCfg: mg.OutDataDBOpts.RedisClusterSync.String(),
- utils.RedisClusterOnDownDelayCfg: mg.OutDataDBOpts.RedisClusterOndownDelay.String(),
- utils.RedisConnectTimeoutCfg: mg.OutDataDBOpts.RedisConnectTimeout.String(),
- utils.RedisReadTimeoutCfg: mg.OutDataDBOpts.RedisReadTimeout.String(),
- utils.RedisWriteTimeoutCfg: mg.OutDataDBOpts.RedisWriteTimeout.String(),
- utils.RedisPoolPipelineWindowCfg: mg.OutDataDBOpts.RedisPoolPipelineWindow.String(),
- utils.RedisPoolPipelineLimitCfg: mg.OutDataDBOpts.RedisPoolPipelineLimit,
- utils.RedisTLSCfg: mg.OutDataDBOpts.RedisTLS,
- utils.RedisClientCertificateCfg: mg.OutDataDBOpts.RedisClientCertificate,
- utils.RedisClientKeyCfg: mg.OutDataDBOpts.RedisClientKey,
- utils.RedisCACertificateCfg: mg.OutDataDBOpts.RedisCACertificate,
- utils.MongoQueryTimeoutCfg: mg.OutDataDBOpts.MongoQueryTimeout.String(),
- utils.MongoConnSchemeCfg: mg.OutDataDBOpts.MongoConnScheme,
+ outDBOpts := map[string]any{
+ utils.RedisMaxConnsCfg: mg.OutDBOpts.RedisMaxConns,
+ utils.RedisConnectAttemptsCfg: mg.OutDBOpts.RedisConnectAttempts,
+ utils.RedisSentinelNameCfg: mg.OutDBOpts.RedisSentinel,
+ utils.RedisClusterCfg: mg.OutDBOpts.RedisCluster,
+ utils.RedisClusterSyncCfg: mg.OutDBOpts.RedisClusterSync.String(),
+ utils.RedisClusterOnDownDelayCfg: mg.OutDBOpts.RedisClusterOndownDelay.String(),
+ utils.RedisConnectTimeoutCfg: mg.OutDBOpts.RedisConnectTimeout.String(),
+ utils.RedisReadTimeoutCfg: mg.OutDBOpts.RedisReadTimeout.String(),
+ utils.RedisWriteTimeoutCfg: mg.OutDBOpts.RedisWriteTimeout.String(),
+ utils.RedisPoolPipelineWindowCfg: mg.OutDBOpts.RedisPoolPipelineWindow.String(),
+ utils.RedisPoolPipelineLimitCfg: mg.OutDBOpts.RedisPoolPipelineLimit,
+ utils.RedisTLSCfg: mg.OutDBOpts.RedisTLS,
+ utils.RedisClientCertificateCfg: mg.OutDBOpts.RedisClientCertificate,
+ utils.RedisClientKeyCfg: mg.OutDBOpts.RedisClientKey,
+ utils.RedisCACertificateCfg: mg.OutDBOpts.RedisCACertificate,
+ utils.MongoQueryTimeoutCfg: mg.OutDBOpts.MongoQueryTimeout.String(),
+ utils.MongoConnSchemeCfg: mg.OutDBOpts.MongoConnScheme,
+ }
+ var items map[string]any
+ if mg.FromItems != nil {
+ items = make(map[string]any)
+ for itemID, item := range mg.FromItems {
+ items[itemID] = item.AsMapInterface()
+ }
}
return map[string]any{
- utils.OutDataDBTypeCfg: mg.OutDataDBType,
- utils.OutDataDBHostCfg: mg.OutDataDBHost,
- utils.OutDataDBPortCfg: mg.OutDataDBPort,
- utils.OutDataDBNameCfg: mg.OutDataDBName,
- utils.OutDataDBUserCfg: mg.OutDataDBUser,
- utils.OutDataDBPasswordCfg: mg.OutDataDBPassword,
- utils.OutDataDBEncodingCfg: mg.OutDataDBEncoding,
- utils.OutDataDBOptsCfg: outDataDBOpts,
- utils.UsersFiltersCfg: slices.Clone(mg.UsersFilters),
+ utils.FromItemsCfg: items,
+ utils.OutDBOptsCfg: outDBOpts,
+ utils.UsersFiltersCfg: slices.Clone(mg.UsersFilters),
}
}
@@ -128,15 +141,11 @@ func (mg MigratorCgrCfg) CloneSection() Section { return mg.Clone() }
// Clone returns a deep copy of MigratorCgrCfg
func (mg MigratorCgrCfg) Clone() (cln *MigratorCgrCfg) {
cln = &MigratorCgrCfg{
- OutDataDBType: mg.OutDataDBType,
- OutDataDBHost: mg.OutDataDBHost,
- OutDataDBPort: mg.OutDataDBPort,
- OutDataDBName: mg.OutDataDBName,
- OutDataDBUser: mg.OutDataDBUser,
- OutDataDBPassword: mg.OutDataDBPassword,
- OutDataDBEncoding: mg.OutDataDBEncoding,
-
- OutDataDBOpts: mg.OutDataDBOpts.Clone(),
+ FromItems: make(map[string]*MigratorFromItem),
+ OutDBOpts: mg.OutDBOpts.Clone(),
+ }
+ for k, v := range mg.FromItems {
+ cln.FromItems[k] = v.Clone()
}
if mg.UsersFilters != nil {
cln.UsersFilters = slices.Clone(mg.UsersFilters)
@@ -145,46 +154,54 @@ func (mg MigratorCgrCfg) Clone() (cln *MigratorCgrCfg) {
}
type MigratorCfgJson struct {
- Out_dataDB_type *string
- Out_dataDB_host *string
- Out_dataDB_port *string
- Out_dataDB_name *string
- Out_dataDB_user *string
- Out_dataDB_password *string
- Out_dataDB_encoding *string
- Users_filters *[]string
- Out_dataDB_opts *DBOptsJson
+ Users_filters *[]string
+ FromItems map[string]*FromItemJson
+ Out_db_opts *DBOptsJson
+}
+
+type FromItemJson struct {
+ DbConn *string
+}
+
+func (mfi *MigratorFromItem) Equals(itm2 *MigratorFromItem) bool {
+ return mfi == nil && itm2 == nil ||
+ mfi != nil && itm2 != nil && mfi.DBConn == itm2.DBConn
+}
+
+func diffFromItemJson(d *FromItemJson, v1, v2 *MigratorFromItem) *FromItemJson {
+ if d == nil {
+ d = new(FromItemJson)
+ }
+ if v2.DBConn != v1.DBConn {
+ d.DbConn = utils.StringPointer(v2.DBConn)
+ }
+ return d
+}
+
+func diffMapFromItemJson(d map[string]*FromItemJson, v1 map[string]*MigratorFromItem,
+ v2 map[string]*MigratorFromItem) map[string]*FromItemJson {
+ if d == nil {
+ d = make(map[string]*FromItemJson)
+ }
+ for k, val2 := range v2 {
+ if val1, has := v1[k]; !has {
+ d[k] = diffFromItemJson(d[k], new(MigratorFromItem), val2)
+ } else if !val1.Equals(val2) {
+ d[k] = diffFromItemJson(d[k], val1, val2)
+ }
+ }
+ return d
}
func diffMigratorCfgJson(d *MigratorCfgJson, v1, v2 *MigratorCgrCfg) *MigratorCfgJson {
if d == nil {
d = new(MigratorCfgJson)
}
- if v1.OutDataDBType != v2.OutDataDBType {
- d.Out_dataDB_type = utils.StringPointer(v2.OutDataDBType)
- }
- if v1.OutDataDBHost != v2.OutDataDBHost {
- d.Out_dataDB_host = utils.StringPointer(v2.OutDataDBHost)
- }
- if v1.OutDataDBPort != v2.OutDataDBPort {
- d.Out_dataDB_port = utils.StringPointer(v2.OutDataDBPort)
- }
- if v1.OutDataDBName != v2.OutDataDBName {
- d.Out_dataDB_name = utils.StringPointer(v2.OutDataDBName)
- }
- if v1.OutDataDBUser != v2.OutDataDBUser {
- d.Out_dataDB_user = utils.StringPointer(v2.OutDataDBUser)
- }
- if v1.OutDataDBPassword != v2.OutDataDBPassword {
- d.Out_dataDB_password = utils.StringPointer(v2.OutDataDBPassword)
- }
- if v1.OutDataDBEncoding != v2.OutDataDBEncoding {
- d.Out_dataDB_encoding = utils.StringPointer(v2.OutDataDBEncoding)
- }
if !slices.Equal(v1.UsersFilters, v2.UsersFilters) {
d.Users_filters = utils.SliceStringPointer(slices.Clone(v2.UsersFilters))
}
- d.Out_dataDB_opts = diffDataDBOptsJsonCfg(d.Out_dataDB_opts, v1.OutDataDBOpts, v2.OutDataDBOpts)
+ d.FromItems = diffMapFromItemJson(d.FromItems, v1.FromItems, v2.FromItems)
+ d.Out_db_opts = diffDataDBOptsJsonCfg(d.Out_db_opts, v1.OutDBOpts, v2.OutDBOpts)
return d
}
diff --git a/config/stordbcfg.go b/config/stordbcfg.go
deleted file mode 100644
index 08a0b4f04..000000000
--- a/config/stordbcfg.go
+++ /dev/null
@@ -1,475 +0,0 @@
-/*
-Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
-Copyright (C) ITsysCOM GmbH
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see <http://www.gnu.org/licenses/>
-*/
-
-package config
-
-import (
- "fmt"
- "reflect"
- "slices"
- "strconv"
- "strings"
- "time"
-
- "github.com/cgrates/birpc/context"
- "github.com/cgrates/cgrates/utils"
- "github.com/cgrates/ltcache"
-)
-
-type StorDBOpts struct {
- InternalDBDumpPath string // Path to the dump file
- InternalDBBackupPath string // Path where db dump will backup
- InternalDBStartTimeout time.Duration // Transcache recover from dump files timeout duration
- InternalDBDumpInterval time.Duration // Regurarly dump database to file
- InternalDBRewriteInterval time.Duration // Regurarly rewrite dump files
- InternalDBFileSizeLimit int64 // maximum size that can be written in a singular dump file
- SQLMaxOpenConns int
- SQLMaxIdleConns int
- SQLLogLevel int
- SQLConnMaxLifetime time.Duration
- SQLDSNParams map[string]string
- PgSSLMode string
- PgSSLCert string
- PgSSLKey string
- PgSSLPassword string
- PgSSLCertMode string
- PgSSLRootCert string
- MySQLLocation string
- MongoQueryTimeout time.Duration
- MongoConnScheme string
-}
-
-// StorDbCfg StroreDb config
-type StorDbCfg struct {
- Type string // should reflect the database type used to store logs
- Host string // the host to connect to. Values that start with / are for UNIX domain sockets
- Port string // the port to bind to
- Name string // the name of the database to connect to
- User string // the user to sign in as
- Password string // the user's password
- StringIndexedFields []string
- PrefixIndexedFields []string
- RmtConns []string // remote DataDB connIDs
- RplConns []string // replication connIDs
- Items map[string]*ItemOpts
- Opts *StorDBOpts
-}
-
-// loadStorDBCfg loads the StorDB section of the configuration
-func (dbcfg *StorDbCfg) Load(ctx *context.Context, jsnCfg ConfigDB, _ *CGRConfig) (err error) {
- jsnDataDbCfg := new(DbJsonCfg)
- if err = jsnCfg.GetSection(ctx, StorDBJSON, jsnDataDbCfg); err != nil {
- return
- }
- return dbcfg.loadFromJSONCfg(jsnDataDbCfg)
-}
-
-func (dbOpts *StorDBOpts) loadFromJSONCfg(jsnCfg *DBOptsJson) (err error) {
- if jsnCfg == nil {
- return
- }
- if jsnCfg.InternalDBDumpPath != nil {
- dbOpts.InternalDBDumpPath = *jsnCfg.InternalDBDumpPath
- }
- if jsnCfg.InternalDBBackupPath != nil {
- dbOpts.InternalDBBackupPath = *jsnCfg.InternalDBBackupPath
- }
- if jsnCfg.InternalDBStartTimeout != nil {
- if dbOpts.InternalDBStartTimeout, err = utils.ParseDurationWithNanosecs(*jsnCfg.InternalDBStartTimeout); err != nil {
- return err
- }
- }
- if jsnCfg.InternalDBDumpInterval != nil {
- if dbOpts.InternalDBDumpInterval, err = utils.ParseDurationWithNanosecs(*jsnCfg.InternalDBDumpInterval); err != nil {
- return err
- }
- }
- if jsnCfg.InternalDBRewriteInterval != nil {
- if dbOpts.InternalDBRewriteInterval, err = utils.ParseDurationWithNanosecs(*jsnCfg.InternalDBRewriteInterval); err != nil {
- return err
- }
- }
- if jsnCfg.InternalDBFileSizeLimit != nil {
- if dbOpts.InternalDBFileSizeLimit, err = utils.ParseBinarySize(*jsnCfg.InternalDBFileSizeLimit); err != nil {
- return err
- }
- }
- if jsnCfg.SQLMaxOpenConns != nil {
- dbOpts.SQLMaxOpenConns = *jsnCfg.SQLMaxOpenConns
- }
- if jsnCfg.SQLMaxIdleConns != nil {
- dbOpts.SQLMaxIdleConns = *jsnCfg.SQLMaxIdleConns
- }
- if jsnCfg.SQLLogLevel != nil {
- dbOpts.SQLLogLevel = *jsnCfg.SQLLogLevel
- }
- if jsnCfg.SQLConnMaxLifetime != nil {
- if dbOpts.SQLConnMaxLifetime, err = utils.ParseDurationWithNanosecs(*jsnCfg.SQLConnMaxLifetime); err != nil {
- return
- }
- }
- if jsnCfg.MYSQLDSNParams != nil {
- dbOpts.SQLDSNParams = make(map[string]string)
- dbOpts.SQLDSNParams = jsnCfg.MYSQLDSNParams
- }
- if jsnCfg.PgSSLMode != nil {
- dbOpts.PgSSLMode = *jsnCfg.PgSSLMode
- }
- if jsnCfg.PgSSLCert != nil {
- dbOpts.PgSSLCert = *jsnCfg.PgSSLCert
- }
- if jsnCfg.PgSSLKey != nil {
- dbOpts.PgSSLKey = *jsnCfg.PgSSLKey
- }
- if jsnCfg.PgSSLPassword != nil {
- dbOpts.PgSSLPassword = *jsnCfg.PgSSLPassword
- }
- if jsnCfg.PgSSLCertMode != nil {
- dbOpts.PgSSLCertMode = *jsnCfg.PgSSLCertMode
- }
- if jsnCfg.PgSSLRootCert != nil {
- dbOpts.PgSSLRootCert = *jsnCfg.PgSSLRootCert
- }
- if jsnCfg.MySQLLocation != nil {
- dbOpts.MySQLLocation = *jsnCfg.MySQLLocation
- }
- if jsnCfg.MongoQueryTimeout != nil {
- if dbOpts.MongoQueryTimeout, err = utils.ParseDurationWithNanosecs(*jsnCfg.MongoQueryTimeout); err != nil {
- return
- }
- }
- if jsnCfg.MongoConnScheme != nil {
- dbOpts.MongoConnScheme = *jsnCfg.MongoConnScheme
- }
- return
-}
-
-// loadFromJSONCfg loads StoreDb config from JsonCfg
-func (dbcfg *StorDbCfg) loadFromJSONCfg(jsnDbCfg *DbJsonCfg) (err error) {
- if jsnDbCfg == nil {
- return nil
- }
- if jsnDbCfg.Db_type != nil {
- if !strings.HasPrefix(*jsnDbCfg.Db_type, "*") {
- dbcfg.Type = fmt.Sprintf("*%v", *jsnDbCfg.Db_type)
- } else {
- dbcfg.Type = *jsnDbCfg.Db_type
- }
- }
- if jsnDbCfg.Db_host != nil {
- dbcfg.Host = *jsnDbCfg.Db_host
- }
- if jsnDbCfg.Db_port != nil {
- port := strconv.Itoa(*jsnDbCfg.Db_port)
- if port == "-1" {
- port = utils.MetaDynamic
- }
- dbcfg.Port = defaultDBPort(dbcfg.Type, port)
- }
- if jsnDbCfg.Db_name != nil {
- dbcfg.Name = *jsnDbCfg.Db_name
- }
- if jsnDbCfg.Db_user != nil {
- dbcfg.User = *jsnDbCfg.Db_user
- }
- if jsnDbCfg.Db_password != nil {
- dbcfg.Password = *jsnDbCfg.Db_password
- }
- if jsnDbCfg.String_indexed_fields != nil {
- dbcfg.StringIndexedFields = *jsnDbCfg.String_indexed_fields
- }
- if jsnDbCfg.Prefix_indexed_fields != nil {
- dbcfg.PrefixIndexedFields = *jsnDbCfg.Prefix_indexed_fields
- }
- if jsnDbCfg.Remote_conns != nil {
- dbcfg.RmtConns = make([]string, len(*jsnDbCfg.Remote_conns))
- for i, item := range *jsnDbCfg.Remote_conns {
- if item == utils.MetaInternal {
- return fmt.Errorf("Remote connection ID needs to be different than *internal ")
- }
- dbcfg.RmtConns[i] = item
- }
- }
- if jsnDbCfg.Replication_conns != nil {
- dbcfg.RplConns = make([]string, len(*jsnDbCfg.Replication_conns))
- for i, item := range *jsnDbCfg.Replication_conns {
- if item == utils.MetaInternal {
- return fmt.Errorf("Replication connection ID needs to be different than *internal ")
- }
- dbcfg.RplConns[i] = item
- }
- }
- if jsnDbCfg.Items != nil {
- for kJsn, vJsn := range jsnDbCfg.Items {
- val := new(ItemOpts)
- if err = val.loadFromJSONCfg(vJsn); err != nil {
- return
- }
- dbcfg.Items[kJsn] = val
- }
- }
- if jsnDbCfg.Opts != nil {
- err = dbcfg.Opts.loadFromJSONCfg(jsnDbCfg.Opts)
- }
- return
-}
-
-func (StorDbCfg) SName() string { return StorDBJSON }
-func (dbcfg StorDbCfg) CloneSection() Section { return dbcfg.Clone() }
-
-func (dbOpts *StorDBOpts) Clone() *StorDBOpts {
- return &StorDBOpts{
- InternalDBDumpPath: dbOpts.InternalDBDumpPath,
- InternalDBBackupPath: dbOpts.InternalDBBackupPath,
- InternalDBStartTimeout: dbOpts.InternalDBStartTimeout,
- InternalDBDumpInterval: dbOpts.InternalDBDumpInterval,
- InternalDBRewriteInterval: dbOpts.InternalDBRewriteInterval,
- InternalDBFileSizeLimit: dbOpts.InternalDBFileSizeLimit,
- SQLMaxOpenConns: dbOpts.SQLMaxOpenConns,
- SQLMaxIdleConns: dbOpts.SQLMaxIdleConns,
- SQLLogLevel: dbOpts.SQLLogLevel,
- SQLConnMaxLifetime: dbOpts.SQLConnMaxLifetime,
- SQLDSNParams: dbOpts.SQLDSNParams,
- PgSSLMode: dbOpts.PgSSLMode,
- PgSSLCert: dbOpts.PgSSLCert,
- PgSSLKey: dbOpts.PgSSLKey,
- PgSSLPassword: dbOpts.PgSSLPassword,
- PgSSLCertMode: dbOpts.PgSSLCertMode,
- PgSSLRootCert: dbOpts.PgSSLRootCert,
- MySQLLocation: dbOpts.MySQLLocation,
- MongoQueryTimeout: dbOpts.MongoQueryTimeout,
- MongoConnScheme: dbOpts.MongoConnScheme,
- }
-}
-
-// Clone returns the cloned object
-func (dbcfg StorDbCfg) Clone() (cln *StorDbCfg) {
- cln = &StorDbCfg{
- Type: dbcfg.Type,
- Host: dbcfg.Host,
- Port: dbcfg.Port,
- Name: dbcfg.Name,
- User: dbcfg.User,
- Password: dbcfg.Password,
-
- Items: make(map[string]*ItemOpts),
- Opts: dbcfg.Opts.Clone(),
- }
- for key, item := range dbcfg.Items {
- cln.Items[key] = item.Clone()
- }
- if dbcfg.StringIndexedFields != nil {
- cln.StringIndexedFields = slices.Clone(dbcfg.StringIndexedFields)
- }
- if dbcfg.PrefixIndexedFields != nil {
- cln.PrefixIndexedFields = slices.Clone(dbcfg.PrefixIndexedFields)
- }
- if dbcfg.RmtConns != nil {
- cln.RmtConns = slices.Clone(dbcfg.RmtConns)
- }
- if dbcfg.RplConns != nil {
- cln.RplConns = slices.Clone(dbcfg.RplConns)
- }
- return
-}
-
-// AsMapInterface returns the config as a map[string]any
-func (dbcfg StorDbCfg) AsMapInterface() any {
- opts := map[string]any{
- utils.InternalDBDumpPathCfg: dbcfg.Opts.InternalDBDumpPath,
- utils.InternalDBBackupPathCfg: dbcfg.Opts.InternalDBBackupPath,
- utils.InternalDBStartTimeoutCfg: dbcfg.Opts.InternalDBStartTimeout.String(),
- utils.InternalDBDumpIntervalCfg: dbcfg.Opts.InternalDBDumpInterval.String(),
- utils.InternalDBRewriteIntervalCfg: dbcfg.Opts.InternalDBRewriteInterval.String(),
- utils.InternalDBFileSizeLimitCfg: dbcfg.Opts.InternalDBFileSizeLimit,
- utils.SQLMaxOpenConnsCfg: dbcfg.Opts.SQLMaxOpenConns,
- utils.SQLMaxIdleConnsCfg: dbcfg.Opts.SQLMaxIdleConns,
- utils.SQLLogLevelCfg: dbcfg.Opts.SQLLogLevel,
- utils.SQLConnMaxLifetime: dbcfg.Opts.SQLConnMaxLifetime.String(),
- utils.MYSQLDSNParams: dbcfg.Opts.SQLDSNParams,
- utils.PgSSLModeCfg: dbcfg.Opts.PgSSLMode,
- utils.MysqlLocation: dbcfg.Opts.MySQLLocation,
- utils.MongoQueryTimeoutCfg: dbcfg.Opts.MongoQueryTimeout.String(),
- utils.MongoConnSchemeCfg: dbcfg.Opts.MongoConnScheme,
- }
- if dbcfg.Opts.PgSSLCert != "" {
- opts[utils.PgSSLCertCfg] = dbcfg.Opts.PgSSLCert
- }
- if dbcfg.Opts.PgSSLKey != "" {
- opts[utils.PgSSLKeyCfg] = dbcfg.Opts.PgSSLKey
- }
- if dbcfg.Opts.PgSSLPassword != "" {
- opts[utils.PgSSLPasswordCfg] = dbcfg.Opts.PgSSLPassword
- }
- if dbcfg.Opts.PgSSLCertMode != "" {
- opts[utils.PgSSLCertModeCfg] = dbcfg.Opts.PgSSLCertMode
- }
- if dbcfg.Opts.PgSSLRootCert != "" {
- opts[utils.PgSSLRootCertCfg] = dbcfg.Opts.PgSSLRootCert
- }
- mp := map[string]any{
- utils.DataDbTypeCfg: utils.Meta + dbcfg.Type,
- utils.DataDbHostCfg: dbcfg.Host,
- utils.DataDbNameCfg: dbcfg.Name,
- utils.DataDbUserCfg: dbcfg.User,
- utils.DataDbPassCfg: dbcfg.Password,
- utils.StringIndexedFieldsCfg: dbcfg.StringIndexedFields,
- utils.PrefixIndexedFieldsCfg: dbcfg.PrefixIndexedFields,
- utils.RemoteConnsCfg: dbcfg.RmtConns,
- utils.ReplicationConnsCfg: dbcfg.RplConns,
- utils.OptsCfg: opts,
- }
- if dbcfg.Items != nil {
- items := make(map[string]any)
- for key, item := range dbcfg.Items {
- items[key] = item.AsMapInterface()
- }
- mp[utils.ItemsCfg] = items
- }
- if dbcfg.Port != utils.EmptyString {
- dbPort, _ := strconv.Atoi(dbcfg.Port)
- mp[utils.DataDbPortCfg] = dbPort
- }
- return mp
-}
-
-func diffStorDBOptsJsonCfg(d *DBOptsJson, v1, v2 *StorDBOpts) *DBOptsJson {
- if d == nil {
- d = new(DBOptsJson)
- }
- if v1.InternalDBDumpPath != v2.InternalDBDumpPath {
- d.InternalDBDumpPath = utils.StringPointer(v2.InternalDBDumpPath)
- }
- if v1.InternalDBBackupPath != v2.InternalDBBackupPath {
- d.InternalDBBackupPath = utils.StringPointer(v2.InternalDBBackupPath)
- }
- if v1.InternalDBStartTimeout != v2.InternalDBStartTimeout {
- d.InternalDBStartTimeout = utils.StringPointer(v2.InternalDBStartTimeout.String())
- }
- if v1.InternalDBDumpInterval != v2.InternalDBDumpInterval {
- d.InternalDBDumpInterval = utils.StringPointer(v2.InternalDBDumpInterval.String())
- }
- if v1.InternalDBRewriteInterval != v2.InternalDBRewriteInterval {
- d.InternalDBRewriteInterval = utils.StringPointer(v2.InternalDBRewriteInterval.String())
- }
- if v1.InternalDBFileSizeLimit != v2.InternalDBFileSizeLimit {
- d.InternalDBFileSizeLimit = utils.StringPointer(fmt.Sprint(v2.InternalDBFileSizeLimit))
- }
- if v1.SQLMaxOpenConns != v2.SQLMaxOpenConns {
- d.SQLMaxOpenConns = utils.IntPointer(v2.SQLMaxOpenConns)
- }
- if v1.SQLMaxIdleConns != v2.SQLMaxIdleConns {
- d.SQLMaxIdleConns = utils.IntPointer(v2.SQLMaxIdleConns)
- }
- if v1.SQLLogLevel != v2.SQLLogLevel {
- d.SQLLogLevel = utils.IntPointer(v2.SQLLogLevel)
- }
- if v1.SQLConnMaxLifetime != v2.SQLConnMaxLifetime {
- d.SQLConnMaxLifetime = utils.StringPointer(v2.SQLConnMaxLifetime.String())
- }
- if !reflect.DeepEqual(v1.SQLDSNParams, v2.SQLDSNParams) {
- d.MYSQLDSNParams = v2.SQLDSNParams
- }
- if v1.PgSSLMode != v2.PgSSLMode {
- d.PgSSLMode = utils.StringPointer(v2.PgSSLMode)
- }
- if v1.PgSSLCert != v2.PgSSLCert {
- d.PgSSLCert = utils.StringPointer(v2.PgSSLCert)
- }
- if v1.PgSSLKey != v2.PgSSLKey {
- d.PgSSLKey = utils.StringPointer(v2.PgSSLKey)
- }
- if v1.PgSSLPassword != v2.PgSSLPassword {
- d.PgSSLPassword = utils.StringPointer(v2.PgSSLPassword)
- }
- if v1.PgSSLCertMode != v2.PgSSLCertMode {
- d.PgSSLCertMode = utils.StringPointer(v2.PgSSLCertMode)
- }
- if v1.PgSSLRootCert != v2.PgSSLRootCert {
- d.PgSSLRootCert = utils.StringPointer(v2.PgSSLRootCert)
- }
- if v1.MySQLLocation != v2.MySQLLocation {
- d.MySQLLocation = utils.StringPointer(v2.MySQLLocation)
- }
- if v1.MongoQueryTimeout != v2.MongoQueryTimeout {
- d.MongoQueryTimeout = utils.StringPointer(v2.MongoQueryTimeout.String())
- }
- if v1.MongoConnScheme != v2.MongoConnScheme {
- d.MongoConnScheme = utils.StringPointer(v2.MongoConnScheme)
- }
- return d
-}
-
-func diffStorDBJsonCfg(d *DbJsonCfg, v1, v2 *StorDbCfg) *DbJsonCfg {
- if d == nil {
- d = new(DbJsonCfg)
- }
- if v1.Type != v2.Type {
- d.Db_type = utils.StringPointer(v2.Type)
- }
- if v1.Host != v2.Host {
- d.Db_host = utils.StringPointer(v2.Host)
- }
- if v1.Port != v2.Port {
- port, _ := strconv.Atoi(v2.Port)
- d.Db_port = utils.IntPointer(port)
- }
- if v1.Name != v2.Name {
- d.Db_name = utils.StringPointer(v2.Name)
- }
- if v1.User != v2.User {
- d.Db_user = utils.StringPointer(v2.User)
- }
- if v1.Password != v2.Password {
- d.Db_password = utils.StringPointer(v2.Password)
- }
- if !slices.Equal(v1.RmtConns, v2.RmtConns) {
- d.Remote_conns = &v2.RmtConns
- }
-
- if !slices.Equal(v1.RplConns, v2.RplConns) {
- d.Replication_conns = &v2.RplConns
- }
-
- if !slices.Equal(v1.StringIndexedFields, v2.StringIndexedFields) {
- d.String_indexed_fields = &v2.StringIndexedFields
- }
- if !slices.Equal(v1.PrefixIndexedFields, v2.PrefixIndexedFields) {
- d.Prefix_indexed_fields = &v2.PrefixIndexedFields
- }
-
- d.Items = diffMapItemOptJson(d.Items, v1.Items, v2.Items)
- d.Opts = diffStorDBOptsJsonCfg(d.Opts, v1.Opts, v2.Opts)
-
- return d
-}
-
-// ToTransCacheOpts returns to ltcache.TransCacheOpts from StorDBOpts
-func (s *StorDBOpts) ToTransCacheOpts() *ltcache.TransCacheOpts {
- if s == nil {
- return nil
- }
- return &ltcache.TransCacheOpts{
- DumpPath: s.InternalDBDumpPath,
- BackupPath: s.InternalDBBackupPath,
- StartTimeout: s.InternalDBStartTimeout,
- DumpInterval: s.InternalDBDumpInterval,
- RewriteInterval: s.InternalDBRewriteInterval,
- FileSizeLimit: s.InternalDBFileSizeLimit,
- }
-}
diff --git a/debian/dirs b/debian/dirs
index f6fcab367..4e5dcf250 100644
--- a/debian/dirs
+++ b/debian/dirs
@@ -1,7 +1,5 @@
-var/lib/cgrates/internal_db/datadb
-var/lib/cgrates/internal_db/backup/datadb
-var/lib/cgrates/internal_db/stordb
-var/lib/cgrates/internal_db/backup/stordb
+var/lib/cgrates/internal_db/db
+var/lib/cgrates/internal_db/backup/db
var/lib/cgrates/internal_db/configdb
var/lib/cgrates/internal_db/backup/configdb
var/log/cgrates
diff --git a/engine/datadbmock.go b/engine/datadbmock.go
index 083a29ef4..3522f862c 100644
--- a/engine/datadbmock.go
+++ b/engine/datadbmock.go
@@ -585,6 +585,14 @@ func (dbM *DataDBMock) BackupDataDB(backupFolderPath string, zip bool) (err erro
return utils.ErrNotImplemented
}
-func (dbM *DataDBMock) BackupStorDBDump(backupFolderPath string, zip bool) (err error) {
+func (dbM *DataDBMock) SetCDR(_ *context.Context, cdr *utils.CGREvent, allowUpdate bool) error {
+ return utils.ErrNotImplemented
+}
+
+func (dbM *DataDBMock) GetCDRs(ctx *context.Context, qryFltr []*Filter, opts map[string]any) ([]*utils.CDR, error) {
+ return nil, utils.ErrNotImplemented
+}
+
+func (dbM *DataDBMock) RemoveCDRs(ctx *context.Context, qryFltr []*Filter) (err error) {
return utils.ErrNotImplemented
}
diff --git a/engine/datamanager.go b/engine/datamanager.go
index d974ea28a..94982b32c 100644
--- a/engine/datamanager.go
+++ b/engine/datamanager.go
@@ -22,6 +22,7 @@ import (
"fmt"
"slices"
"strings"
+ "sync"
"github.com/cgrates/baningo"
"github.com/cgrates/birpc/context"
@@ -83,33 +84,159 @@ var (
)
// NewDataManager returns a new DataManager
-func NewDataManager(dataDB DataDB, cfg *config.CGRConfig, connMgr *ConnManager) *DataManager {
+func NewDataManager(dbConns *DBConnManager, cfg *config.CGRConfig, connMgr *ConnManager) *DataManager {
ms, _ := utils.NewMarshaler(cfg.GeneralCfg().DBDataEncoding)
+ dbConns.dataDBCfg = cfg.DbCfg()
return &DataManager{
- dataDB: dataDB,
+ dbConns: dbConns,
cfg: cfg,
connMgr: connMgr,
ms: ms,
}
}
+// DBConnManager is the storage manager for all CGRateS DataDBs
+// transparently manages data retrieval, further serialization and caching
+type DBConnManager struct {
+ sync.RWMutex // used for locking when using the dataDBs map, important for config reloading
+ dataDBs map[string]DataDB // Holds all DataDBs connected with CGRateS
+ dataDBCfg *config.DbCfg // Hold the data_db config
+}
+
+// NewDBConnManager returns a new DBConnManager
+func NewDBConnManager(dataDBs map[string]DataDB, dataDBCfg *config.DbCfg) *DBConnManager {
+ return &DBConnManager{
+ dataDBs: dataDBs,
+ dataDBCfg: dataDBCfg,
+ }
+}
+
+// GetConn returns the DataDB and DBConn config where the provided itemID is stored.
+// Returns the *default dataDB conn if it can't find the item's corresponding DB
+func (dbConns *DBConnManager) GetConn(itemID string) (db DataDB, dbConnCfg *config.DBConn, err error) {
+ dbConns.RLock()
+ defer dbConns.RUnlock()
+ var ok bool
+ dbConnID, ok := dbConns.dataDBCfg.Items[itemID]
+ if !ok {
+ return nil, nil, fmt.Errorf("couldn't find item with ID: <%v>", itemID)
+ }
+ if db, ok = dbConns.dataDBs[dbConnID.DBConn]; !ok {
+ // return *default db if DBConn is not found
+ return dbConns.dataDBs[utils.MetaDefault], dbConns.dataDBCfg.DBConns[dbConnID.DBConn], nil
+ }
+ dbConnCfg = dbConns.dataDBCfg.DBConns[dbConnID.DBConn]
+ return
+}
+
+// AddDataDBDriver adds a new DataDBDriver to DBConnManager
+func (dbConns *DBConnManager) AddDataDBDriver(dbID string, d DataDBDriver) {
+ if dbConns.dataDBs == nil {
+ dbConns.dataDBs = make(map[string]DataDB)
+ }
+ dbConns.dataDBs[dbID] = d
+}
+
// DataManager is the data storage manager for CGRateS
// transparently manages data retrieval, further serialization and caching
type DataManager struct {
- dataDB DataDB
+ dbConns *DBConnManager
cfg *config.CGRConfig
connMgr *ConnManager
ms utils.Marshaler
}
-// DataDB exports access to dataDB
-func (dm *DataManager) DataDB() DataDB {
+// DBConns exports access to DBConns
+func (dm *DataManager) DBConns() *DBConnManager {
if dm != nil {
- return dm.dataDB
+ return dm.dbConns
}
return nil
}
+// DataDB exports access to dataDB
+func (dm *DataManager) DataDB() map[string]DataDB {
+ if dm != nil {
+ return dm.dbConns.dataDBs
+ }
+ return nil
+}
+
+// Will return the DataDB based on where the item with the prefix is stored
+func (dbConns *DBConnManager) getDataDBForPrefix(prefix string) (dataDB DataDB, err error) {
+ var itemID string
+ switch prefix {
+ case utils.ResourceProfilesPrefix:
+ itemID = utils.MetaResourceProfiles
+ case utils.ResourcesPrefix:
+ itemID = utils.MetaResources
+ case utils.IPProfilesPrefix:
+ itemID = utils.MetaIPProfiles
+ case utils.IPAllocationsPrefix:
+ itemID = utils.MetaIPAllocations
+ case utils.StatQueueProfilePrefix:
+ itemID = utils.MetaStatQueueProfiles
+ case utils.StatQueuePrefix:
+ itemID = utils.MetaStatQueues
+ case utils.ThresholdProfilePrefix:
+ itemID = utils.MetaThresholdProfiles
+ case utils.RankingProfilePrefix:
+ itemID = utils.MetaRankingProfiles
+ case utils.RankingPrefix:
+ itemID = utils.MetaRankings
+ case utils.TrendProfilePrefix:
+ itemID = utils.MetaTrendProfiles
+ case utils.TrendPrefix:
+ itemID = utils.MetaTrends
+ case utils.ThresholdPrefix:
+ itemID = utils.MetaThresholds
+ case utils.FilterPrefix:
+ itemID = utils.MetaFilters
+ case utils.RouteProfilePrefix:
+ itemID = utils.MetaRouteProfiles
+ case utils.AttributeProfilePrefix:
+ itemID = utils.MetaAttributeProfiles
+ case utils.ChargerProfilePrefix:
+ itemID = utils.MetaChargerProfiles
+ case utils.AccountPrefix:
+ itemID = utils.MetaAccounts
+ case utils.RateProfilePrefix:
+ itemID = utils.MetaRateProfiles
+ case utils.ActionProfilePrefix:
+ itemID = utils.MetaActionProfiles
+ case utils.AttributeFilterIndexes:
+ itemID = utils.CacheAttributeFilterIndexes
+ case utils.ResourceFilterIndexes:
+ itemID = utils.CacheResourceFilterIndexes
+ case utils.IPFilterIndexes:
+ itemID = utils.CacheIPFilterIndexes
+ case utils.StatFilterIndexes:
+ itemID = utils.CacheStatFilterIndexes
+ case utils.ThresholdFilterIndexes:
+ itemID = utils.CacheThresholdFilterIndexes
+ case utils.RouteFilterIndexes:
+ itemID = utils.CacheRouteFilterIndexes
+ case utils.ChargerFilterIndexes:
+ itemID = utils.CacheChargerFilterIndexes
+ case utils.RateProfilesFilterIndexPrfx:
+ itemID = utils.CacheRateProfilesFilterIndexes
+ case utils.RateFilterIndexPrfx:
+ itemID = utils.CacheRateFilterIndexes
+ case utils.ActionProfilesFilterIndexPrfx:
+ itemID = utils.CacheActionProfilesFilterIndexes
+ case utils.AccountFilterIndexPrfx:
+ itemID = utils.CacheAccountsFilterIndexes
+ case utils.FilterIndexPrfx:
+ itemID = utils.CacheReverseFilterIndexes
+ case utils.LoadIDPrefix:
+ itemID = utils.MetaLoadIDs
+ default:
+ return nil, fmt.Errorf("unknown prefix: <%v>", prefix)
+ }
+ dataDB, _, err = dbConns.GetConn(itemID)
+ return
+}
+
func (dm *DataManager) CacheDataFromDB(ctx *context.Context, prfx string, ids []string, mustBeCached bool) (err error) {
if dm == nil {
return utils.ErrNoDatabaseConn
@@ -130,7 +257,14 @@ func (dm *DataManager) CacheDataFromDB(ctx *context.Context, prfx string, ids []
if mustBeCached {
ids = Cache.GetItemIDs(utils.CachePrefixToInstance[prfx], utils.EmptyString)
} else {
- if ids, err = dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ dataDB, err := dm.dbConns.getDataDBForPrefix(prfx)
+ if err != nil {
+ return utils.NewCGRError(utils.DataManager,
+ utils.ServerErrorCaps,
+ err.Error(),
+ fmt.Sprintf("DataManager error <%s> for prefix <%s>", err.Error(), prfx))
+ }
+ if ids, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return utils.NewCGRError(utils.DataManager,
utils.ServerErrorCaps,
err.Error(),
@@ -335,17 +469,21 @@ func (dm *DataManager) GetFilter(ctx *context.Context, tenant, id string, cacheR
err = utils.ErrNoDatabaseConn
return
} else {
- fltr, err = dm.DataDB().GetFilterDrv(ctx, tenant, id)
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaFilters)
if err != nil {
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaFilters]; err == utils.ErrNotFound && itm.Remote {
- if err = dm.connMgr.Call(ctx, dm.cfg.DataDbCfg().RmtConns, utils.ReplicatorSv1GetFilter,
+ return nil, err
+ }
+ fltr, err = dataDB.GetFilterDrv(ctx, tenant, id)
+ if err != nil {
+ if itm := dm.cfg.DbCfg().Items[utils.MetaFilters]; err == utils.ErrNotFound && itm.Remote {
+ if err = dm.connMgr.Call(ctx, dbCfg.RmtConns, utils.ReplicatorSv1GetFilter,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID, utils.EmptyString,
- utils.FirstNonEmpty(dm.cfg.DataDbCfg().RmtConnID,
+ utils.FirstNonEmpty(dbCfg.RmtConnID,
dm.cfg.GeneralCfg().NodeID)),
}, &fltr); err == nil {
- err = dm.dataDB.SetFilterDrv(ctx, fltr)
+ err = dataDB.SetFilterDrv(ctx, fltr)
}
}
if err != nil {
@@ -356,7 +494,7 @@ func (dm *DataManager) GetFilter(ctx *context.Context, tenant, id string, cacheR
return nil, errCh
}
}
- return
+ return fltr, err
}
}
if err = fltr.Compile(); err != nil { // only compile the value when we get the filter from DB or from remote0
@@ -381,7 +519,11 @@ func (dm *DataManager) SetFilter(ctx *context.Context, fltr *Filter, withIndex b
utils.NonTransactional); err != nil && err != utils.ErrNotFound {
return err
}
- if err = dm.DataDB().SetFilterDrv(ctx, fltr); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaFilters)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetFilterDrv(ctx, fltr); err != nil {
return
}
if withIndex {
@@ -389,15 +531,15 @@ func (dm *DataManager) SetFilter(ctx *context.Context, fltr *Filter, withIndex b
return
}
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaFilters]; itm.Replicate {
- err = replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaFilters]; itm.Replicate {
+ err = replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.FilterPrefix, fltr.TenantID(), // this are used to get the host IDs from cache
utils.ReplicatorSv1SetFilter,
&FilterWithAPIOpts{
Filter: fltr,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -426,21 +568,25 @@ func (dm *DataManager) RemoveFilter(ctx *context.Context, tenant, id string, wit
tntCtx, utils.ToJSON(rcvIndx))
}
}
- if err = dm.DataDB().RemoveFilterDrv(ctx, tenant, id); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaFilters)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveFilterDrv(ctx, tenant, id); err != nil {
return
}
if oldFlt == nil {
return utils.ErrNotFound
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaFilters]; itm.Replicate {
- replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaFilters]; itm.Replicate {
+ replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.FilterPrefix, utils.ConcatenatedKey(tenant, id), // this are used to get the host IDs from cache
utils.ReplicatorSv1RemoveFilter,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -460,17 +606,21 @@ func (dm *DataManager) GetThreshold(ctx *context.Context, tenant, id string,
err = utils.ErrNoDatabaseConn
return
}
- th, err = dm.dataDB.GetThresholdDrv(ctx, tenant, id)
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaThresholds)
if err != nil {
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaThresholds]; err == utils.ErrNotFound && itm.Remote {
- if err = dm.connMgr.Call(ctx, dm.cfg.DataDbCfg().RmtConns,
+ return nil, err
+ }
+ th, err = dataDB.GetThresholdDrv(ctx, tenant, id)
+ if err != nil {
+ if itm := dm.cfg.DbCfg().Items[utils.MetaThresholds]; err == utils.ErrNotFound && itm.Remote {
+ if err = dm.connMgr.Call(ctx, dbCfg.RmtConns,
utils.ReplicatorSv1GetThreshold, &utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID, utils.EmptyString,
- utils.FirstNonEmpty(dm.cfg.DataDbCfg().RmtConnID,
+ utils.FirstNonEmpty(dbCfg.RmtConnID,
dm.cfg.GeneralCfg().NodeID)),
}, &th); err == nil {
- err = dm.dataDB.SetThresholdDrv(ctx, th)
+ err = dataDB.SetThresholdDrv(ctx, th)
}
}
if err != nil {
@@ -497,18 +647,22 @@ func (dm *DataManager) SetThreshold(ctx *context.Context, th *Threshold) (err er
if dm == nil {
return utils.ErrNoDatabaseConn
}
- if err = dm.DataDB().SetThresholdDrv(ctx, th); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaThresholds)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetThresholdDrv(ctx, th); err != nil {
return
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaThresholds]; itm.Replicate {
- err = replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaThresholds]; itm.Replicate {
+ err = replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.ThresholdPrefix, th.TenantID(), // this are used to get the host IDs from cache
utils.ReplicatorSv1SetThreshold,
&ThresholdWithAPIOpts{
Threshold: th,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -517,18 +671,22 @@ func (dm *DataManager) RemoveThreshold(ctx *context.Context, tenant, id string)
if dm == nil {
return utils.ErrNoDatabaseConn
}
- if err = dm.DataDB().RemoveThresholdDrv(ctx, tenant, id); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaThresholds)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveThresholdDrv(ctx, tenant, id); err != nil {
return
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaThresholds]; itm.Replicate {
- replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaThresholds]; itm.Replicate {
+ replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.ThresholdPrefix, utils.ConcatenatedKey(tenant, id), // this are used to get the host IDs from cache
utils.ReplicatorSv1RemoveThreshold,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -548,18 +706,22 @@ func (dm *DataManager) GetThresholdProfile(ctx *context.Context, tenant, id stri
err = utils.ErrNoDatabaseConn
return
}
- th, err = dm.dataDB.GetThresholdProfileDrv(ctx, tenant, id)
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaThresholdProfiles)
if err != nil {
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaThresholdProfiles]; err == utils.ErrNotFound && itm.Remote {
- if err = dm.connMgr.Call(ctx, dm.cfg.DataDbCfg().RmtConns,
+ return nil, err
+ }
+ th, err = dataDB.GetThresholdProfileDrv(ctx, tenant, id)
+ if err != nil {
+ if itm := dm.cfg.DbCfg().Items[utils.MetaThresholdProfiles]; err == utils.ErrNotFound && itm.Remote {
+ if err = dm.connMgr.Call(ctx, dbCfg.RmtConns,
utils.ReplicatorSv1GetThresholdProfile,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID, utils.EmptyString,
- utils.FirstNonEmpty(dm.cfg.DataDbCfg().RmtConnID,
+ utils.FirstNonEmpty(dbCfg.RmtConnID,
dm.cfg.GeneralCfg().NodeID)),
}, &th); err == nil {
- err = dm.dataDB.SetThresholdProfileDrv(ctx, th)
+ err = dataDB.SetThresholdProfileDrv(ctx, th)
}
}
if err != nil {
@@ -598,7 +760,11 @@ func (dm *DataManager) SetThresholdProfile(ctx *context.Context, th *ThresholdPr
if err != nil && err != utils.ErrNotFound {
return err
}
- if err = dm.DataDB().SetThresholdProfileDrv(ctx, th); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaThresholdProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetThresholdProfileDrv(ctx, th); err != nil {
return err
}
if withIndex {
@@ -611,15 +777,15 @@ func (dm *DataManager) SetThresholdProfile(ctx *context.Context, th *ThresholdPr
return err
}
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaThresholdProfiles]; itm.Replicate {
- if err = replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaThresholdProfiles]; itm.Replicate {
+ if err = replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.ThresholdProfilePrefix, th.TenantID(), // this are used to get the host IDs from cache
utils.ReplicatorSv1SetThresholdProfile,
&ThresholdProfileWithAPIOpts{
ThresholdProfile: th,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)}); err != nil {
+ dbCfg.RplCache, utils.EmptyString)}); err != nil {
return
}
}
@@ -652,7 +818,11 @@ func (dm *DataManager) RemoveThresholdProfile(ctx *context.Context, tenant, id s
if err != nil && err != utils.ErrNotFound {
return err
}
- if err = dm.DataDB().RemThresholdProfileDrv(ctx, tenant, id); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaThresholdProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemThresholdProfileDrv(ctx, tenant, id); err != nil {
return
}
if oldTh == nil {
@@ -667,15 +837,15 @@ func (dm *DataManager) RemoveThresholdProfile(ctx *context.Context, tenant, id s
return
}
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaThresholdProfiles]; itm.Replicate {
- replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaThresholdProfiles]; itm.Replicate {
+ replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.ThresholdProfilePrefix, utils.ConcatenatedKey(tenant, id), // this are used to get the host IDs from cache
utils.ReplicatorSv1RemoveThresholdProfile,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return dm.RemoveThreshold(ctx, tenant, id) // remove the threshold
}
@@ -697,24 +867,28 @@ func (dm *DataManager) GetStatQueue(ctx *context.Context, tenant, id string,
err = utils.ErrNoDatabaseConn
return
}
- sq, err = dm.dataDB.GetStatQueueDrv(ctx, tenant, id)
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaStatQueues)
if err != nil {
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaStatQueues]; err == utils.ErrNotFound && itm.Remote {
- if err = dm.connMgr.Call(ctx, dm.cfg.DataDbCfg().RmtConns, utils.ReplicatorSv1GetStatQueue,
+ return nil, err
+ }
+ sq, err = dataDB.GetStatQueueDrv(ctx, tenant, id)
+ if err != nil {
+ if itm := dm.cfg.DbCfg().Items[utils.MetaStatQueues]; err == utils.ErrNotFound && itm.Remote {
+ if err = dm.connMgr.Call(ctx, dbCfg.RmtConns, utils.ReplicatorSv1GetStatQueue,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID, utils.EmptyString,
- utils.FirstNonEmpty(dm.cfg.DataDbCfg().RmtConnID,
+ utils.FirstNonEmpty(dbCfg.RmtConnID,
dm.cfg.GeneralCfg().NodeID)),
}, &sq); err == nil {
var ssq *StoredStatQueue
- if dm.dataDB.GetStorageType() != utils.MetaInternal {
+ if dataDB.GetStorageType() != utils.MetaInternal {
// in case of internal we don't marshal
if ssq, err = NewStoredStatQueue(sq, dm.ms); err != nil {
return nil, err
}
}
- err = dm.dataDB.SetStatQueueDrv(ctx, ssq, sq)
+ err = dataDB.SetStatQueueDrv(ctx, ssq, sq)
}
}
if err != nil {
@@ -742,25 +916,29 @@ func (dm *DataManager) SetStatQueue(ctx *context.Context, sq *StatQueue) (err er
if dm == nil {
return utils.ErrNoDatabaseConn
}
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaStatQueues)
+ if err != nil {
+ return err
+ }
var ssq *StoredStatQueue
- if dm.dataDB.GetStorageType() != utils.MetaInternal {
+ if dataDB.GetStorageType() != utils.MetaInternal {
// in case of internal we don't marshal
if ssq, err = NewStoredStatQueue(sq, dm.ms); err != nil {
return
}
}
- if err = dm.dataDB.SetStatQueueDrv(ctx, ssq, sq); err != nil {
+ if err = dataDB.SetStatQueueDrv(ctx, ssq, sq); err != nil {
return
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaStatQueues]; itm.Replicate {
- err = replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaStatQueues]; itm.Replicate {
+ err = replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.StatQueuePrefix, sq.TenantID(), // this are used to get the host IDs from cache
utils.ReplicatorSv1SetStatQueue,
&StatQueueWithAPIOpts{
StatQueue: sq,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -770,18 +948,22 @@ func (dm *DataManager) RemoveStatQueue(ctx *context.Context, tenant, id string)
if dm == nil {
return utils.ErrNoDatabaseConn
}
- if err = dm.dataDB.RemStatQueueDrv(ctx, tenant, id); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaStatQueues)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemStatQueueDrv(ctx, tenant, id); err != nil {
return
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaStatQueues]; itm.Replicate {
- replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaStatQueues]; itm.Replicate {
+ replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.StatQueuePrefix, utils.ConcatenatedKey(tenant, id), // this are used to get the host IDs from cache
utils.ReplicatorSv1RemoveStatQueue,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -801,18 +983,22 @@ func (dm *DataManager) GetStatQueueProfile(ctx *context.Context, tenant, id stri
err = utils.ErrNoDatabaseConn
return
}
- sqp, err = dm.dataDB.GetStatQueueProfileDrv(ctx, tenant, id)
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaStatQueueProfiles)
if err != nil {
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaStatQueueProfiles]; err == utils.ErrNotFound && itm.Remote {
- if err = dm.connMgr.Call(ctx, dm.cfg.DataDbCfg().RmtConns,
+ return nil, err
+ }
+ sqp, err = dataDB.GetStatQueueProfileDrv(ctx, tenant, id)
+ if err != nil {
+ if itm := dm.cfg.DbCfg().Items[utils.MetaStatQueueProfiles]; err == utils.ErrNotFound && itm.Remote {
+ if err = dm.connMgr.Call(ctx, dbCfg.RmtConns,
utils.ReplicatorSv1GetStatQueueProfile,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID, utils.EmptyString,
- utils.FirstNonEmpty(dm.cfg.DataDbCfg().RmtConnID,
+ utils.FirstNonEmpty(dbCfg.RmtConnID,
dm.cfg.GeneralCfg().NodeID)),
}, &sqp); err == nil {
- err = dm.dataDB.SetStatQueueProfileDrv(ctx, sqp)
+ err = dataDB.SetStatQueueProfileDrv(ctx, sqp)
}
}
if err != nil {
@@ -851,7 +1037,11 @@ func (dm *DataManager) SetStatQueueProfile(ctx *context.Context, sqp *StatQueueP
if err != nil && err != utils.ErrNotFound {
return err
}
- if err = dm.DataDB().SetStatQueueProfileDrv(ctx, sqp); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaStatQueueProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetStatQueueProfileDrv(ctx, sqp); err != nil {
return err
}
if withIndex {
@@ -864,15 +1054,15 @@ func (dm *DataManager) SetStatQueueProfile(ctx *context.Context, sqp *StatQueueP
return err
}
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaStatQueueProfiles]; itm.Replicate {
- if err = replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaStatQueueProfiles]; itm.Replicate {
+ if err = replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.StatQueueProfilePrefix, sqp.TenantID(), // this are used to get the host IDs from cache
utils.ReplicatorSv1SetStatQueueProfile,
&StatQueueProfileWithAPIOpts{
StatQueueProfile: sqp,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)}); err != nil {
+ dbCfg.RplCache, utils.EmptyString)}); err != nil {
return
}
}
@@ -939,7 +1129,11 @@ func (dm *DataManager) RemoveStatQueueProfile(ctx *context.Context, tenant, id s
if err != nil && err != utils.ErrNotFound {
return err
}
- if err = dm.DataDB().RemStatQueueProfileDrv(ctx, tenant, id); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaStatQueueProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemStatQueueProfileDrv(ctx, tenant, id); err != nil {
return
}
if oldSts == nil {
@@ -954,15 +1148,15 @@ func (dm *DataManager) RemoveStatQueueProfile(ctx *context.Context, tenant, id s
return
}
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaStatQueueProfiles]; itm.Replicate {
- replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaStatQueueProfiles]; itm.Replicate {
+ replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.StatQueueProfilePrefix, utils.ConcatenatedKey(tenant, id), // this are used to get the host IDs from cache
utils.ReplicatorSv1RemoveStatQueueProfile,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return dm.RemoveStatQueue(ctx, tenant, id)
}
@@ -984,24 +1178,28 @@ func (dm *DataManager) GetTrend(ctx *context.Context, tenant, id string,
err = utils.ErrNoDatabaseConn
return
}
- if tr, err = dm.dataDB.GetTrendDrv(ctx, tenant, id); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaTrends)
+ if err != nil {
+ return nil, err
+ }
+ if tr, err = dataDB.GetTrendDrv(ctx, tenant, id); err != nil {
if err != utils.ErrNotFound { // database error
return
}
// ErrNotFound
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaTrends]; itm.Remote {
- if err = dm.connMgr.Call(context.TODO(), dm.cfg.DataDbCfg().RmtConns,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaTrends]; itm.Remote {
+ if err = dm.connMgr.Call(context.TODO(), dbCfg.RmtConns,
utils.ReplicatorSv1GetTrend, &utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID, utils.EmptyString,
- utils.FirstNonEmpty(dm.cfg.DataDbCfg().RmtConnID,
+ utils.FirstNonEmpty(dbCfg.RmtConnID,
dm.cfg.GeneralCfg().NodeID)),
}, &tr); err != nil {
err = utils.CastRPCErr(err)
if err != utils.ErrNotFound { // RPC error
return
}
- } else if err = dm.dataDB.SetTrendDrv(ctx, tr); err != nil {
+ } else if err = dataDB.SetTrendDrv(ctx, tr); err != nil {
return
}
}
@@ -1034,23 +1232,27 @@ func (dm *DataManager) SetTrend(ctx *context.Context, tr *utils.Trend) (err erro
if dm == nil {
return utils.ErrNoDatabaseConn
}
- if dm.dataDB.GetStorageType() != utils.MetaInternal {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaTrends)
+ if err != nil {
+ return err
+ }
+ if dataDB.GetStorageType() != utils.MetaInternal {
if tr, err = tr.Compress(dm.ms, dm.cfg.TrendSCfg().StoreUncompressedLimit); err != nil {
return
}
}
- if err = dm.DataDB().SetTrendDrv(ctx, tr); err != nil {
+ if err = dataDB.SetTrendDrv(ctx, tr); err != nil {
return
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaTrends]; itm.Replicate {
- if err = replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaTrends]; itm.Replicate {
+ if err = replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.TrendPrefix, tr.TenantID(), // this are used to get the host IDs from cache
utils.ReplicatorSv1SetTrend,
&utils.TrendWithAPIOpts{
Trend: tr,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)}); err != nil {
+ dbCfg.RplCache, utils.EmptyString)}); err != nil {
return
}
}
@@ -1062,18 +1264,22 @@ func (dm *DataManager) RemoveTrend(ctx *context.Context, tenant, id string) (err
if dm == nil {
return utils.ErrNoDatabaseConn
}
- if err = dm.DataDB().RemoveTrendDrv(ctx, tenant, id); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaTrends)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveTrendDrv(ctx, tenant, id); err != nil {
return
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaTrends]; itm.Replicate {
- replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaTrends]; itm.Replicate {
+ replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.TrendPrefix, utils.ConcatenatedKey(tenant, id), // this are used to get the host IDs from cache
utils.ReplicatorSv1RemoveTrend,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -1093,18 +1299,22 @@ func (dm *DataManager) GetTrendProfile(ctx *context.Context, tenant, id string,
err = utils.ErrNoDatabaseConn
return
}
- trp, err = dm.dataDB.GetTrendProfileDrv(ctx, tenant, id)
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaTrendProfiles)
if err != nil {
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaTrendProfiles]; err == utils.ErrNotFound && itm.Remote {
- if err = dm.connMgr.Call(ctx, dm.cfg.DataDbCfg().RmtConns,
+ return nil, err
+ }
+ trp, err = dataDB.GetTrendProfileDrv(ctx, tenant, id)
+ if err != nil {
+ if itm := dm.cfg.DbCfg().Items[utils.MetaTrendProfiles]; err == utils.ErrNotFound && itm.Remote {
+ if err = dm.connMgr.Call(ctx, dbCfg.RmtConns,
utils.ReplicatorSv1GetTrendProfile,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID, utils.EmptyString,
- utils.FirstNonEmpty(dm.cfg.DataDbCfg().RmtConnID,
+ utils.FirstNonEmpty(dbCfg.RmtConnID,
dm.cfg.GeneralCfg().NodeID)),
}, &trp); err == nil {
- err = dm.dataDB.SetTrendProfileDrv(ctx, trp)
+ err = dataDB.SetTrendProfileDrv(ctx, trp)
}
}
if err != nil {
@@ -1130,8 +1340,12 @@ func (dm *DataManager) GetTrendProfile(ctx *context.Context, tenant, id string,
func (dm *DataManager) GetTrendProfileIDs(ctx *context.Context, tenants []string) (tps map[string][]string, err error) {
prfx := utils.TrendProfilePrefix
var keys []string
+ dataDB, _, err := dm.dbConns.GetConn(utils.MetaTrendProfiles)
+ if err != nil {
+ return nil, err
+ }
if len(tenants) == 0 {
- keys, err = dm.dataDB.GetKeysForPrefix(ctx, prfx)
+ keys, err = dataDB.GetKeysForPrefix(ctx, prfx)
if err != nil {
return
}
@@ -1139,7 +1353,7 @@ func (dm *DataManager) GetTrendProfileIDs(ctx *context.Context, tenants []string
for _, tenant := range tenants {
var tntkeys []string
tntPrfx := prfx + tenant + utils.ConcatenatedKeySep
- tntkeys, err = dm.dataDB.GetKeysForPrefix(ctx, tntPrfx)
+ tntkeys, err = dataDB.GetKeysForPrefix(ctx, tntPrfx)
if err != nil {
return
}
@@ -1168,18 +1382,22 @@ func (dm *DataManager) SetTrendProfile(ctx *context.Context, trp *utils.TrendPro
if err != nil && err != utils.ErrNotFound {
return err
}
- if err = dm.DataDB().SetTrendProfileDrv(ctx, trp); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaTrendProfiles)
+ if err != nil {
return err
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaTrendProfiles]; itm.Replicate {
- err = replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if err = dataDB.SetTrendProfileDrv(ctx, trp); err != nil {
+ return err
+ }
+ if itm := dm.cfg.DbCfg().Items[utils.MetaTrendProfiles]; itm.Replicate {
+ err = replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.TrendProfilePrefix, trp.TenantID(),
utils.ReplicatorSv1SetTrendProfile,
&utils.TrendProfileWithAPIOpts{
TrendProfile: trp,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
if oldTrd == nil ||
oldTrd.QueueLength != trp.QueueLength ||
@@ -1199,22 +1417,25 @@ func (dm *DataManager) RemoveTrendProfile(ctx *context.Context, tenant, id strin
if err != nil && err != utils.ErrNotFound {
return err
}
-
- if err = dm.DataDB().RemTrendProfileDrv(ctx, tenant, id); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaTrendProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemTrendProfileDrv(ctx, tenant, id); err != nil {
return
}
if oldTrs == nil {
return utils.ErrNotFound
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaRankingProfiles]; itm.Replicate {
- replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaTrendProfiles]; itm.Replicate {
+ replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.TrendProfilePrefix, utils.ConcatenatedKey(tenant, id), // this are used to get the host IDs from cache
utils.ReplicatorSv1RemoveTrendProfile,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return dm.RemoveTrend(ctx, tenant, id)
}
@@ -1233,18 +1454,22 @@ func (dm *DataManager) GetRankingProfile(ctx *context.Context, tenant, id string
err = utils.ErrNoDatabaseConn
return
}
- rgp, err = dm.dataDB.GetRankingProfileDrv(ctx, tenant, id)
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaRankingProfiles)
if err != nil {
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaRankingProfiles]; err == utils.ErrNotFound && itm.Remote {
- if err = dm.connMgr.Call(context.TODO(), dm.cfg.DataDbCfg().RmtConns,
+ return nil, err
+ }
+ rgp, err = dataDB.GetRankingProfileDrv(ctx, tenant, id)
+ if err != nil {
+ if itm := dm.cfg.DbCfg().Items[utils.MetaRankingProfiles]; err == utils.ErrNotFound && itm.Remote {
+ if err = dm.connMgr.Call(context.TODO(), dbCfg.RmtConns,
utils.ReplicatorSv1GetRankingProfile,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID, utils.EmptyString,
- utils.FirstNonEmpty(dm.cfg.DataDbCfg().RmtConnID,
+ utils.FirstNonEmpty(dbCfg.RmtConnID,
dm.cfg.GeneralCfg().NodeID)),
}, &rgp); err == nil {
- err = dm.dataDB.SetRankingProfileDrv(ctx, rgp)
+ err = dataDB.SetRankingProfileDrv(ctx, rgp)
}
}
if err != nil {
@@ -1270,8 +1495,12 @@ func (dm *DataManager) GetRankingProfile(ctx *context.Context, tenant, id string
func (dm *DataManager) GetRankingProfileIDs(ctx *context.Context, tenants []string) (rns map[string][]string, err error) {
prfx := utils.RankingProfilePrefix
var keys []string
+ dataDB, _, err := dm.dbConns.GetConn(utils.MetaRankingProfiles)
+ if err != nil {
+ return nil, err
+ }
if len(tenants) == 0 {
- keys, err = dm.dataDB.GetKeysForPrefix(ctx, prfx)
+ keys, err = dataDB.GetKeysForPrefix(ctx, prfx)
if err != nil {
return
}
@@ -1279,7 +1508,7 @@ func (dm *DataManager) GetRankingProfileIDs(ctx *context.Context, tenants []stri
for _, tenant := range tenants {
var tntkeys []string
tntPrfx := prfx + tenant + utils.ConcatenatedKeySep
- tntkeys, err = dm.dataDB.GetKeysForPrefix(ctx, tntPrfx)
+ tntkeys, err = dataDB.GetKeysForPrefix(ctx, tntPrfx)
if err != nil {
return
}
@@ -1307,18 +1536,22 @@ func (dm *DataManager) SetRankingProfile(ctx *context.Context, rnp *utils.Rankin
if err != nil && err != utils.ErrNotFound {
return err
}
- if err = dm.DataDB().SetRankingProfileDrv(ctx, rnp); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaRankingProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetRankingProfileDrv(ctx, rnp); err != nil {
return
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaRankingProfiles]; itm.Replicate {
- err = replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaRankingProfiles]; itm.Replicate {
+ err = replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.RankingProfilePrefix, rnp.TenantID(),
utils.ReplicatorSv1SetRankingProfile,
&utils.RankingProfileWithAPIOpts{
RankingProfile: rnp,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
if oldRnk == nil || oldRnk.Sorting != rnp.Sorting ||
oldRnk.Schedule != rnp.Schedule {
@@ -1337,21 +1570,25 @@ func (dm *DataManager) RemoveRankingProfile(ctx *context.Context, tenant, id str
if err != nil && err != utils.ErrNotFound {
return err
}
- if err = dm.DataDB().RemRankingProfileDrv(ctx, tenant, id); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaRankingProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemRankingProfileDrv(ctx, tenant, id); err != nil {
return
}
if oldSgs == nil {
return utils.ErrNotFound
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaRankingProfiles]; itm.Replicate {
- replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaRankingProfiles]; itm.Replicate {
+ replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.RankingProfilePrefix, utils.ConcatenatedKey(tenant, id), // this are used to get the host IDs from cache
utils.ReplicatorSv1RemoveRankingProfile,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -1369,20 +1606,24 @@ func (dm *DataManager) GetRanking(ctx *context.Context, tenant, id string, cache
err = utils.ErrNoDatabaseConn
return
}
- if rn, err = dm.dataDB.GetRankingDrv(ctx, tenant, id); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaRankings)
+ if err != nil {
+ return nil, err
+ }
+ if rn, err = dataDB.GetRankingDrv(ctx, tenant, id); err != nil {
if err != utils.ErrNotFound { // database error
return
}
// ErrNotFound
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaRankings]; itm.Remote {
- if err = dm.connMgr.Call(context.TODO(), dm.cfg.DataDbCfg().RmtConns,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaRankings]; itm.Remote {
+ if err = dm.connMgr.Call(context.TODO(), dbCfg.RmtConns,
utils.ReplicatorSv1GetRanking, &utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID, utils.EmptyString,
- utils.FirstNonEmpty(dm.cfg.DataDbCfg().RmtConnID,
+ utils.FirstNonEmpty(dbCfg.RmtConnID,
dm.cfg.GeneralCfg().NodeID)),
}, &rn); err == nil {
- err = dm.dataDB.SetRankingDrv(ctx, rn)
+ err = dataDB.SetRankingDrv(ctx, rn)
}
}
if err != nil {
@@ -1408,18 +1649,22 @@ func (dm *DataManager) SetRanking(ctx *context.Context, rn *utils.Ranking) (err
if dm == nil {
return utils.ErrNoDatabaseConn
}
- if err = dm.DataDB().SetRankingDrv(ctx, rn); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaRankings)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetRankingDrv(ctx, rn); err != nil {
return
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaRankings]; itm.Replicate {
- if err = replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaRankings]; itm.Replicate {
+ if err = replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.RankingPrefix, rn.TenantID(), // this are used to get the host IDs from cache
utils.ReplicatorSv1SetRanking,
&utils.RankingWithAPIOpts{
Ranking: rn,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)}); err != nil {
+ dbCfg.RplCache, utils.EmptyString)}); err != nil {
return
}
}
@@ -1431,18 +1676,22 @@ func (dm *DataManager) RemoveRanking(ctx *context.Context, tenant, id string) (e
if dm == nil {
return utils.ErrNoDatabaseConn
}
- if err = dm.DataDB().RemoveRankingDrv(ctx, tenant, id); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaRankings)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveRankingDrv(ctx, tenant, id); err != nil {
return
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaRankings]; itm.Replicate {
- replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaRankings]; itm.Replicate {
+ replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.RankingPrefix, utils.ConcatenatedKey(tenant, id), // this are used to get the host IDs from cache
utils.ReplicatorSv1RemoveRanking,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -1462,18 +1711,22 @@ func (dm *DataManager) GetResource(ctx *context.Context, tenant, id string, cach
err = utils.ErrNoDatabaseConn
return
}
- rs, err = dm.dataDB.GetResourceDrv(ctx, tenant, id)
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaResources)
if err != nil {
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaResources]; err == utils.ErrNotFound && itm.Remote {
- if err = dm.connMgr.Call(ctx, dm.cfg.DataDbCfg().RmtConns,
+ return nil, err
+ }
+ rs, err = dataDB.GetResourceDrv(ctx, tenant, id)
+ if err != nil {
+ if itm := dm.cfg.DbCfg().Items[utils.MetaResources]; err == utils.ErrNotFound && itm.Remote {
+ if err = dm.connMgr.Call(ctx, dbCfg.RmtConns,
utils.ReplicatorSv1GetResource,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID, utils.EmptyString,
- utils.FirstNonEmpty(dm.cfg.DataDbCfg().RmtConnID,
+ utils.FirstNonEmpty(dbCfg.RmtConnID,
dm.cfg.GeneralCfg().NodeID)),
}, &rs); err == nil {
- err = dm.dataDB.SetResourceDrv(ctx, rs)
+ err = dataDB.SetResourceDrv(ctx, rs)
}
}
if err != nil {
@@ -1501,18 +1754,22 @@ func (dm *DataManager) SetResource(ctx *context.Context, rs *utils.Resource) (er
if dm == nil {
return utils.ErrNoDatabaseConn
}
- if err = dm.DataDB().SetResourceDrv(ctx, rs); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaResources)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetResourceDrv(ctx, rs); err != nil {
return
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaResources]; itm.Replicate {
- err = replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaResources]; itm.Replicate {
+ err = replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.ResourcesPrefix, rs.TenantID(), // this are used to get the host IDs from cache
utils.ReplicatorSv1SetResource,
&utils.ResourceWithAPIOpts{
Resource: rs,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -1521,18 +1778,22 @@ func (dm *DataManager) RemoveResource(ctx *context.Context, tenant, id string) (
if dm == nil {
return utils.ErrNoDatabaseConn
}
- if err = dm.DataDB().RemoveResourceDrv(ctx, tenant, id); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaResources)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveResourceDrv(ctx, tenant, id); err != nil {
return
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaResources]; itm.Replicate {
- replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaResources]; itm.Replicate {
+ replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.ResourcesPrefix, utils.ConcatenatedKey(tenant, id), // this are used to get the host IDs from cache
utils.ReplicatorSv1RemoveResource,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -1552,17 +1813,21 @@ func (dm *DataManager) GetResourceProfile(ctx *context.Context, tenant, id strin
err = utils.ErrNoDatabaseConn
return
}
- rp, err = dm.dataDB.GetResourceProfileDrv(ctx, tenant, id)
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaResourceProfiles)
if err != nil {
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaResourceProfiles]; err == utils.ErrNotFound && itm.Remote {
- if err = dm.connMgr.Call(ctx, dm.cfg.DataDbCfg().RmtConns,
+ return nil, err
+ }
+ rp, err = dataDB.GetResourceProfileDrv(ctx, tenant, id)
+ if err != nil {
+ if itm := dm.cfg.DbCfg().Items[utils.MetaResourceProfiles]; err == utils.ErrNotFound && itm.Remote {
+ if err = dm.connMgr.Call(ctx, dbCfg.RmtConns,
utils.ReplicatorSv1GetResourceProfile, &utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID, utils.EmptyString,
- utils.FirstNonEmpty(dm.cfg.DataDbCfg().RmtConnID,
+ utils.FirstNonEmpty(dbCfg.RmtConnID,
dm.cfg.GeneralCfg().NodeID)),
}, &rp); err == nil {
- err = dm.dataDB.SetResourceProfileDrv(ctx, rp)
+ err = dataDB.SetResourceProfileDrv(ctx, rp)
}
}
if err != nil {
@@ -1601,7 +1866,11 @@ func (dm *DataManager) SetResourceProfile(ctx *context.Context, rp *utils.Resour
if err != nil && err != utils.ErrNotFound {
return err
}
- if err = dm.DataDB().SetResourceProfileDrv(ctx, rp); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaResourceProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetResourceProfileDrv(ctx, rp); err != nil {
return err
}
if withIndex {
@@ -1615,15 +1884,15 @@ func (dm *DataManager) SetResourceProfile(ctx *context.Context, rp *utils.Resour
}
Cache.Clear([]string{utils.CacheEventResources})
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaResourceProfiles]; itm.Replicate {
- if err = replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaResourceProfiles]; itm.Replicate {
+ if err = replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.ResourceProfilesPrefix, rp.TenantID(), // this are used to get the host IDs from cache
utils.ReplicatorSv1SetResourceProfile,
&utils.ResourceProfileWithAPIOpts{
ResourceProfile: rp,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)}); err != nil {
+ dbCfg.RplCache, utils.EmptyString)}); err != nil {
return
}
}
@@ -1655,7 +1924,11 @@ func (dm *DataManager) RemoveResourceProfile(ctx *context.Context, tenant, id st
if err != nil && err != utils.ErrNotFound {
return err
}
- if err = dm.DataDB().RemoveResourceProfileDrv(ctx, tenant, id); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaResourceProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveResourceProfileDrv(ctx, tenant, id); err != nil {
return
}
if oldRes == nil {
@@ -1670,15 +1943,15 @@ func (dm *DataManager) RemoveResourceProfile(ctx *context.Context, tenant, id st
return
}
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaResourceProfiles]; itm.Replicate {
- replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaResourceProfiles]; itm.Replicate {
+ replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.ResourceProfilesPrefix, utils.ConcatenatedKey(tenant, id), // this are used to get the host IDs from cache
utils.ReplicatorSv1RemoveResourceProfile,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return dm.RemoveResource(ctx, tenant, id)
}
@@ -1702,18 +1975,22 @@ func (dm *DataManager) GetIPAllocations(ctx *context.Context, tenant, id string,
err = utils.ErrNoDatabaseConn
return
}
- ip, err = dm.dataDB.GetIPAllocationsDrv(ctx, tenant, id)
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaIPAllocations)
if err != nil {
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaIPAllocations]; err == utils.ErrNotFound && itm.Remote {
- if err = dm.connMgr.Call(ctx, dm.cfg.DataDbCfg().RmtConns,
+ return nil, err
+ }
+ ip, err = dataDB.GetIPAllocationsDrv(ctx, tenant, id)
+ if err != nil {
+ if itm := dm.cfg.DbCfg().Items[utils.MetaIPAllocations]; err == utils.ErrNotFound && itm.Remote {
+ if err = dm.connMgr.Call(ctx, dbCfg.RmtConns,
utils.ReplicatorSv1GetIPAllocations,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID, utils.EmptyString,
- utils.FirstNonEmpty(dm.cfg.DataDbCfg().RmtConnID,
+ utils.FirstNonEmpty(dbCfg.RmtConnID,
dm.cfg.GeneralCfg().NodeID)),
}, &ip); err == nil {
- err = dm.dataDB.SetIPAllocationsDrv(ctx, ip)
+ err = dataDB.SetIPAllocationsDrv(ctx, ip)
}
}
if err != nil {
@@ -1744,18 +2021,22 @@ func (dm *DataManager) SetIPAllocations(ctx *context.Context, ip *utils.IPAlloca
if dm == nil {
return utils.ErrNoDatabaseConn
}
- if err = dm.DataDB().SetIPAllocationsDrv(ctx, ip); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaIPAllocations)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetIPAllocationsDrv(ctx, ip); err != nil {
return
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaIPAllocations]; itm.Replicate {
- err = replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaIPAllocations]; itm.Replicate {
+ err = replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.IPAllocationsPrefix, ip.TenantID(), // this are used to get the host IDs from cache
utils.ReplicatorSv1SetIPAllocations,
&utils.IPAllocationsWithAPIOpts{
IPAllocations: ip,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -1764,18 +2045,22 @@ func (dm *DataManager) RemoveIPAllocations(ctx *context.Context, tenant, id stri
if dm == nil {
return utils.ErrNoDatabaseConn
}
- if err = dm.DataDB().RemoveIPAllocationsDrv(ctx, tenant, id); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaIPAllocations)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveIPAllocationsDrv(ctx, tenant, id); err != nil {
return
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaIPAllocations]; itm.Replicate {
- replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaIPAllocations]; itm.Replicate {
+ replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.IPAllocationsPrefix, utils.ConcatenatedKey(tenant, id), // this are used to get the host IDs from cache
utils.ReplicatorSv1RemoveIPAllocations,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -1795,17 +2080,21 @@ func (dm *DataManager) GetIPProfile(ctx *context.Context, tenant, id string, cac
err = utils.ErrNoDatabaseConn
return
}
- ipp, err = dm.dataDB.GetIPProfileDrv(ctx, tenant, id)
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaIPProfiles)
if err != nil {
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaIPProfiles]; err == utils.ErrNotFound && itm.Remote {
- if err = dm.connMgr.Call(ctx, dm.cfg.DataDbCfg().RmtConns,
+ return nil, err
+ }
+ ipp, err = dataDB.GetIPProfileDrv(ctx, tenant, id)
+ if err != nil {
+ if itm := dm.cfg.DbCfg().Items[utils.MetaIPProfiles]; err == utils.ErrNotFound && itm.Remote {
+ if err = dm.connMgr.Call(ctx, dbCfg.RmtConns,
utils.ReplicatorSv1GetIPProfile, &utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID, utils.EmptyString,
- utils.FirstNonEmpty(dm.cfg.DataDbCfg().RmtConnID,
+ utils.FirstNonEmpty(dbCfg.RmtConnID,
dm.cfg.GeneralCfg().NodeID)),
}, &ipp); err == nil {
- err = dm.dataDB.SetIPProfileDrv(ctx, ipp)
+ err = dataDB.SetIPProfileDrv(ctx, ipp)
}
}
if err != nil {
@@ -1844,7 +2133,11 @@ func (dm *DataManager) SetIPProfile(ctx *context.Context, ipp *utils.IPProfile,
if err != nil && err != utils.ErrNotFound {
return err
}
- if err = dm.DataDB().SetIPProfileDrv(ctx, ipp); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaIPProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetIPProfileDrv(ctx, ipp); err != nil {
return err
}
if withIndex {
@@ -1858,15 +2151,15 @@ func (dm *DataManager) SetIPProfile(ctx *context.Context, ipp *utils.IPProfile,
}
Cache.Clear([]string{utils.CacheEventIPs})
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaIPProfiles]; itm.Replicate {
- if err = replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaIPProfiles]; itm.Replicate {
+ if err = replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.IPProfilesPrefix, ipp.TenantID(), // this are used to get the host IDs from cache
utils.ReplicatorSv1SetIPProfile,
&utils.IPProfileWithAPIOpts{
IPProfile: ipp,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)}); err != nil {
+ dbCfg.RplCache, utils.EmptyString)}); err != nil {
return
}
}
@@ -1897,7 +2190,11 @@ func (dm *DataManager) RemoveIPProfile(ctx *context.Context, tenant, id string,
if err != nil && err != utils.ErrNotFound {
return err
}
- if err = dm.DataDB().RemoveIPProfileDrv(ctx, tenant, id); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaIPProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveIPProfileDrv(ctx, tenant, id); err != nil {
return
}
if oldIPP == nil {
@@ -1912,15 +2209,15 @@ func (dm *DataManager) RemoveIPProfile(ctx *context.Context, tenant, id string,
return
}
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaIPProfiles]; itm.Replicate {
- replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaIPProfiles]; itm.Replicate {
+ replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.IPProfilesPrefix, utils.ConcatenatedKey(tenant, id), // this are used to get the host IDs from cache
utils.ReplicatorSv1RemoveIPProfile,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return dm.RemoveIPAllocations(ctx, tenant, id)
}
@@ -1930,7 +2227,12 @@ func (dm *DataManager) HasData(category, subject, tenant string) (has bool, err
err = utils.ErrNoDatabaseConn
return
}
- return dm.DataDB().HasDataDrv(context.TODO(), category, subject, tenant)
+ for _, dataDB := range dm.DataDB() {
+ if has, err = dataDB.HasDataDrv(context.TODO(), category, subject, tenant); has {
+ return
+ }
+ }
+ return
}
func (dm *DataManager) GetRouteProfile(ctx *context.Context, tenant, id string, cacheRead, cacheWrite bool,
@@ -1948,17 +2250,21 @@ func (dm *DataManager) GetRouteProfile(ctx *context.Context, tenant, id string,
err = utils.ErrNoDatabaseConn
return
}
- rpp, err = dm.dataDB.GetRouteProfileDrv(ctx, tenant, id)
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaRouteProfiles)
if err != nil {
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaRouteProfiles]; err == utils.ErrNotFound && itm.Remote {
- if err = dm.connMgr.Call(ctx, dm.cfg.DataDbCfg().RmtConns, utils.ReplicatorSv1GetRouteProfile,
+ return nil, err
+ }
+ rpp, err = dataDB.GetRouteProfileDrv(ctx, tenant, id)
+ if err != nil {
+ if itm := dm.cfg.DbCfg().Items[utils.MetaRouteProfiles]; err == utils.ErrNotFound && itm.Remote {
+ if err = dm.connMgr.Call(ctx, dbCfg.RmtConns, utils.ReplicatorSv1GetRouteProfile,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID, utils.EmptyString,
- utils.FirstNonEmpty(dm.cfg.DataDbCfg().RmtConnID,
+ utils.FirstNonEmpty(dbCfg.RmtConnID,
dm.cfg.GeneralCfg().NodeID)),
}, &rpp); err == nil {
- err = dm.dataDB.SetRouteProfileDrv(ctx, rpp)
+ err = dataDB.SetRouteProfileDrv(ctx, rpp)
}
}
if err != nil {
@@ -2001,7 +2307,11 @@ func (dm *DataManager) SetRouteProfile(ctx *context.Context, rpp *utils.RoutePro
if err != nil && err != utils.ErrNotFound {
return err
}
- if err = dm.DataDB().SetRouteProfileDrv(ctx, rpp); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaRouteProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetRouteProfileDrv(ctx, rpp); err != nil {
return err
}
if withIndex {
@@ -2014,15 +2324,15 @@ func (dm *DataManager) SetRouteProfile(ctx *context.Context, rpp *utils.RoutePro
return err
}
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaRouteProfiles]; itm.Replicate {
- err = replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaRouteProfiles]; itm.Replicate {
+ err = replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.RouteProfilePrefix, rpp.TenantID(), // this are used to get the host IDs from cache
utils.ReplicatorSv1SetRouteProfile,
&utils.RouteProfileWithAPIOpts{
RouteProfile: rpp,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -2035,7 +2345,11 @@ func (dm *DataManager) RemoveRouteProfile(ctx *context.Context, tenant, id strin
if err != nil && err != utils.ErrNotFound {
return err
}
- if err = dm.DataDB().RemoveRouteProfileDrv(ctx, tenant, id); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaRouteProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveRouteProfileDrv(ctx, tenant, id); err != nil {
return
}
if oldRpp == nil {
@@ -2050,15 +2364,15 @@ func (dm *DataManager) RemoveRouteProfile(ctx *context.Context, tenant, id strin
return
}
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaRouteProfiles]; itm.Replicate {
- replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaRouteProfiles]; itm.Replicate {
+ replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.RouteProfilePrefix, utils.ConcatenatedKey(tenant, id), // this are used to get the host IDs from cache
utils.ReplicatorSv1RemoveRouteProfile,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -2082,17 +2396,21 @@ func (dm *DataManager) GetAttributeProfile(ctx *context.Context, tenant, id stri
err = utils.ErrNoDatabaseConn
return
} else {
- if attrPrfl, err = dm.dataDB.GetAttributeProfileDrv(ctx, tenant, id); err != nil {
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaAttributeProfiles]; err == utils.ErrNotFound && itm.Remote {
- if err = dm.connMgr.Call(ctx, dm.cfg.DataDbCfg().RmtConns,
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaAttributeProfiles)
+ if err != nil {
+ return nil, err
+ }
+ if attrPrfl, err = dataDB.GetAttributeProfileDrv(ctx, tenant, id); err != nil {
+ if itm := dm.cfg.DbCfg().Items[utils.MetaAttributeProfiles]; err == utils.ErrNotFound && itm.Remote {
+ if err = dm.connMgr.Call(ctx, dbCfg.RmtConns,
utils.ReplicatorSv1GetAttributeProfile,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID, utils.EmptyString,
- utils.FirstNonEmpty(dm.cfg.DataDbCfg().RmtConnID,
+ utils.FirstNonEmpty(dbCfg.RmtConnID,
dm.cfg.GeneralCfg().NodeID)),
}, &attrPrfl); err == nil {
- err = dm.dataDB.SetAttributeProfileDrv(ctx, attrPrfl)
+ err = dataDB.SetAttributeProfileDrv(ctx, attrPrfl)
}
}
if err != nil {
@@ -2147,7 +2465,11 @@ func (dm *DataManager) SetAttributeProfile(ctx *context.Context, ap *utils.Attri
attribute.Type = utils.MetaConstant
}
}
- if err = dm.DataDB().SetAttributeProfileDrv(ctx, ap); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaAttributeProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetAttributeProfileDrv(ctx, ap); err != nil {
return err
}
if withIndex {
@@ -2160,15 +2482,15 @@ func (dm *DataManager) SetAttributeProfile(ctx *context.Context, ap *utils.Attri
return err
}
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaAttributeProfiles]; itm.Replicate {
- err = replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaAttributeProfiles]; itm.Replicate {
+ err = replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.AttributeProfilePrefix, ap.TenantID(), // this are used to get the host IDs from cache
utils.ReplicatorSv1SetAttributeProfile,
&utils.AttributeProfileWithAPIOpts{
AttributeProfile: ap,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -2181,7 +2503,11 @@ func (dm *DataManager) RemoveAttributeProfile(ctx *context.Context, tenant, id s
if err != nil && err != utils.ErrNotFound {
return err
}
- if err = dm.DataDB().RemoveAttributeProfileDrv(ctx, tenant, id); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaAttributeProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveAttributeProfileDrv(ctx, tenant, id); err != nil {
return
}
if oldAttr == nil {
@@ -2196,15 +2522,15 @@ func (dm *DataManager) RemoveAttributeProfile(ctx *context.Context, tenant, id s
return
}
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaAttributeProfiles]; itm.Replicate {
- replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaAttributeProfiles]; itm.Replicate {
+ replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.AttributeProfilePrefix, utils.ConcatenatedKey(tenant, id), // this are used to get the host IDs from cache
utils.ReplicatorSv1RemoveAttributeProfile,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -2224,18 +2550,22 @@ func (dm *DataManager) GetChargerProfile(ctx *context.Context, tenant, id string
err = utils.ErrNoDatabaseConn
return
}
- cpp, err = dm.dataDB.GetChargerProfileDrv(ctx, tenant, id)
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaChargerProfiles)
if err != nil {
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaChargerProfiles]; err == utils.ErrNotFound && itm.Remote {
- if err = dm.connMgr.Call(ctx, dm.cfg.DataDbCfg().RmtConns,
+ return nil, err
+ }
+ cpp, err = dataDB.GetChargerProfileDrv(ctx, tenant, id)
+ if err != nil {
+ if itm := dm.cfg.DbCfg().Items[utils.MetaChargerProfiles]; err == utils.ErrNotFound && itm.Remote {
+ if err = dm.connMgr.Call(ctx, dbCfg.RmtConns,
utils.ReplicatorSv1GetChargerProfile,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID, utils.EmptyString,
- utils.FirstNonEmpty(dm.cfg.DataDbCfg().RmtConnID,
+ utils.FirstNonEmpty(dbCfg.RmtConnID,
dm.cfg.GeneralCfg().NodeID)),
}, &cpp); err == nil {
- err = dm.dataDB.SetChargerProfileDrv(ctx, cpp)
+ err = dataDB.SetChargerProfileDrv(ctx, cpp)
}
}
if err != nil {
@@ -2274,7 +2604,11 @@ func (dm *DataManager) SetChargerProfile(ctx *context.Context, cpp *utils.Charge
if err != nil && err != utils.ErrNotFound {
return err
}
- if err = dm.DataDB().SetChargerProfileDrv(ctx, cpp); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaChargerProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetChargerProfileDrv(ctx, cpp); err != nil {
return err
}
if withIndex {
@@ -2287,15 +2621,15 @@ func (dm *DataManager) SetChargerProfile(ctx *context.Context, cpp *utils.Charge
return err
}
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaChargerProfiles]; itm.Replicate {
- err = replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaChargerProfiles]; itm.Replicate {
+ err = replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.ChargerProfilePrefix, cpp.TenantID(), // this are used to get the host IDs from cache
utils.ReplicatorSv1SetChargerProfile,
&utils.ChargerProfileWithAPIOpts{
ChargerProfile: cpp,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -2308,7 +2642,11 @@ func (dm *DataManager) RemoveChargerProfile(ctx *context.Context, tenant, id str
if err != nil && err != utils.ErrNotFound {
return err
}
- if err = dm.DataDB().RemoveChargerProfileDrv(ctx, tenant, id); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaChargerProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveChargerProfileDrv(ctx, tenant, id); err != nil {
return
}
if oldCpp == nil {
@@ -2323,15 +2661,15 @@ func (dm *DataManager) RemoveChargerProfile(ctx *context.Context, tenant, id str
return
}
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaChargerProfiles]; itm.Replicate {
- replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaChargerProfiles]; itm.Replicate {
+ replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.ChargerProfilePrefix, utils.ConcatenatedKey(tenant, id), // this are used to get the host IDs from cache
utils.ReplicatorSv1RemoveChargerProfile,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -2341,19 +2679,23 @@ func (dm *DataManager) GetItemLoadIDs(ctx *context.Context, itemIDPrefix string,
err = utils.ErrNoDatabaseConn
return
}
- loadIDs, err = dm.DataDB().GetItemLoadIDsDrv(ctx, itemIDPrefix)
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaLoadIDs)
if err != nil {
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaLoadIDs]; err == utils.ErrNotFound && itm.Remote {
- if err = dm.connMgr.Call(ctx, dm.cfg.DataDbCfg().RmtConns,
+ return nil, err
+ }
+ loadIDs, err = dataDB.GetItemLoadIDsDrv(ctx, itemIDPrefix)
+ if err != nil {
+ if itm := dm.cfg.DbCfg().Items[utils.MetaLoadIDs]; err == utils.ErrNotFound && itm.Remote {
+ if err = dm.connMgr.Call(ctx, dbCfg.RmtConns,
utils.ReplicatorSv1GetItemLoadIDs,
&utils.StringWithAPIOpts{
Arg: itemIDPrefix,
Tenant: dm.cfg.GeneralCfg().DefaultTenant,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID, utils.EmptyString,
- utils.FirstNonEmpty(dm.cfg.DataDbCfg().RmtConnID,
+ utils.FirstNonEmpty(dbCfg.RmtConnID,
dm.cfg.GeneralCfg().NodeID)),
}, &loadIDs); err == nil {
- err = dm.dataDB.SetLoadIDsDrv(ctx, loadIDs)
+ err = dataDB.SetLoadIDsDrv(ctx, loadIDs)
}
}
if err != nil {
@@ -2386,23 +2728,27 @@ func (dm *DataManager) SetLoadIDs(ctx *context.Context, loadIDs map[string]int64
if dm == nil {
return utils.ErrNoDatabaseConn
}
- if err = dm.DataDB().SetLoadIDsDrv(ctx, loadIDs); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaLoadIDs)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetLoadIDsDrv(ctx, loadIDs); err != nil {
return
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaLoadIDs]; itm.Replicate {
+ if itm := dm.cfg.DbCfg().Items[utils.MetaLoadIDs]; itm.Replicate {
objIDs := make([]string, 0, len(loadIDs))
for k := range loadIDs {
objIDs = append(objIDs, k)
}
- err = replicateMultipleIDs(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ err = replicateMultipleIDs(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.LoadIDPrefix, objIDs, // this are used to get the host IDs from cache
utils.ReplicatorSv1SetLoadIDs,
&utils.LoadIDsWithAPIOpts{
LoadIDs: loadIDs,
Tenant: dm.cfg.GeneralCfg().DefaultTenant,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -2422,19 +2768,23 @@ func (dm *DataManager) GetRateProfile(ctx *context.Context, tenant, id string, c
err = utils.ErrNoDatabaseConn
return
}
- rpp, err = dm.dataDB.GetRateProfileDrv(ctx, tenant, id)
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaRateProfiles)
if err != nil {
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaRateProfiles]; err == utils.ErrNotFound && itm.Remote {
- if err = dm.connMgr.Call(ctx, dm.cfg.DataDbCfg().RmtConns,
+ return nil, err
+ }
+ rpp, err = dataDB.GetRateProfileDrv(ctx, tenant, id)
+ if err != nil {
+ if itm := dm.cfg.DbCfg().Items[utils.MetaRateProfiles]; err == utils.ErrNotFound && itm.Remote {
+ if err = dm.connMgr.Call(ctx, dbCfg.RmtConns,
utils.ReplicatorSv1GetRateProfile,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID, utils.EmptyString,
- utils.FirstNonEmpty(dm.cfg.DataDbCfg().RmtConnID,
+ utils.FirstNonEmpty(dbCfg.RmtConnID,
dm.cfg.GeneralCfg().NodeID)),
}, &rpp); err == nil {
rpp.Sort()
- err = dm.dataDB.SetRateProfileDrv(ctx, rpp, false)
+ err = dataDB.SetRateProfileDrv(ctx, rpp, false)
}
}
if err != nil {
@@ -2465,7 +2815,11 @@ func (dm *DataManager) GetRateProfileRates(ctx *context.Context, args *utils.Arg
if dm == nil {
return nil, nil, utils.ErrNoDatabaseConn
}
- return dm.DataDB().GetRateProfileRatesDrv(ctx, args.Tenant, args.ProfileID, args.ItemsPrefix, needIDs)
+ dataDB, _, err := dm.dbConns.GetConn(utils.MetaRateProfiles)
+ if err != nil {
+ return
+ }
+ return dataDB.GetRateProfileRatesDrv(ctx, args.Tenant, args.ProfileID, args.ItemsPrefix, needIDs)
}
func (dm *DataManager) SetRateProfile(ctx *context.Context, rpp *utils.RateProfile, optOverwrite, withIndex bool) (err error) {
@@ -2521,18 +2875,22 @@ func (dm *DataManager) SetRateProfile(ctx *context.Context, rpp *utils.RateProfi
}
}
// if not overwriting, we will add the rates in case the profile is already in database, also the fields of the profile are changed too in case of the same tenantID
- if err = dm.DataDB().SetRateProfileDrv(ctx, rpp, optOverwrite); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaRateProfiles)
+ if err != nil {
return err
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaRateProfiles]; itm.Replicate {
- err = replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if err = dataDB.SetRateProfileDrv(ctx, rpp, optOverwrite); err != nil {
+ return err
+ }
+ if itm := dm.cfg.DbCfg().Items[utils.MetaRateProfiles]; itm.Replicate {
+ err = replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.RateProfilePrefix, rpp.TenantID(), // this are used to get the host IDs from cache
utils.ReplicatorSv1SetRateProfile,
&utils.RateProfileWithAPIOpts{
RateProfile: rpp,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -2546,7 +2904,11 @@ func (dm *DataManager) RemoveRateProfile(ctx *context.Context, tenant, id string
if err != nil && err != utils.ErrNotFound {
return
}
- if err = dm.DataDB().RemoveRateProfileDrv(ctx, tenant, id, nil); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaRateProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveRateProfileDrv(ctx, tenant, id, nil); err != nil {
return
}
if oldRpp == nil {
@@ -2570,15 +2932,15 @@ func (dm *DataManager) RemoveRateProfile(ctx *context.Context, tenant, id string
return
}
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaRateProfiles]; itm.Replicate {
- replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaRateProfiles]; itm.Replicate {
+ replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.RateProfilePrefix, utils.ConcatenatedKey(tenant, id), // this are used to get the host IDs from cache
utils.ReplicatorSv1RemoveRateProfile,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -2621,19 +2983,23 @@ func (dm *DataManager) RemoveRateProfileRates(ctx *context.Context, tenant, id s
delete(oldRpp.Rates, rateID)
}
}
- if err = dm.DataDB().RemoveRateProfileDrv(ctx, tenant, id, rateIDs); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaRateProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveRateProfileDrv(ctx, tenant, id, rateIDs); err != nil {
return
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaRateProfiles]; itm.Replicate {
- err = replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaRateProfiles]; itm.Replicate {
+ err = replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.RateProfilePrefix, oldRpp.TenantID(), // this are used to get the host IDs from cache
utils.ReplicatorSv1SetRateProfile,
&utils.RateProfileWithAPIOpts{
RateProfile: oldRpp,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -2653,18 +3019,22 @@ func (dm *DataManager) GetActionProfile(ctx *context.Context, tenant, id string,
err = utils.ErrNoDatabaseConn
return
}
- ap, err = dm.dataDB.GetActionProfileDrv(ctx, tenant, id)
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaActionProfiles)
if err != nil {
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaActionProfiles]; err == utils.ErrNotFound && itm.Remote {
- if err = dm.connMgr.Call(ctx, dm.cfg.DataDbCfg().RmtConns,
+ return nil, err
+ }
+ ap, err = dataDB.GetActionProfileDrv(ctx, tenant, id)
+ if err != nil {
+ if itm := dm.cfg.DbCfg().Items[utils.MetaActionProfiles]; err == utils.ErrNotFound && itm.Remote {
+ if err = dm.connMgr.Call(ctx, dbCfg.RmtConns,
utils.ReplicatorSv1GetActionProfile,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID, utils.EmptyString,
- utils.FirstNonEmpty(dm.cfg.DataDbCfg().RmtConnID,
+ utils.FirstNonEmpty(dbCfg.RmtConnID,
dm.cfg.GeneralCfg().NodeID)),
}, &ap); err == nil {
- err = dm.dataDB.SetActionProfileDrv(ctx, ap)
+ err = dataDB.SetActionProfileDrv(ctx, ap)
}
}
if err != nil {
@@ -2703,7 +3073,11 @@ func (dm *DataManager) SetActionProfile(ctx *context.Context, ap *utils.ActionPr
if err != nil && err != utils.ErrNotFound {
return err
}
- if err = dm.DataDB().SetActionProfileDrv(ctx, ap); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaActionProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetActionProfileDrv(ctx, ap); err != nil {
return err
}
if withIndex {
@@ -2716,15 +3090,15 @@ func (dm *DataManager) SetActionProfile(ctx *context.Context, ap *utils.ActionPr
return err
}
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaActionProfiles]; itm.Replicate {
- err = replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaActionProfiles]; itm.Replicate {
+ err = replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.ActionProfilePrefix, ap.TenantID(), // this are used to get the host IDs from cache
utils.ReplicatorSv1SetActionProfile,
&utils.ActionProfileWithAPIOpts{
ActionProfile: ap,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -2737,7 +3111,11 @@ func (dm *DataManager) RemoveActionProfile(ctx *context.Context, tenant, id stri
if err != nil && err != utils.ErrNotFound {
return err
}
- if err = dm.DataDB().RemoveActionProfileDrv(ctx, tenant, id); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaActionProfiles)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveActionProfileDrv(ctx, tenant, id); err != nil {
return
}
if oldAct == nil {
@@ -2752,24 +3130,52 @@ func (dm *DataManager) RemoveActionProfile(ctx *context.Context, tenant, id stri
return
}
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaActionProfiles]; itm.Replicate {
- replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaActionProfiles]; itm.Replicate {
+ replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.ActionProfilePrefix, utils.ConcatenatedKey(tenant, id), // this are used to get the host IDs from cache
utils.ReplicatorSv1RemoveActionProfile,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
-// Reconnect reconnects to the DB when the config was changed
-func (dm *DataManager) Reconnect(d DataDB) {
- // ToDo: consider locking
- dm.dataDB.Close()
- dm.dataDB = d
+// ReconnectAll reconnects all dataDBs when the config was changed. It builds all the new dataDB conns from the new config before swapping them in.
+func (dm *DataManager) ReconnectAll(cfg *config.CGRConfig) (err error) {
+ dm.dbConns.Lock()
+ defer dm.dbConns.Unlock()
+ d := make(map[string]DataDBDriver)
+ for dbKey, dbConnCfg := range cfg.DbCfg().DBConns {
+ if dbConnCfg.Type == utils.MetaInternal {
+ // close internalDB before creating a new one
+ if datadb, ok := dm.DBConns().dataDBs[dbKey]; !ok {
+ return fmt.Errorf("couldnt find DataDB with dbConnID: <%s>", dbKey)
+ } else {
+ datadb.Close()
+ }
+ }
+ d[dbKey], err = NewDataDBConn(dbConnCfg.Type,
+ dbConnCfg.Host, dbConnCfg.Port, dbConnCfg.Name, dbConnCfg.User,
+ dbConnCfg.Password, cfg.GeneralCfg().DBDataEncoding,
+ dbConnCfg.StringIndexedFields, dbConnCfg.PrefixIndexedFields,
+ cfg.DbCfg().Opts, cfg.DbCfg().Items)
+ if err != nil {
+ return
+ }
+ for dataDBKey := range dm.dbConns.dataDBs { // close old DataManager conns
+ if dbConnCfg.Type != utils.MetaInternal { // old internalDB is already closed
+ dm.dbConns.dataDBs[dataDBKey].Close() // close all connections
+ }
+			delete(dm.dbConns.dataDBs, dataDBKey) // remove old keys so they don't get left over
+ }
+		for driverKey := range d { // put all the newly created DataDBDrivers in DataManager Conns
+ dm.dbConns.dataDBs[driverKey] = d[driverKey]
+ }
+ }
+ return
}
func (dm *DataManager) GetIndexes(ctx *context.Context, idxItmType, tntCtx, idxKey, transactionID string,
@@ -2789,9 +3195,13 @@ func (dm *DataManager) GetIndexes(ctx *context.Context, idxItmType, tntCtx, idxK
}, nil
}
}
- if indexes, err = dm.DataDB().GetIndexesDrv(ctx, idxItmType, tntCtx, idxKey, transactionID); err != nil {
- if itm := dm.cfg.DataDbCfg().Items[idxItmType]; err == utils.ErrNotFound && itm.Remote {
- if err = dm.connMgr.Call(ctx, dm.cfg.DataDbCfg().RmtConns,
+ dataDB, dbCfg, err := dm.dbConns.GetConn(idxItmType)
+ if err != nil {
+ return nil, err
+ }
+ if indexes, err = dataDB.GetIndexesDrv(ctx, idxItmType, tntCtx, idxKey, transactionID); err != nil {
+ if itm := dm.cfg.DbCfg().Items[idxItmType]; err == utils.ErrNotFound && itm.Remote {
+ if err = dm.connMgr.Call(ctx, dbCfg.RmtConns,
utils.ReplicatorSv1GetIndexes,
&utils.GetIndexesArg{
IdxItmType: idxItmType,
@@ -2799,10 +3209,10 @@ func (dm *DataManager) GetIndexes(ctx *context.Context, idxItmType, tntCtx, idxK
IdxKey: idxKey,
Tenant: dm.cfg.GeneralCfg().DefaultTenant,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID, utils.EmptyString,
- utils.FirstNonEmpty(dm.cfg.DataDbCfg().RmtConnID,
+ utils.FirstNonEmpty(dbCfg.RmtConnID,
dm.cfg.GeneralCfg().NodeID)),
}, &indexes); err == nil {
- err = dm.dataDB.SetIndexesDrv(ctx, idxItmType, tntCtx, indexes, true, utils.NonTransactional)
+ err = dataDB.SetIndexesDrv(ctx, idxItmType, tntCtx, indexes, true, utils.NonTransactional)
}
}
if err != nil {
@@ -2833,13 +3243,17 @@ func (dm *DataManager) SetIndexes(ctx *context.Context, idxItmType, tntCtx strin
if dm == nil {
return utils.ErrNoDatabaseConn
}
- if err = dm.DataDB().SetIndexesDrv(ctx, idxItmType, tntCtx,
+ dataDB, dbCfg, err := dm.dbConns.GetConn(idxItmType)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetIndexesDrv(ctx, idxItmType, tntCtx,
indexes, commit, transactionID); err != nil {
return
}
- if itm := dm.cfg.DataDbCfg().Items[idxItmType]; itm.Replicate {
- err = replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[idxItmType]; itm.Replicate {
+ err = replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.CacheInstanceToPrefix[idxItmType], tntCtx, // this are used to get the host IDs from cache
utils.ReplicatorSv1SetIndexes,
&utils.SetIndexesArg{
@@ -2848,7 +3262,7 @@ func (dm *DataManager) SetIndexes(ctx *context.Context, idxItmType, tntCtx strin
Indexes: indexes,
Tenant: dm.cfg.GeneralCfg().DefaultTenant,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -2857,12 +3271,16 @@ func (dm *DataManager) RemoveIndexes(ctx *context.Context, idxItmType, tntCtx, i
if dm == nil {
return utils.ErrNoDatabaseConn
}
- if err = dm.DataDB().RemoveIndexesDrv(ctx, idxItmType, tntCtx, idxKey); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(idxItmType)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveIndexesDrv(ctx, idxItmType, tntCtx, idxKey); err != nil {
return
}
- if itm := dm.cfg.DataDbCfg().Items[idxItmType]; itm.Replicate {
- replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[idxItmType]; itm.Replicate {
+ replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.CacheInstanceToPrefix[idxItmType], tntCtx, // this are used to get the host IDs from cache
utils.ReplicatorSv1RemoveIndexes,
&utils.GetIndexesArg{
@@ -2871,7 +3289,7 @@ func (dm *DataManager) RemoveIndexes(ctx *context.Context, idxItmType, tntCtx, i
IdxKey: idxKey,
Tenant: dm.cfg.GeneralCfg().DefaultTenant,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -2918,6 +3336,10 @@ func GetAPIBan(ctx *context.Context, ip string, apiKeys []string, single, cacheR
// checkFilters returns the id of the first Filter that is not valid
// it should be called after the dm nil check
func (dm *DataManager) checkFilters(ctx *context.Context, tenant string, ids []string) (err error) {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaFilters)
+ if err != nil {
+ return err
+ }
for _, id := range ids {
// in case of inline filter we try to build them
// if they are not correct it should fail here not in indexes
@@ -2930,16 +3352,16 @@ func (dm *DataManager) checkFilters(ctx *context.Context, tenant string, ids []s
} else if x, has := Cache.Get(utils.CacheFilters, // because the method HasDataDrv doesn't use cache
utils.ConcatenatedKey(tenant, id)); has && x == nil { // check to see if filter is already in cache
return fmt.Errorf("broken reference to filter: <%s>", id)
- } else if has, err := dm.DataDB().HasDataDrv(ctx, utils.FilterPrefix, // check in local DB if we have the filter
+ } else if has, err := dataDB.HasDataDrv(ctx, utils.FilterPrefix, // check in local DB if we have the filter
id, tenant); err != nil || !has {
// in case we can not find it localy try to find it in the remote DB
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaFilters]; err == utils.ErrNotFound && itm.Remote {
+ if itm := dm.cfg.DbCfg().Items[utils.MetaFilters]; err == utils.ErrNotFound && itm.Remote {
var fltr *Filter
- err = dm.connMgr.Call(ctx, dm.cfg.DataDbCfg().RmtConns, utils.ReplicatorSv1GetFilter,
+ err = dm.connMgr.Call(ctx, dbCfg.RmtConns, utils.ReplicatorSv1GetFilter,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID, utils.EmptyString,
- utils.FirstNonEmpty(dm.cfg.DataDbCfg().RmtConnID,
+ utils.FirstNonEmpty(dbCfg.RmtConnID,
dm.cfg.GeneralCfg().NodeID)),
}, &fltr)
has = fltr == nil
@@ -2958,18 +3380,22 @@ func (dm *DataManager) GetAccount(ctx *context.Context, tenant, id string) (ap *
err = utils.ErrNoDatabaseConn
return
}
- ap, err = dm.dataDB.GetAccountDrv(ctx, tenant, id)
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaAccounts)
if err != nil {
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaAccounts]; err == utils.ErrNotFound && itm.Remote {
- if err = dm.connMgr.Call(ctx, dm.cfg.DataDbCfg().RmtConns,
+ return nil, err
+ }
+ ap, err = dataDB.GetAccountDrv(ctx, tenant, id)
+ if err != nil {
+ if itm := dm.cfg.DbCfg().Items[utils.MetaAccounts]; err == utils.ErrNotFound && itm.Remote {
+ if err = dm.connMgr.Call(ctx, dbCfg.RmtConns,
utils.ReplicatorSv1GetAccount,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID, utils.EmptyString,
- utils.FirstNonEmpty(dm.cfg.DataDbCfg().RmtConnID,
+ utils.FirstNonEmpty(dbCfg.RmtConnID,
dm.cfg.GeneralCfg().NodeID)),
}, &ap); err == nil {
- err = dm.dataDB.SetAccountDrv(ctx, ap)
+ err = dataDB.SetAccountDrv(ctx, ap)
}
}
if err != nil {
@@ -2994,7 +3420,11 @@ func (dm *DataManager) SetAccount(ctx *context.Context, ap *utils.Account, withI
if err != nil && err != utils.ErrNotFound {
return err
}
- if err = dm.DataDB().SetAccountDrv(ctx, ap); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaAccounts)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.SetAccountDrv(ctx, ap); err != nil {
return err
}
if withIndex {
@@ -3007,15 +3437,15 @@ func (dm *DataManager) SetAccount(ctx *context.Context, ap *utils.Account, withI
return err
}
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaAccounts]; itm.Replicate {
- err = replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaAccounts]; itm.Replicate {
+ err = replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.AccountPrefix, ap.TenantID(), // this are used to get the host IDs from cache
utils.ReplicatorSv1SetAccount,
&utils.AccountWithAPIOpts{
Account: ap,
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
@@ -3028,7 +3458,11 @@ func (dm *DataManager) RemoveAccount(ctx *context.Context, tenant, id string, wi
if err != nil && err != utils.ErrNotFound {
return err
}
- if err = dm.DataDB().RemoveAccountDrv(ctx, tenant, id); err != nil {
+ dataDB, dbCfg, err := dm.dbConns.GetConn(utils.MetaAccounts)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveAccountDrv(ctx, tenant, id); err != nil {
return
}
if oldRpp == nil {
@@ -3043,15 +3477,78 @@ func (dm *DataManager) RemoveAccount(ctx *context.Context, tenant, id string, wi
return
}
}
- if itm := dm.cfg.DataDbCfg().Items[utils.MetaAccounts]; itm.Replicate {
- replicate(ctx, dm.connMgr, dm.cfg.DataDbCfg().RplConns,
- dm.cfg.DataDbCfg().RplFiltered,
+ if itm := dm.cfg.DbCfg().Items[utils.MetaAccounts]; itm.Replicate {
+ replicate(ctx, dm.connMgr, dbCfg.RplConns,
+ dbCfg.RplFiltered,
utils.AccountPrefix, utils.ConcatenatedKey(tenant, id), // this are used to get the host IDs from cache
utils.ReplicatorSv1RemoveAccount,
&utils.TenantIDWithAPIOpts{
TenantID: &utils.TenantID{Tenant: tenant, ID: id},
APIOpts: utils.GenerateDBItemOpts(itm.APIKey, itm.RouteID,
- dm.cfg.DataDbCfg().RplCache, utils.EmptyString)})
+ dbCfg.RplCache, utils.EmptyString)})
}
return
}
+
+// DumpDataDB dumps all of dataDB from memory to dump files. Only available for internal DBs.
+func (dm *DataManager) DumpDataDB() error {
+ for _, db := range dm.DBConns().dataDBs {
+ if err := db.DumpDataDB(); err != nil && err != utils.ErrNotImplemented {
+ return err
+ }
+ }
+ return nil
+}
+
+// RewriteDataDB rewrites every dump file of DataDB. Only available for internal DBs.
+func (dm *DataManager) RewriteDataDB() error {
+ for _, db := range dm.DBConns().dataDBs {
+ if err := db.RewriteDataDB(); err != nil && err != utils.ErrNotImplemented {
+ return err
+ }
+ }
+ return nil
+}
+
+// BackupDataDB momentarily stops any dumping and rewriting in dataDB until the dump folder is backed up to backupFolderPath. Setting zip to true creates a zip file at the path instead. Only available for internal DBs.
+func (dm *DataManager) BackupDataDB(backupFolderPath string, zip bool) error {
+ for _, db := range dm.DBConns().dataDBs {
+ if err := db.BackupDataDB(backupFolderPath, zip); err != nil && err != utils.ErrNotImplemented {
+ return err
+ }
+ }
+ return nil
+}
+
+func (dm *DataManager) SetCDR(ctx *context.Context, cdr *utils.CGREvent, allowUpdate bool) (err error) {
+ if dm == nil {
+ return utils.ErrNoDatabaseConn
+ }
+ dataDB, _, err := dm.dbConns.GetConn(utils.MetaCDRs)
+ if err != nil {
+ return err
+ }
+ return dataDB.SetCDR(ctx, cdr, allowUpdate)
+}
+
+func (dm *DataManager) GetCDRs(ctx *context.Context, qryFltr []*Filter, opts map[string]any) ([]*utils.CDR, error) {
+ if dm == nil {
+ return nil, utils.ErrNoDatabaseConn
+ }
+ dataDB, _, err := dm.dbConns.GetConn(utils.MetaCDRs)
+ if err != nil {
+ return nil, err
+ }
+ return dataDB.GetCDRs(ctx, qryFltr, opts)
+}
+
+func (dm *DataManager) RemoveCDRs(ctx *context.Context, qryFltr []*Filter) (err error) {
+ if dm == nil {
+ return utils.ErrNoDatabaseConn
+ }
+ dataDB, _, err := dm.dbConns.GetConn(utils.MetaCDRs)
+ if err != nil {
+ return err
+ }
+ return dataDB.RemoveCDRs(ctx, qryFltr)
+}
diff --git a/engine/filterhelpers.go b/engine/filterhelpers.go
index 5206fff9a..78a6b9ca2 100644
--- a/engine/filterhelpers.go
+++ b/engine/filterhelpers.go
@@ -50,8 +50,13 @@ func MatchingItemIDsForEvent(ctx *context.Context, ev utils.MapStorage, stringFl
lockID := utils.CacheInstanceToPrefix[cacheID] + itemIDPrefix
guardian.Guardian.Guard(ctx, func(ctx *context.Context) (_ error) {
if !indexedSelects {
+ var dataDB DataDB
+ dataDB, _, err = dm.dbConns.GetConn(cacheID)
+ if err != nil {
+ return
+ }
var keysWithID []string
- if keysWithID, err = dm.DataDB().GetKeysForPrefix(ctx, utils.CacheIndexesToPrefix[cacheID]); err != nil {
+ if keysWithID, err = dataDB.GetKeysForPrefix(ctx, utils.CacheIndexesToPrefix[cacheID]); err != nil {
return
}
var sliceIDs []string
diff --git a/engine/libindex.go b/engine/libindex.go
index 785ca7152..146980c99 100644
--- a/engine/libindex.go
+++ b/engine/libindex.go
@@ -293,9 +293,13 @@ func ComputeIndexes(ctx *context.Context, dm *DataManager, tnt, grp, idxItmType
var profilesIDs []string
if IDs == nil { // get all items
Cache.Clear([]string{idxItmType})
+ dataDB, _, err := dm.dbConns.GetConn(idxItmType)
+ if err != nil {
+ return nil, err
+ }
var ids []string
- if ids, err = dm.DataDB().GetKeysForPrefix(ctx, utils.CacheIndexesToPrefix[idxItmType]); err != nil {
- return
+ if ids, err = dataDB.GetKeysForPrefix(ctx, utils.CacheIndexesToPrefix[idxItmType]); err != nil {
+ return nil, err
}
for _, id := range ids {
profilesIDs = append(profilesIDs, utils.SplitConcatenatedKey(id)[1])
diff --git a/engine/libindex_health.go b/engine/libindex_health.go
index 62df24bbc..28594dd39 100644
--- a/engine/libindex_health.go
+++ b/engine/libindex_health.go
@@ -287,8 +287,12 @@ func GetFltrIdxHealth(ctx *context.Context, dm *DataManager, fltrCache, fltrIdxC
MissingFilters: make(map[string][]string),
}
objPrfx := utils.CacheIndexesToPrefix[indxType]
+ dataDB, _, err := dm.DBConns().GetConn(indxType)
+ if err != nil {
+ return
+ }
var ids []string
- if ids, err = dm.dataDB.GetKeysForPrefix(ctx, objPrfx); err != nil {
+ if ids, err = dataDB.GetKeysForPrefix(ctx, objPrfx); err != nil {
return
}
for _, id := range ids { // get all the objects from DB
@@ -307,7 +311,7 @@ func GetFltrIdxHealth(ctx *context.Context, dm *DataManager, fltrCache, fltrIdxC
// check the indexes( index->filter->obj relation)
idxPrfx := utils.CacheInstanceToPrefix[indxType]
var indexKeys []string
- if indexKeys, err = dm.dataDB.GetKeysForPrefix(ctx, idxPrfx); err != nil {
+ if indexKeys, err = dataDB.GetKeysForPrefix(ctx, idxPrfx); err != nil {
return
}
missingObj := utils.StringSet{}
@@ -381,8 +385,12 @@ func getRevFltrIdxHealthFromObj(ctx *context.Context, dm *DataManager, fltrCache
MissingFilters: make(map[string][]string),
}
objPrfx := utils.CacheIndexesToPrefix[indxType]
+ dataDB, _, err := dm.DBConns().GetConn(indxType)
+ if err != nil {
+ return
+ }
var ids []string
- if ids, err = dm.dataDB.GetKeysForPrefix(ctx, objPrfx); err != nil {
+ if ids, err = dataDB.GetKeysForPrefix(ctx, objPrfx); err != nil {
return
}
for _, id := range ids { // get all the objects
@@ -425,8 +433,12 @@ func getRevFltrIdxHealthFromObj(ctx *context.Context, dm *DataManager, fltrCache
// getRevFltrIdxHealthFromReverse parses the reverse indexes and updates the reply
func getRevFltrIdxHealthFromReverse(ctx *context.Context, dm *DataManager, fltrCache, revFltrIdxCache *ltcache.Cache, objCaches map[string]*ltcache.Cache, rply map[string]*ReverseFilterIHReply) (_ map[string]*ReverseFilterIHReply, err error) {
+ dataDB, _, err := dm.DBConns().GetConn(utils.CacheReverseFilterIndexes)
+ if err != nil {
+ return
+ }
var revIndexKeys []string
- if revIndexKeys, err = dm.dataDB.GetKeysForPrefix(ctx, utils.FilterIndexPrfx); err != nil {
+ if revIndexKeys, err = dataDB.GetKeysForPrefix(ctx, utils.FilterIndexPrfx); err != nil {
return
}
missingObj := utils.StringSet{}
@@ -548,8 +560,12 @@ func GetFltrIdxHealthForRateRates(ctx *context.Context, dm *DataManager, fltrCac
BrokenIndexes: make(map[string][]string),
MissingFilters: make(map[string][]string),
}
+ dataDB, _, err := dm.DBConns().GetConn(utils.MetaRateProfiles)
+ if err != nil {
+ return
+ }
var ids []string
- if ids, err = dm.dataDB.GetKeysForPrefix(ctx, utils.RateProfilePrefix); err != nil {
+ if ids, err = dataDB.GetKeysForPrefix(ctx, utils.RateProfilePrefix); err != nil {
return
}
for _, id := range ids {
@@ -569,7 +585,7 @@ func GetFltrIdxHealthForRateRates(ctx *context.Context, dm *DataManager, fltrCac
// check the indexes( index->filter->obj relation)
var indexKeys []string
- if indexKeys, err = dm.dataDB.GetKeysForPrefix(ctx, utils.RateFilterIndexPrfx); err != nil {
+ if indexKeys, err = dataDB.GetKeysForPrefix(ctx, utils.RateFilterIndexPrfx); err != nil {
return
}
for _, dataID := range indexKeys {
@@ -649,8 +665,12 @@ func getRevFltrIdxHealthFromRateRates(ctx *context.Context, dm *DataManager, flt
BrokenReverseIndexes: make(map[string][]string),
MissingFilters: make(map[string][]string),
}
+ dataDB, _, err := dm.DBConns().GetConn(utils.MetaRateProfiles)
+ if err != nil {
+ return
+ }
var ids []string
- if ids, err = dm.dataDB.GetKeysForPrefix(ctx, utils.RateProfilePrefix); err != nil {
+ if ids, err = dataDB.GetKeysForPrefix(ctx, utils.RateProfilePrefix); err != nil {
return
}
for _, id := range ids {
diff --git a/engine/stats.go b/engine/stats.go
index 8c9a95e4c..586030659 100644
--- a/engine/stats.go
+++ b/engine/stats.go
@@ -608,7 +608,11 @@ func (sS *StatS) V1GetQueueIDs(ctx *context.Context, args *utils.TenantWithAPIOp
tenant = sS.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.StatQueuePrefix + tenant + utils.ConcatenatedKeySep
- keys, err := sS.dm.DataDB().GetKeysForPrefix(ctx, prfx)
+ dataDB, _, err := sS.dm.DBConns().GetConn(utils.MetaStatQueues)
+ if err != nil {
+ return err
+ }
+ keys, err := dataDB.GetKeysForPrefix(ctx, prfx)
if err != nil {
return err
}
diff --git a/engine/storage_interface.go b/engine/storage_interface.go
index 7d346c7f4..16ee7d0f4 100644
--- a/engine/storage_interface.go
+++ b/engine/storage_interface.go
@@ -110,6 +110,9 @@ type DataDB interface {
GetConfigSectionsDrv(*context.Context, string, []string) (map[string][]byte, error)
SetConfigSectionsDrv(*context.Context, string, map[string][]byte) error
RemoveConfigSectionsDrv(*context.Context, string, []string) error
+ SetCDR(*context.Context, *utils.CGREvent, bool) error
+ GetCDRs(*context.Context, []*Filter, map[string]any) ([]*utils.CDR, error)
+ RemoveCDRs(*context.Context, []*Filter) error
DumpDataDB() error
RewriteDataDB() error
BackupDataDB(string, bool) error
@@ -121,16 +124,6 @@ type DataDBDriver interface {
config.ConfigDB
}
-type StorDB interface {
- Storage
- SetCDR(*context.Context, *utils.CGREvent, bool) error
- GetCDRs(*context.Context, []*Filter, map[string]any) ([]*utils.CDR, error)
- RemoveCDRs(*context.Context, []*Filter) error
- DumpStorDB() error
- RewriteStorDB() error
- BackupStorDB(string, bool) error
-}
-
type LoadStorage interface {
Storage
LoadReader
diff --git a/engine/storage_internal_stordb.go b/engine/storage_internal_stordb.go
index 9b247c5ce..895ea44db 100644
--- a/engine/storage_internal_stordb.go
+++ b/engine/storage_internal_stordb.go
@@ -262,18 +262,3 @@ func (iDB *InternalDB) RemoveCDRs(ctx *context.Context, qryFltr []*Filter) (err
}
return
}
-
-// Will dump everything inside stordb to files
-func (iDB *InternalDB) DumpStorDB() (err error) {
- return iDB.db.DumpAll()
-}
-
-// Will rewrite every dump file of StorDB
-func (iDB *InternalDB) RewriteStorDB() (err error) {
- return iDB.db.RewriteAll()
-}
-
-// BackupStorDB will momentarely stop any dumping and rewriting until all dump folder is backed up in folder path backupFolderPath, making zip true will create a zip file in the path instead
-func (iDB *InternalDB) BackupStorDB(backupFolderPath string, zip bool) (err error) {
- return iDB.db.BackupDumpFolder(backupFolderPath, zip)
-}
diff --git a/engine/storage_mongo_datadb.go b/engine/storage_mongo_datadb.go
index 4e2411fe0..70ab1ab09 100644
--- a/engine/storage_mongo_datadb.go
+++ b/engine/storage_mongo_datadb.go
@@ -177,12 +177,11 @@ func mapStringAnyDecoderWithDecimal(dc bsoncodec.DecodeContext, vr bsonrw.ValueR
// NewMongoStorage initializes a new MongoDB storage instance with provided connection parameters and settings.
// Returns an error if the setup fails.
-func NewMongoStorage(scheme, host, port, db, user, pass, mrshlerStr string, storageType string,
+func NewMongoStorage(scheme, host, port, db, user, pass, mrshlerStr string,
cdrsIndexes []string, ttl time.Duration) (*MongoStorage, error) {
mongoStorage := &MongoStorage{
ctxTTL: ttl,
cdrsIndexes: cdrsIndexes,
- storageType: storageType,
counter: utils.NewCounter(time.Now().UnixNano(), 0),
}
uri := composeMongoURI(scheme, host, port, db, user, pass)
@@ -248,7 +247,6 @@ type MongoStorage struct {
ctxTTL time.Duration
ctxTTLMutex sync.RWMutex // used for TTL reload
db string
- storageType string // DataDB/StorDB
ms utils.Marshaler
cdrsIndexes []string
counter *utils.Counter
@@ -262,11 +260,6 @@ func (ms *MongoStorage) query(ctx *context.Context, argfunc func(ctx mongo.Sessi
return ms.client.UseSession(ctxSession, argfunc)
}
-// IsDataDB returns whether or not the storage is used for DataDB.
-func (ms *MongoStorage) IsDataDB() bool {
- return ms.storageType == utils.DataDB
-}
-
// SetTTL sets the context TTL used for queries (Thread-safe).
func (ms *MongoStorage) SetTTL(ttl time.Duration) {
ms.ctxTTLMutex.Lock()
@@ -348,15 +341,11 @@ func (ms *MongoStorage) ensureIndexesForCol(col string) error { // exported for
// EnsureIndexes creates database indexes for the specified collections.
func (ms *MongoStorage) EnsureIndexes(cols ...string) error {
if len(cols) == 0 {
- if ms.IsDataDB() {
- cols = []string{
- ColAct, ColApl, ColAAp, ColAtr, ColRpl, ColDst, ColRds, ColLht, ColIndx,
- ColRsP, ColRes, ColIPp, ColIPs, ColSqs, ColSqp, ColTps, ColThs, ColRts,
- ColAttr, ColFlt, ColCpp, ColRpp, ColApp, ColRpf, ColShg, ColAcc, ColAnp,
- ColTrd, ColTrs,
- }
- } else {
- cols = []string{utils.CDRsTBL}
+ cols = []string{
+ ColAct, ColApl, ColAAp, ColAtr, ColRpl, ColDst, ColRds, ColLht, ColIndx,
+ ColRsP, ColRes, ColIPp, ColIPs, ColSqs, ColSqp, ColTps, ColThs, ColRts,
+ ColAttr, ColFlt, ColCpp, ColRpp, ColApp, ColRpf, ColShg, ColAcc, ColAnp,
+ ColTrd, ColTrs, utils.CDRsTBL,
}
}
for _, col := range cols {
diff --git a/engine/storage_mongo_stordb.go b/engine/storage_mongo_stordb.go
index 81c117155..200e7e418 100644
--- a/engine/storage_mongo_stordb.go
+++ b/engine/storage_mongo_stordb.go
@@ -405,18 +405,3 @@ func (ms *MongoStorage) RemoveCDRs(ctx *context.Context, qryFltr []*Filter) (err
})
return err
}
-
-// RewriteStorDB used only for InternalDB
-func (ms *MongoStorage) DumpStorDB() (err error) {
- return utils.ErrNotImplemented
-}
-
-// RewriteStorDB used only for InternalDB
-func (ms *MongoStorage) RewriteStorDB() (err error) {
- return utils.ErrNotImplemented
-}
-
-// BackupStorDB used only for InternalDB
-func (ms *MongoStorage) BackupStorDB(backupFolderPath string, zip bool) (err error) {
- return utils.ErrNotImplemented
-}
diff --git a/engine/storage_mysql.go b/engine/storage_mysql.go
index 110fac189..ccbbf24b7 100644
--- a/engine/storage_mysql.go
+++ b/engine/storage_mysql.go
@@ -55,7 +55,7 @@ func NewMySQLStorage(host, port, name, user, password string,
return &SQLStorage{
DB: mySQLStorage.DB,
db: mySQLStorage.db,
- StorDB: mySQLStorage,
+ DataDB: mySQLStorage,
SQLImpl: mySQLStorage,
}, nil
}
diff --git a/engine/storage_postgres.go b/engine/storage_postgres.go
index 39e749c40..ec315748e 100644
--- a/engine/storage_postgres.go
+++ b/engine/storage_postgres.go
@@ -32,7 +32,7 @@ type PostgresStorage struct {
SQLStorage
}
-// NewPostgresStorage returns the posgres storDB
+// NewPostgresStorage returns the postgres DB
func NewPostgresStorage(host, port, name, user, password,
sslmode, sslcert, sslkey, sslpassword, sslcertmode, sslrootcert string,
maxConn, maxIdleConn, sqlLogLevel int, connMaxLifetime time.Duration) (*SQLStorage, error) {
@@ -73,7 +73,7 @@ func NewPostgresStorage(host, port, name, user, password,
return &SQLStorage{
DB: pgStor.DB,
db: pgStor.db,
- StorDB: pgStor,
+ DataDB: pgStor,
SQLImpl: pgStor,
}, nil
}
diff --git a/engine/storage_redis.go b/engine/storage_redis.go
index 42f4a451f..072817519 100644
--- a/engine/storage_redis.go
+++ b/engine/storage_redis.go
@@ -1113,6 +1113,21 @@ func (rs *RedisStorage) RemoveConfigSectionsDrv(ctx *context.Context, nodeID str
return
}
+// SetCDR is not implemented for Redis (CDR storage method).
+func (rs *RedisStorage) SetCDR(_ *context.Context, cdr *utils.CGREvent, allowUpdate bool) error {
+ return utils.ErrNotImplemented
+}
+
+// GetCDRs is not implemented for Redis (CDR storage method).
+func (rs *RedisStorage) GetCDRs(ctx *context.Context, qryFltr []*Filter, opts map[string]any) ([]*utils.CDR, error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// RemoveCDRs is not implemented for Redis (CDR storage method).
+func (rs *RedisStorage) RemoveCDRs(ctx *context.Context, qryFltr []*Filter) (err error) {
+ return utils.ErrNotImplemented
+}
+
// DumpDataDB will dump all of datadb from memory to a file, only for InternalDB
func (rs *RedisStorage) DumpDataDB() error {
return utils.ErrNotImplemented
diff --git a/engine/storage_sql.go b/engine/storage_sql.go
index e79527654..bbbe36557 100644
--- a/engine/storage_sql.go
+++ b/engine/storage_sql.go
@@ -44,7 +44,7 @@ type SQLImpl interface {
type SQLStorage struct {
DB *sql.DB
db *gorm.DB
- StorDB
+ DataDB
SQLImpl
}
@@ -350,17 +350,399 @@ func (sqls *SQLStorage) RemoveCDRs(ctx *context.Context, qryFltr []*Filter) (err
return
}
-// Will dump everything inside stordb to a file, only for InternalDB
-func (sqls *SQLStorage) DumpStorDB() (err error) {
+// AddLoadHistory DataDB method not implemented yet
+func (sqls *SQLStorage) AddLoadHistory(ldInst *utils.LoadInstance,
+ loadHistSize int, transactionID string) error {
return utils.ErrNotImplemented
}
-// Will rewrite every dump file of StorDB, only for InternalDB
-func (sqls *SQLStorage) RewriteStorDB() (err error) {
+// BackupConfigDB is only intended for InternalDB.
+func (sqls *SQLStorage) BackupConfigDB(backupFolderPath string, zip bool) (err error) {
return utils.ErrNotImplemented
}
-// BackupStorDB used only for InternalDB
-func (sqls *SQLStorage) BackupStorDB(backupFolderPath string, zip bool) (err error) {
+// BackupDataDB used only for InternalDB
+func (sqls *SQLStorage) BackupDataDB(backupFolderPath string, zip bool) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DumpConfigDB dumps everything inside the DB to a file; only implemented for InternalDB.
+func (sqls *SQLStorage) DumpConfigDB() (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DumpDataDB dumps everything inside the DB to a file; only implemented for InternalDB.
+func (sqls *SQLStorage) DumpDataDB() (err error) {
+ return utils.ErrNotImplemented
+}
+
+// Will rewrite every dump file of DataDB, only for InternalDB
+func (sqls *SQLStorage) RewriteDataDB() (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) HasDataDrv(ctx *context.Context, category, subject, tenant string) (exists bool, err error) {
+ return false, utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) GetLoadHistory(limit int, skipCache bool,
+ transactionID string) (loadInsts []*utils.LoadInstance, err error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) GetResourceProfileDrv(ctx *context.Context, tenant, id string) (rsp *utils.ResourceProfile, err error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) SetResourceProfileDrv(ctx *context.Context, rsp *utils.ResourceProfile) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) RemoveResourceProfileDrv(ctx *context.Context, tenant, id string) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) GetResourceDrv(ctx *context.Context, tenant, id string) (r *utils.Resource, err error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) SetResourceDrv(ctx *context.Context, r *utils.Resource) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) RemoveResourceDrv(ctx *context.Context, tenant, id string) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) GetIPProfileDrv(ctx *context.Context, tenant, id string) (*utils.IPProfile, error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) SetIPProfileDrv(ctx *context.Context, ipp *utils.IPProfile) error {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) RemoveIPProfileDrv(ctx *context.Context, tenant, id string) error {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) GetIPAllocationsDrv(ctx *context.Context, tenant, id string) (*utils.IPAllocations, error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) SetIPAllocationsDrv(ctx *context.Context, ip *utils.IPAllocations) error {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) RemoveIPAllocationsDrv(ctx *context.Context, tenant, id string) error {
+ return utils.ErrNotImplemented
+}
+
+// GetStatQueueProfileDrv DataDB method not implemented yet
+func (sqls *SQLStorage) GetStatQueueProfileDrv(ctx *context.Context, tenant string, id string) (sq *StatQueueProfile, err error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// SetStatQueueProfileDrv DataDB method not implemented yet
+func (sqls *SQLStorage) SetStatQueueProfileDrv(ctx *context.Context, sq *StatQueueProfile) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// RemStatQueueProfileDrv DataDB method not implemented yet
+func (sqls *SQLStorage) RemStatQueueProfileDrv(ctx *context.Context, tenant, id string) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// GetStatQueueDrv DataDB method not implemented yet
+func (sqls *SQLStorage) GetStatQueueDrv(ctx *context.Context, tenant, id string) (sq *StatQueue, err error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// SetStatQueueDrv DataDB method not implemented yet
+func (sqls *SQLStorage) SetStatQueueDrv(ctx *context.Context, ssq *StoredStatQueue, sq *StatQueue) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// RemStatQueueDrv DataDB method not implemented yet
+func (sqls *SQLStorage) RemStatQueueDrv(ctx *context.Context, tenant, id string) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) SetTrendProfileDrv(ctx *context.Context, sg *utils.TrendProfile) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) GetTrendProfileDrv(ctx *context.Context, tenant string, id string) (sg *utils.TrendProfile, err error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) RemTrendProfileDrv(ctx *context.Context, tenant string, id string) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) GetTrendDrv(ctx *context.Context, tenant, id string) (r *utils.Trend, err error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) SetTrendDrv(ctx *context.Context, r *utils.Trend) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) RemoveTrendDrv(ctx *context.Context, tenant, id string) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) SetRankingProfileDrv(ctx *context.Context, sg *utils.RankingProfile) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) GetRankingProfileDrv(ctx *context.Context, tenant string, id string) (sg *utils.RankingProfile, err error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) RemRankingProfileDrv(ctx *context.Context, tenant string, id string) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) GetRankingDrv(ctx *context.Context, tenant, id string) (rn *utils.Ranking, err error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) SetRankingDrv(_ *context.Context, rn *utils.Ranking) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) RemoveRankingDrv(ctx *context.Context, tenant, id string) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// GetThresholdProfileDrv DataDB method not implemented yet
+func (sqls *SQLStorage) GetThresholdProfileDrv(ctx *context.Context, tenant, ID string) (tp *ThresholdProfile, err error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// SetThresholdProfileDrv DataDB method not implemented yet
+func (sqls *SQLStorage) SetThresholdProfileDrv(ctx *context.Context, tp *ThresholdProfile) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// RemThresholdProfileDrv DataDB method not implemented yet
+func (sqls *SQLStorage) RemThresholdProfileDrv(ctx *context.Context, tenant, id string) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) GetThresholdDrv(ctx *context.Context, tenant, id string) (r *Threshold, err error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) SetThresholdDrv(ctx *context.Context, r *Threshold) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) RemoveThresholdDrv(ctx *context.Context, tenant, id string) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) GetFilterDrv(ctx *context.Context, tenant, id string) (r *Filter, err error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) SetFilterDrv(ctx *context.Context, r *Filter) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) RemoveFilterDrv(ctx *context.Context, tenant, id string) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) GetRouteProfileDrv(ctx *context.Context, tenant, id string) (r *utils.RouteProfile, err error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) SetRouteProfileDrv(ctx *context.Context, r *utils.RouteProfile) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) RemoveRouteProfileDrv(ctx *context.Context, tenant, id string) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) GetAttributeProfileDrv(ctx *context.Context, tenant, id string) (r *utils.AttributeProfile, err error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) SetAttributeProfileDrv(ctx *context.Context, r *utils.AttributeProfile) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) RemoveAttributeProfileDrv(ctx *context.Context, tenant, id string) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) GetChargerProfileDrv(_ *context.Context, tenant, id string) (r *utils.ChargerProfile, err error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) SetChargerProfileDrv(_ *context.Context, r *utils.ChargerProfile) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) RemoveChargerProfileDrv(_ *context.Context, tenant, id string) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// GetStorageType reports the storage type. NOTE(review): hardcoded to *mysql even for Postgres-backed SQLStorage — confirm this is intended.
+func (sqls *SQLStorage) GetStorageType() string {
+ return utils.MetaMySQL
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) GetItemLoadIDsDrv(ctx *context.Context, itemIDPrefix string) (loadIDs map[string]int64, err error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) SetLoadIDsDrv(ctx *context.Context, loadIDs map[string]int64) error {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) RemoveLoadIDsDrv() (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) SetRateProfileDrv(ctx *context.Context, rpp *utils.RateProfile, optOverwrite bool) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) GetRateProfileDrv(ctx *context.Context, tenant, id string) (rpp *utils.RateProfile, err error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// GetRateProfileRatesDrv DataDB method not implemented yet
+func (sqls *SQLStorage) GetRateProfileRatesDrv(ctx *context.Context, tnt, profileID, rtPrfx string, needIDs bool) (rateIDs []string, rates []*utils.Rate, err error) {
+ return nil, nil, utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) RemoveRateProfileDrv(ctx *context.Context, tenant, id string, rateIDs *[]string) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) GetActionProfileDrv(ctx *context.Context, tenant, id string) (ap *utils.ActionProfile, err error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) SetActionProfileDrv(ctx *context.Context, ap *utils.ActionProfile) (err error) {
+ return utils.ErrNotImplemented
+}
+
+func (sqls *SQLStorage) RemoveActionProfileDrv(ctx *context.Context, tenant, id string) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// GetIndexesDrv DataDB method not implemented yet
+func (sqls *SQLStorage) GetIndexesDrv(ctx *context.Context, idxItmType, tntCtx, idxKey, transactionID string) (indexes map[string]utils.StringSet, err error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// SetIndexesDrv DataDB method not implemented yet
+func (sqls *SQLStorage) SetIndexesDrv(ctx *context.Context, idxItmType, tntCtx string,
+ indexes map[string]utils.StringSet, commit bool, transactionID string) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) RemoveIndexesDrv(ctx *context.Context, idxItmType, tntCtx, idxKey string) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) GetAccountDrv(ctx *context.Context, tenant, id string) (ap *utils.Account, err error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) SetAccountDrv(ctx *context.Context, ap *utils.Account) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) RemoveAccountDrv(ctx *context.Context, tenant, id string) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) GetConfigSectionsDrv(ctx *context.Context, nodeID string, sectionIDs []string) (sectionMap map[string][]byte, err error) {
+ return nil, utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) SetConfigSectionsDrv(ctx *context.Context, nodeID string, sectionsData map[string][]byte) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// DataDB method not implemented yet
+func (sqls *SQLStorage) RemoveConfigSectionsDrv(ctx *context.Context, nodeID string, sectionIDs []string) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// ConfigDB method not implemented yet
+func (sqls *SQLStorage) GetSection(ctx *context.Context, section string, val any) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// ConfigDB method not implemented yet
+func (sqls *SQLStorage) SetSection(_ *context.Context, section string, jsn any) (err error) {
+ return utils.ErrNotImplemented
+}
+
+// RewriteConfigDB is only intended for InternalDB.
+func (sqls *SQLStorage) RewriteConfigDB() (err error) {
return utils.ErrNotImplemented
}
diff --git a/engine/storage_utils.go b/engine/storage_utils.go
index f6ffc0323..3da7f5d9b 100644
--- a/engine/storage_utils.go
+++ b/engine/storage_utils.go
@@ -31,8 +31,8 @@ import (
// NewDataDBConn creates a DataDB connection
func NewDataDBConn(dbType, host, port, name, user,
- pass, marshaler string, opts *config.DataDBOpts,
- itmsCfg map[string]*config.ItemOpts) (d DataDBDriver, err error) {
+ pass, marshaler string, stringIndexedFields, prefixIndexedFields []string,
+ opts *config.DBOpts, itmsCfg map[string]*config.ItemOpts) (d DataDBDriver, err error) {
switch dbType {
case utils.MetaRedis:
var dbNo int
@@ -52,34 +52,18 @@ func NewDataDBConn(dbType, host, port, name, user,
opts.RedisPoolPipelineWindow, opts.RedisPoolPipelineLimit,
opts.RedisTLS, opts.RedisClientCertificate, opts.RedisClientKey, opts.RedisCACertificate)
case utils.MetaMongo:
- d, err = NewMongoStorage(opts.MongoConnScheme, host, port, name, user, pass, marshaler, utils.DataDB, nil, opts.MongoQueryTimeout)
- case utils.MetaInternal:
- d, err = NewInternalDB(nil, nil, opts.ToTransCacheOpts(), itmsCfg)
- default:
- err = fmt.Errorf("unsupported db_type <%s>", dbType)
- }
- return
-}
-
-// NewStorDBConn returns a StorDB(implements Storage interface) based on dbType
-func NewStorDBConn(dbType, host, port, name, user, pass, marshaler string,
- stringIndexedFields, prefixIndexedFields []string,
- opts *config.StorDBOpts, itmsCfg map[string]*config.ItemOpts) (db StorDB, err error) {
- switch dbType {
- case utils.MetaMongo:
- db, err = NewMongoStorage(opts.MongoConnScheme, host, port, name, user, pass, marshaler, utils.MetaStorDB, stringIndexedFields, opts.MongoQueryTimeout)
+ d, err = NewMongoStorage(opts.MongoConnScheme, host, port, name, user, pass, marshaler, stringIndexedFields, opts.MongoQueryTimeout)
case utils.MetaPostgres:
- db, err = NewPostgresStorage(host, port, name, user, pass, opts.PgSSLMode,
+ d, err = NewPostgresStorage(host, port, name, user, pass, opts.PgSSLMode,
opts.PgSSLCert, opts.PgSSLKey, opts.PgSSLPassword, opts.PgSSLCertMode, opts.PgSSLRootCert,
opts.SQLMaxOpenConns, opts.SQLMaxIdleConns, opts.SQLLogLevel, opts.SQLConnMaxLifetime)
case utils.MetaMySQL:
- db, err = NewMySQLStorage(host, port, name, user, pass, opts.SQLMaxOpenConns, opts.SQLMaxIdleConns,
+ d, err = NewMySQLStorage(host, port, name, user, pass, opts.SQLMaxOpenConns, opts.SQLMaxIdleConns,
opts.SQLLogLevel, opts.SQLConnMaxLifetime, opts.MySQLLocation, opts.SQLDSNParams)
case utils.MetaInternal:
- db, err = NewInternalDB(stringIndexedFields, prefixIndexedFields, opts.ToTransCacheOpts(), itmsCfg)
+ d, err = NewInternalDB(stringIndexedFields, prefixIndexedFields, opts.ToTransCacheOpts(), itmsCfg)
default:
- err = fmt.Errorf("unknown db '%s' valid options are [%s, %s, %s, %s]",
- dbType, utils.MetaMySQL, utils.MetaMongo, utils.MetaPostgres, utils.MetaInternal)
+ err = fmt.Errorf("unsupported db_type <%s>", dbType)
}
return
}
diff --git a/engine/thresholds.go b/engine/thresholds.go
index d8f0f1e96..19cad6c44 100644
--- a/engine/thresholds.go
+++ b/engine/thresholds.go
@@ -642,7 +642,11 @@ func (tS *ThresholdS) V1GetThresholdIDs(ctx *context.Context, args *utils.Tenant
tenant = tS.cfg.GeneralCfg().DefaultTenant
}
prfx := utils.ThresholdPrefix + tenant + utils.ConcatenatedKeySep
- keys, err := tS.dm.DataDB().GetKeysForPrefix(ctx, prfx)
+ dataDB, _, err := tS.dm.DBConns().GetConn(utils.MetaThresholds)
+ if err != nil {
+ return err
+ }
+ keys, err := dataDB.GetKeysForPrefix(ctx, prfx)
if err != nil {
return err
}
diff --git a/engine/tpreader.go b/engine/tpreader.go
index b92768d98..7188b29e2 100644
--- a/engine/tpreader.go
+++ b/engine/tpreader.go
@@ -50,9 +50,8 @@ type TpReader struct {
//schedulerConns []string
}
-func NewTpReader(db DataDB, lr LoadReader, tpid, timezone string,
+func NewTpReader(db *DBConnManager, lr LoadReader, tpid, timezone string,
cacheConns, schedulerConns []string) (*TpReader, error) {
-
tpr := &TpReader{
tpid: tpid,
timezone: timezone,
@@ -359,8 +358,10 @@ func (tpr *TpReader) LoadAll() (err error) {
}
func (tpr *TpReader) WriteToDatabase(verbose, disableReverse bool) (err error) {
- if tpr.dm.dataDB == nil {
- return errors.New("no database connection")
+ for _, db := range tpr.dm.DataDB() {
+ if db == nil {
+ return errors.New("no database connection")
+ }
}
//generate a loadID
loadID := time.Now().UnixNano()
diff --git a/engine/version.go b/engine/version.go
index e73fc2b6d..e5d186ea4 100644
--- a/engine/version.go
+++ b/engine/version.go
@@ -20,6 +20,7 @@ package engine
import (
"fmt"
+ "maps"
"github.com/cgrates/cgrates/utils"
)
@@ -52,8 +53,7 @@ func CheckVersions(storage Storage) error {
// Retrieve the current DB versions.
storType := storage.GetStorageType()
- isDataDB := isDataDB(storage)
- currentVersions := CurrentDBVersions(storType, isDataDB)
+ currentVersions := CurrentDBVersions(storType)
dbVersions, err := storage.GetVersions("")
if err == utils.ErrNotFound {
@@ -70,21 +70,15 @@ func CheckVersions(storage Storage) error {
return err
}
// Compare db versions with current versions.
- message := dbVersions.Compare(currentVersions, storType, isDataDB)
+ message := dbVersions.Compare(currentVersions, storType)
if message != "" {
return fmt.Errorf("Migration needed: please backup cgr data and run: <%s>", message)
}
return nil
}
-// relevant only for mongoDB
-func isDataDB(storage Storage) bool {
- conv, ok := storage.(*MongoStorage)
- return ok && conv.IsDataDB()
-}
-
func setDBVersions(storage Storage, overwrite bool) (err error) {
- x := CurrentDBVersions(storage.GetStorageType(), isDataDB(storage))
+ x := CurrentDBVersions(storage.GetStorageType())
// no data, write version
if err = storage.SetVersions(x, overwrite); err != nil {
utils.Logger.Warning(fmt.Sprintf("Could not write current version to db: %v", err))
@@ -104,7 +98,7 @@ func OverwriteDBVersions(storage Storage) (err error) {
}
// Compare returns the migration message if the versions are not the latest
-func (vers Versions) Compare(curent Versions, storType string, isDataDB bool) string {
+func (vers Versions) Compare(curent Versions, storType string) string {
var message map[string]string
switch storType {
case utils.MetaMongo:
@@ -162,20 +156,15 @@ func CurrentStorDBVersions() Versions {
func CurrentAllDBVersions() Versions {
dataDBVersions := CurrentDataDBVersions()
allVersions := make(Versions)
- for k, v := range dataDBVersions {
- allVersions[k] = v
- }
+ maps.Copy(allVersions, dataDBVersions)
return allVersions
}
// CurrentDBVersions returns versions based on dbType
-func CurrentDBVersions(storType string, isDataDB bool) Versions {
+func CurrentDBVersions(storType string) Versions {
switch storType {
case utils.MetaMongo:
- if isDataDB {
- return CurrentDataDBVersions()
- }
- return CurrentStorDBVersions()
+ return CurrentAllDBVersions()
case utils.MetaInternal:
return CurrentAllDBVersions()
case utils.MetaRedis:
diff --git a/migrator/accounts.go b/migrator/accounts.go
index f60e6d8f6..c0b758ec6 100644
--- a/migrator/accounts.go
+++ b/migrator/accounts.go
@@ -29,8 +29,16 @@ import (
)
func (m *Migrator) migrateCurrentAccounts() (err error) {
+ mInDB, err := m.GetINConn(utils.MetaAccounts)
+ if err != nil {
+ return err
+ }
+ dataDB, _, err := mInDB.DataManager().DBConns().GetConn(utils.MetaAccounts)
+ if err != nil {
+ return err
+ }
var ids []string
- ids, err = m.dmIN.DataManager().DataDB().GetKeysForPrefix(context.TODO(), utils.AccountPrefix)
+ ids, err = dataDB.GetKeysForPrefix(context.TODO(), utils.AccountPrefix)
if err != nil {
return err
}
@@ -39,17 +47,21 @@ func (m *Migrator) migrateCurrentAccounts() (err error) {
if len(tntID) < 2 {
return fmt.Errorf("Invalid key <%s> when migrating from account ", id)
}
- ap, err := m.dmIN.DataManager().GetAccount(context.TODO(), tntID[0], tntID[1])
+ ap, err := mInDB.DataManager().GetAccount(context.TODO(), tntID[0], tntID[1])
if err != nil {
return err
}
if ap == nil || m.dryRun {
continue
}
- if err := m.dmOut.DataManager().SetAccount(context.TODO(), ap, true); err != nil {
+ mOutDB, err := m.GetOUTConn(utils.MetaAccounts)
+ if err != nil {
return err
}
- if err := m.dmIN.DataManager().RemoveAccount(context.TODO(), tntID[0], tntID[1], false); err != nil {
+ if err := mOutDB.DataManager().SetAccount(context.TODO(), ap, true); err != nil {
+ return err
+ }
+ if err := mInDB.DataManager().RemoveAccount(context.TODO(), tntID[0], tntID[1], false); err != nil {
return err
}
m.stats[utils.AccountsString]++
diff --git a/migrator/action_profiles.go b/migrator/action_profiles.go
deleted file mode 100644
index 7c3966d33..000000000
--- a/migrator/action_profiles.go
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
-Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
-Copyright (C) ITsysCOM GmbH
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see
-*/
-
-package migrator
-
-import (
- "fmt"
- "strings"
-
- "github.com/cgrates/birpc/context"
- "github.com/cgrates/cgrates/engine"
-
- "github.com/cgrates/cgrates/utils"
-)
-
-func (m *Migrator) migrateCurrentActionProfiles() (err error) {
- var ids []string
- ids, err = m.dmIN.DataManager().DataDB().GetKeysForPrefix(context.TODO(), utils.ActionProfilePrefix)
- if err != nil {
- return err
- }
- for _, id := range ids {
- tntID := strings.SplitN(strings.TrimPrefix(id, utils.ActionProfilePrefix), utils.InInFieldSep, 2)
- if len(tntID) < 2 {
- return fmt.Errorf("Invalid key <%s> when migrating from action profiles", id)
- }
- ap, err := m.dmIN.DataManager().GetActionProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
- if err != nil {
- return err
- }
- if ap == nil || m.dryRun {
- continue
- }
- if err := m.dmOut.DataManager().SetActionProfile(context.TODO(), ap, true); err != nil {
- return err
- }
- if err := m.dmIN.DataManager().RemoveActionProfile(context.TODO(), tntID[0], tntID[1], false); err != nil {
- return err
- }
- m.stats[utils.ActionProfiles]++
- }
- return
-}
-
-func (m *Migrator) migrateActionProfiles() (err error) {
- var vrs engine.Versions
- current := engine.CurrentDataDBVersions()
- if vrs, err = m.getVersions(utils.ActionProfiles); err != nil {
- return
- }
- migrated := true
- for {
- version := vrs[utils.ActionProfiles]
- for {
- switch version {
- default:
- return fmt.Errorf("Unsupported version %v", version)
- case current[utils.ActionProfiles]:
- migrated = false
- if m.sameDataDB {
- break
- }
- if err = m.migrateCurrentActionProfiles(); err != nil {
- return
- }
- }
- if version == current[utils.ActionProfiles] || err == utils.ErrNoMoreData {
- break
- }
- }
- if err == utils.ErrNoMoreData || !migrated {
- break
- }
- m.stats[utils.ActionProfiles]++
- }
- //All done, update version with current one
- if err = m.setVersions(utils.ActionProfiles); err != nil {
- return
- }
- return m.ensureIndexesDataDB(engine.ColApp)
-}
diff --git a/migrator/attributes.go b/migrator/attributes.go
deleted file mode 100644
index 0b4d27185..000000000
--- a/migrator/attributes.go
+++ /dev/null
@@ -1,556 +0,0 @@
-/*
-Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
-Copyright (C) ITsysCOM GmbH
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see
-*/
-
-package migrator
-
-import (
- "errors"
- "fmt"
- "strings"
-
- "github.com/cgrates/birpc/context"
- "github.com/cgrates/cgrates/engine"
- "github.com/cgrates/cgrates/utils"
-)
-
-type v1Attribute struct {
- FieldName string
- Initial string
- Substitute string
- Append bool
-}
-
-type v1AttributeProfile struct {
- Tenant string
- ID string
- Contexts []string // bind this AttributeProfile to multiple contexts
- FilterIDs []string
- ActivationInterval *utils.ActivationInterval // Activation interval
- Attributes map[string]map[string]*v1Attribute // map[FieldName][InitialValue]*Attribute
- Weight float64
-}
-
-func (m *Migrator) migrateCurrentAttributeProfile() (err error) {
- var ids []string
- ids, err = m.dmIN.DataManager().DataDB().GetKeysForPrefix(context.TODO(), utils.AttributeProfilePrefix)
- if err != nil {
- return err
- }
- for _, id := range ids {
- tntID := strings.SplitN(strings.TrimPrefix(id, utils.AttributeProfilePrefix), utils.InInFieldSep, 2)
- if len(tntID) < 2 {
- return fmt.Errorf("Invalid key <%s> when migrating attributes", id)
- }
- attrPrf, err := m.dmIN.DataManager().GetAttributeProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
- if err != nil {
- return err
- }
- if attrPrf == nil || m.dryRun {
- continue
- }
- if err := m.dmOut.DataManager().SetAttributeProfile(context.TODO(), attrPrf, true); err != nil {
- return err
- }
- if err := m.dmIN.DataManager().RemoveAttributeProfile(context.TODO(), tntID[0],
- tntID[1], false); err != nil {
- return err
- }
- m.stats[utils.Attributes]++
- }
- return
-}
-
-// migrateV1ToV2Attributes migrates attributeProfile from v1 to v2
-// for the moment the system in using the shortcut from v1 to v4
-func (m *Migrator) migrateV1ToV2Attributes() (v2Attr *v2AttributeProfile, err error) {
- var v1Attr *v1AttributeProfile
-
- v1Attr, err = m.dmIN.getV1AttributeProfile()
- if err != nil {
- return nil, err
- } else if v1Attr == nil {
- return nil, errors.New("Attribute NIL")
- }
-
- v2Attr, err = v1Attr.AsAttributeProfile()
- if err != nil {
- return nil, err
- }
-
- return
-}
-
-func (m *Migrator) migrateV1ToV4AttributeProfile() (v4Attr *v4AttributeProfile, err error) {
- var v1Attr *v1AttributeProfile
- v1Attr, err = m.dmIN.getV1AttributeProfile()
- if err != nil {
- return nil, err
- } else if v1Attr == nil {
- return nil, errors.New("Attribute NIL")
- }
-
- v4Attr, err = v1Attr.AsAttributeProfileV1To4()
- if err != nil {
- return nil, err
- }
- return
-}
-
-func (m *Migrator) migrateV2ToV3AttributeProfile(v2Attr *v2AttributeProfile) (v3Attr *v3AttributeProfile, err error) {
- if v2Attr == nil {
- // read data from DataDB
- v2Attr, err = m.dmIN.getV2AttributeProfile()
- if err != nil {
- return nil, err
- }
- }
- //Migrate the AttributeProfile to next version (from v2 to v3)
- v3Attr, err = v2Attr.AsAttributeProfile()
- if err != nil {
- return nil, err
- }
- return v3Attr, nil
-}
-
-func (m *Migrator) migrateV3ToV4AttributeProfile(v3Attr *v3AttributeProfile) (v4Attr *v4AttributeProfile, err error) {
- if v3Attr == nil {
- // read data from DataDB
- v3Attr, err = m.dmIN.getV3AttributeProfile()
- if err != nil {
- return nil, err
- }
- }
- //migrate
- v4Attr, err = v3Attr.AsAttributeProfile()
- if err != nil {
- return nil, err
- }
- return v4Attr, nil
-}
-
-func (m *Migrator) migrateV4ToV5AttributeProfile(v4Attr *v4AttributeProfile) (v5Attr *v6AttributeProfile, err error) {
- if v4Attr == nil {
- // read data from DataDB
- v4Attr, err = m.dmIN.getV4AttributeProfile()
- if err != nil {
- return nil, err
- }
- }
-
- v5Attr, err = v4Attr.AsAttributeProfile()
- if err != nil {
- return nil, err
- }
- return v5Attr, nil
-}
-
-func (m *Migrator) migrateAttributeProfile() (err error) {
- var vrs engine.Versions
- current := engine.CurrentDataDBVersions()
- if vrs, err = m.getVersions(utils.Attributes); err != nil {
- return
- }
-
- migrated := true
- var v2Attr *v2AttributeProfile
- var v3Attr *v3AttributeProfile
- var v4Attr *v4AttributeProfile
- var v5Attr *v6AttributeProfile
- var v6Attr *v6AttributeProfile
- var v7Attr *utils.AttributeProfile
- for {
- // One attribute profile at a time
- version := vrs[utils.Attributes]
- for {
- //Keep migrating until Attribute Profile reaches latest version
- switch version {
- default:
- return fmt.Errorf("Unsupported version %v", version)
- case current[utils.Attributes]:
- migrated = false
- if m.sameDataDB {
- break
- }
- if err = m.migrateCurrentAttributeProfile(); err != nil { //generator like v1,2,3,4
- return
- }
- case 1: // Migrate from V1 to V4
- if v4Attr, err = m.migrateV1ToV4AttributeProfile(); err != nil && err != utils.ErrNoMoreData {
- return
- } else if err == utils.ErrNoMoreData {
- break
- }
- //Update to version to 4 (shortcut)
- version = 4
- case 2: // Migrate from V2 to V3 (fallthrough untill latest version)
- if v3Attr, err = m.migrateV2ToV3AttributeProfile(v2Attr); err != nil && err != utils.ErrNoMoreData {
- return
- } else if err == utils.ErrNoMoreData {
- break
- }
- version = 3
- fallthrough
- case 3: // Migrate from V3 to V4
- if v4Attr, err = m.migrateV3ToV4AttributeProfile(v3Attr); err != nil && err != utils.ErrNoMoreData {
- return
- } else if err == utils.ErrNoMoreData {
- break
- }
- version = 4
- fallthrough
- case 4: // Migrate from V4 to V5
- if v5Attr, err = m.migrateV4ToV5AttributeProfile(v4Attr); err != nil && err != utils.ErrNoMoreData {
- return
- } else if err == utils.ErrNoMoreData {
- break
- }
- version = 5
- fallthrough
- case 5:
- if v6Attr, err = m.migrateV5ToV6AttributeProfile(v5Attr); err != nil && err != utils.ErrNoMoreData {
- return
- } else if err == utils.ErrNoMoreData {
- break
- }
- version = 6
- fallthrough
- case 6:
- if v7Attr, err = m.migrateV6ToV7AttributeProfile(v6Attr); err != nil && err != utils.ErrNoMoreData {
- return
- } else if err == utils.ErrNoMoreData {
- break
- }
- version = 7
- }
-
- if version == current[utils.Attributes] || err == utils.ErrNoMoreData {
- break
- }
- }
- if err == utils.ErrNoMoreData || !migrated {
- break
- }
-
- if !m.dryRun {
- for _, attr := range v7Attr.Attributes {
- if attr.Path == utils.EmptyString { // we do not suppot empty Path in Attributes
- err = fmt.Errorf("the AttributeProfile <%s> was not migrated corectly", v7Attr.TenantID())
- return
- }
- }
- if vrs[utils.Attributes] == 1 {
- if err = m.dmOut.DataManager().DataDB().SetAttributeProfileDrv(context.TODO(), v7Attr); err != nil {
- return
- }
- }
- // Set the fresh-migrated AttributeProfile into DB
- if err = m.dmOut.DataManager().SetAttributeProfile(context.TODO(), v7Attr, true); err != nil {
- return err
- }
- }
- m.stats[utils.Attributes]++
-
- }
- if m.dryRun || !migrated {
- return nil
- }
- // All done, update version with current one
- if err = m.setVersions(utils.Attributes); err != nil {
- return
- }
- return m.ensureIndexesDataDB(engine.ColAttr)
-
-}
-
-func (v1AttrPrf v1AttributeProfile) AsAttributeProfile() (attrPrf *v2AttributeProfile, err error) {
- attrPrf = &v2AttributeProfile{
- Tenant: v1AttrPrf.Tenant,
- ID: v1AttrPrf.ID,
- Contexts: v1AttrPrf.Contexts,
- FilterIDs: v1AttrPrf.FilterIDs,
- ActivationInterval: v1AttrPrf.ActivationInterval,
- Weight: v1AttrPrf.Weight,
- }
- for _, mp := range v1AttrPrf.Attributes {
- for _, attr := range mp {
-
- sbstPrsr, err := utils.NewRSRParsers(attr.Substitute, utils.RSRSep)
- if err != nil {
- return nil, err
- }
- attrPrf.Attributes = append(attrPrf.Attributes, &v2Attribute{
- FieldName: attr.FieldName,
- Initial: attr.Initial,
- Substitute: sbstPrsr,
- Append: attr.Append,
- })
- }
- }
- return
-}
-
-func (v2AttrPrf v2AttributeProfile) AsAttributeProfile() (attrPrf *v3AttributeProfile, err error) {
- attrPrf = &v3AttributeProfile{
- Tenant: v2AttrPrf.Tenant,
- ID: v2AttrPrf.ID,
- Contexts: v2AttrPrf.Contexts,
- FilterIDs: v2AttrPrf.FilterIDs,
- ActivationInterval: v2AttrPrf.ActivationInterval,
- Weight: v2AttrPrf.Weight,
- }
- for _, attr := range v2AttrPrf.Attributes {
- filterIDs := make([]string, 0)
- //append false translate to if FieldName exist do stuff
- if attr.Append == false {
- filterIDs = append(filterIDs, utils.MetaExists+utils.InInFieldSep+attr.FieldName+utils.InInFieldSep)
- }
- //Initial not *any translate to if value of fieldName = initial do stuff
- initial := utils.IfaceAsString(attr.Initial)
- if initial != utils.MetaAny {
- filterIDs = append(filterIDs, utils.MetaString+utils.InInFieldSep+attr.FieldName+utils.InInFieldSep+initial)
- }
-
- attrPrf.Attributes = append(attrPrf.Attributes, &v3Attribute{
- FilterIDs: filterIDs,
- FieldName: attr.FieldName,
- Substitute: attr.Substitute,
- })
- }
- return
-}
-
-func (v1AttrPrf v1AttributeProfile) AsAttributeProfileV1To4() (attrPrf *v4AttributeProfile, err error) {
- attrPrf = &v4AttributeProfile{
- Tenant: v1AttrPrf.Tenant,
- ID: v1AttrPrf.ID,
- Contexts: v1AttrPrf.Contexts,
- FilterIDs: v1AttrPrf.FilterIDs,
- ActivationInterval: v1AttrPrf.ActivationInterval,
- Weight: v1AttrPrf.Weight,
- Blocker: false,
- }
- for _, mp := range v1AttrPrf.Attributes {
- for _, attr := range mp {
- // Create FilterIDs []string
- filterIDs := make([]string, 0)
- //append false translate to if FieldName exist do stuff
- if attr.Append == false {
- filterIDs = append(filterIDs, utils.MetaExists+utils.ConcatenatedKeySep+attr.FieldName+utils.ConcatenatedKeySep)
- }
- //Initial not *any translate to if value of fieldName = initial do stuff
- if attr.Initial != utils.MetaAny {
- filterIDs = append(filterIDs, utils.MetaString+utils.ConcatenatedKeySep+attr.FieldName+utils.ConcatenatedKeySep+attr.Initial)
- }
- // create RSRParser
- sbstPrsr, err := utils.NewRSRParsers(attr.Substitute, utils.RSRSep)
- if err != nil {
- return nil, err
- }
-
- attrPrf.Attributes = append(attrPrf.Attributes, &v4Attribute{
- FilterIDs: filterIDs,
- FieldName: attr.FieldName,
- Type: utils.MetaVariable,
- Value: sbstPrsr,
- })
- }
- }
- return
-}
-
-func (v3AttrPrf v3AttributeProfile) AsAttributeProfile() (attrPrf *v4AttributeProfile, err error) {
- attrPrf = &v4AttributeProfile{
- Tenant: v3AttrPrf.Tenant,
- ID: v3AttrPrf.ID,
- Contexts: v3AttrPrf.Contexts,
- FilterIDs: v3AttrPrf.FilterIDs,
- ActivationInterval: v3AttrPrf.ActivationInterval,
- Weight: v3AttrPrf.Weight,
- Blocker: false,
- }
- for _, attr := range v3AttrPrf.Attributes {
- attrPrf.Attributes = append(attrPrf.Attributes, &v4Attribute{
- FilterIDs: attr.FilterIDs,
- FieldName: attr.FieldName,
- Type: utils.MetaVariable,
- Value: attr.Substitute,
- })
- }
- return
-}
-
-func (v4AttrPrf v4AttributeProfile) AsAttributeProfile() (attrPrf *v6AttributeProfile, err error) {
- attrPrf = &v6AttributeProfile{
- Tenant: v4AttrPrf.Tenant,
- ID: v4AttrPrf.ID,
- Contexts: v4AttrPrf.Contexts,
- FilterIDs: v4AttrPrf.FilterIDs,
- Weight: v4AttrPrf.Weight,
- }
- for _, attr := range v4AttrPrf.Attributes { // ToDo:redo this
- val := attr.Value.GetRule()
- rsrVal := attr.Value
- if strings.HasPrefix(val, utils.DynamicDataPrefix) {
- val = val[1:] // remove the DynamicDataPrefix
- val = utils.DynamicDataPrefix + utils.MetaReq + utils.NestingSep + val
- rsrVal, err = utils.NewRSRParsers(val, utils.RSRSep)
- if err != nil {
- return nil, err
- }
- }
- var path string
- if attr.FieldName != utils.EmptyString {
- path = utils.MetaReq + utils.NestingSep + attr.FieldName
- }
- attrPrf.Attributes = append(attrPrf.Attributes, &v6Attribute{
- FilterIDs: attr.FilterIDs,
- Path: path,
- Value: rsrVal,
- Type: attr.Type,
- })
- }
- return
-}
-
-type v2Attribute struct {
- FieldName string
- Initial any
- Substitute utils.RSRParsers
- Append bool
-}
-
-type v2AttributeProfile struct {
- Tenant string
- ID string
- Contexts []string // bind this AttributeProfile to multiple contexts
- FilterIDs []string
- ActivationInterval *utils.ActivationInterval // Activation interval
- Attributes []*v2Attribute
- Weight float64
-}
-
-type v3Attribute struct {
- FilterIDs []string
- FieldName string
- Substitute utils.RSRParsers
-}
-
-type v3AttributeProfile struct {
- Tenant string
- ID string
- Contexts []string // bind this AttributeProfile to multiple contexts
- FilterIDs []string
- ActivationInterval *utils.ActivationInterval // Activation interval
- Attributes []*v3Attribute
- Weight float64
-}
-
-type v4Attribute struct {
- FilterIDs []string
- FieldName string
- Type string
- Value utils.RSRParsers
-}
-
-type v4AttributeProfile struct {
- Tenant string
- ID string
- Contexts []string // bind this AttributeProfile to multiple contexts
- FilterIDs []string
- ActivationInterval *utils.ActivationInterval // Activation interval
- Attributes []*v4Attribute
- Blocker bool // blocker flag to stop processing on multiple runs
- Weight float64
-}
-
-// Attribute used by AttributeProfile to describe a single attribute
-type v6Attribute struct {
- FilterIDs []string
- Path string
- Type string
- Value utils.RSRParsers
-}
-
-// AttributeProfile the profile definition for the attributes
-type v6AttributeProfile struct {
- Tenant string
- ID string
- Contexts []string
- FilterIDs []string
- Attributes []*v6Attribute
- Blocker bool // blocker flag to stop processing on multiple runs
- Weight float64
-}
-
-func (m *Migrator) migrateV5ToV6AttributeProfile(v5Attr *v6AttributeProfile) (_ *v6AttributeProfile, err error) {
- if v5Attr == nil {
- // read data from DataDB
- if v5Attr, err = m.dmIN.getV5AttributeProfile(); err != nil {
- return
- }
- }
- if v5Attr.FilterIDs, err = migrateInlineFilterV4(v5Attr.FilterIDs); err != nil {
- return
- }
- return v5Attr, nil
-}
-
-func (m *Migrator) migrateV6ToV7AttributeProfile(v6Attr *v6AttributeProfile) (_ *utils.AttributeProfile, err error) {
-
- if v6Attr == nil {
- // read data from DataDB
- if v6Attr, err = m.dmIN.getV5AttributeProfile(); err != nil {
- return
- }
- }
-
- return v6Attr.AsAttributeProfile(), nil
-}
-
-func (v6AttrPrf v6AttributeProfile) AsAttributeProfile() (attrPrf *utils.AttributeProfile) {
- fltr := "*string:~*opts.*context:"
- for _, ctx := range v6AttrPrf.Contexts {
- if ctx != utils.MetaAny {
- fltr += ctx + utils.PipeSep
- }
- }
-
- attrPrf = &utils.AttributeProfile{
- Tenant: v6AttrPrf.Tenant,
- ID: v6AttrPrf.ID,
- FilterIDs: v6AttrPrf.FilterIDs,
- Attributes: make([]*utils.Attribute, len(v6AttrPrf.Attributes)),
- }
- attrPrf.Blockers = make(utils.DynamicBlockers, 1)
- attrPrf.Blockers[0].Blocker = v6AttrPrf.Blocker
- attrPrf.Weights = make(utils.DynamicWeights, 1)
- attrPrf.Weights[0].Weight = v6AttrPrf.Weight
- if strings.HasSuffix(fltr, utils.PipeSep) {
- attrPrf.FilterIDs = append(attrPrf.FilterIDs, strings.TrimSuffix(fltr, utils.PipeSep))
- }
-
- for idx, attr := range v6AttrPrf.Attributes {
- attrPrf.Attributes[idx] = &utils.Attribute{
- FilterIDs: attr.FilterIDs,
- Path: attr.Path,
- Type: attr.Type,
- Value: attr.Value,
- }
- }
- return
-}
diff --git a/migrator/chargers.go b/migrator/chargers.go
index c08db7643..3f226fca6 100644
--- a/migrator/chargers.go
+++ b/migrator/chargers.go
@@ -29,8 +29,16 @@ import (
)
func (m *Migrator) migrateCurrentCharger() (err error) {
+ mInDB, err := m.GetINConn(utils.MetaChargerProfiles)
+ if err != nil {
+ return err
+ }
+ dataDB, _, err := mInDB.DataManager().DBConns().GetConn(utils.MetaChargerProfiles)
+ if err != nil {
+ return err
+ }
var ids []string
- ids, err = m.dmIN.DataManager().DataDB().GetKeysForPrefix(context.TODO(), utils.ChargerProfilePrefix)
+ ids, err = dataDB.GetKeysForPrefix(context.TODO(), utils.ChargerProfilePrefix)
if err != nil {
return err
}
@@ -39,17 +47,21 @@ func (m *Migrator) migrateCurrentCharger() (err error) {
if len(tntID) < 2 {
return fmt.Errorf("Invalid key <%s> when migrating chargers", id)
}
- cpp, err := m.dmIN.DataManager().GetChargerProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
+ cpp, err := mInDB.DataManager().GetChargerProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
if err != nil {
return err
}
if cpp == nil || m.dryRun {
continue
}
- if err := m.dmOut.DataManager().SetChargerProfile(context.TODO(), cpp, true); err != nil {
+ mOutDB, err := m.GetOUTConn(utils.MetaChargerProfiles)
+ if err != nil {
return err
}
- if err := m.dmIN.DataManager().RemoveChargerProfile(context.TODO(), tntID[0],
+ if err := mOutDB.DataManager().SetChargerProfile(context.TODO(), cpp, true); err != nil {
+ return err
+ }
+ if err := mInDB.DataManager().RemoveChargerProfile(context.TODO(), tntID[0],
tntID[1], false); err != nil {
return err
}
@@ -98,8 +110,12 @@ func (m *Migrator) migrateChargers() (err error) {
if !m.dryRun {
//set action plan
- if err = m.dmOut.DataManager().SetChargerProfile(context.TODO(), v2, true); err != nil {
- return
+ mOutDB, err := m.GetOUTConn(utils.MetaChargerProfiles)
+ if err != nil {
+ return err
+ }
+ if err = mOutDB.DataManager().SetChargerProfile(context.TODO(), v2, true); err != nil {
+ return err
}
}
m.stats[utils.Chargers]++
@@ -112,7 +128,11 @@ func (m *Migrator) migrateChargers() (err error) {
}
func (m *Migrator) migrateV1ToV2Chargers() (v4Cpp *utils.ChargerProfile, err error) {
- v4Cpp, err = m.dmIN.getV1ChargerProfile()
+ mInDB, err := m.GetINConn(utils.MetaChargerProfiles)
+ if err != nil {
+ return
+ }
+ v4Cpp, err = mInDB.getV1ChargerProfile()
if err != nil {
return nil, err
} else if v4Cpp == nil {
diff --git a/migrator/filters.go b/migrator/filters.go
index a42c9219e..dd53ad21e 100644
--- a/migrator/filters.go
+++ b/migrator/filters.go
@@ -29,8 +29,16 @@ import (
)
func (m *Migrator) migrateCurrentRequestFilter() (err error) {
+ mInDB, err := m.GetINConn(utils.MetaFilters)
+ if err != nil {
+ return err
+ }
+ dataDB, _, err := mInDB.DataManager().DBConns().GetConn(utils.MetaFilters)
+ if err != nil {
+ return err
+ }
var ids []string
- ids, err = m.dmIN.DataManager().DataDB().GetKeysForPrefix(context.TODO(), utils.FilterPrefix)
+ ids, err = dataDB.GetKeysForPrefix(context.TODO(), utils.FilterPrefix)
if err != nil {
return
}
@@ -40,19 +48,23 @@ func (m *Migrator) migrateCurrentRequestFilter() (err error) {
return fmt.Errorf("Invalid key <%s> when migrating filters", id)
}
var fl *engine.Filter
- if fl, err = m.dmIN.DataManager().GetFilter(context.TODO(), tntID[0], tntID[1], false, false,
+ if fl, err = mInDB.DataManager().GetFilter(context.TODO(), tntID[0], tntID[1], false, false,
utils.NonTransactional); err != nil {
return
}
if m.dryRun || fl == nil {
continue
}
- if err = m.dmOut.DataManager().SetFilter(context.TODO(), fl, true); err != nil {
- return
+ mOutDB, err := m.GetOUTConn(utils.MetaFilters)
+ if err != nil {
+ return err
}
- if err = m.dmIN.DataManager().RemoveFilter(context.TODO(), tntID[0], tntID[1],
+ if err = mOutDB.DataManager().SetFilter(context.TODO(), fl, true); err != nil {
+ return err
+ }
+ if err = mInDB.DataManager().RemoveFilter(context.TODO(), tntID[0], tntID[1],
true); err != nil {
- return
+ return err
}
m.stats[utils.RQF]++
}
@@ -185,21 +197,9 @@ func migrateInlineFilterV2(fl string) string {
}
func (m *Migrator) migrateOthersv1() (err error) {
- if err = m.migrateResourceProfileFiltersV1(); err != nil {
- return err
- }
if err = m.migrateStatQueueProfileFiltersV1(); err != nil {
return err
}
- if err = m.migrateThresholdsProfileFiltersV1(); err != nil {
- return err
- }
- if err = m.migrateSupplierProfileFiltersV1(); err != nil {
- return err
- }
- if err = m.migrateAttributeProfileFiltersV1(); err != nil {
- return err
- }
if err = m.migrateChargerProfileFiltersV1(); err != nil {
return err
}
@@ -207,8 +207,12 @@ func (m *Migrator) migrateOthersv1() (err error) {
}
func (m *Migrator) migrateRequestFilterV1() (fltr *engine.Filter, err error) {
+ mInDB, err := m.GetINConn(utils.MetaFilters)
+ if err != nil {
+ return nil, err
+ }
var v1Fltr *v1Filter
- if v1Fltr, err = m.dmIN.getV1Filter(); err != nil {
+ if v1Fltr, err = mInDB.getV1Filter(); err != nil {
return
}
if v1Fltr == nil {
@@ -219,26 +223,10 @@ func (m *Migrator) migrateRequestFilterV1() (fltr *engine.Filter, err error) {
}
func (m *Migrator) migrateOthersV2() (err error) {
- if err = m.migrateResourceProfileFiltersV2(); err != nil {
- return fmt.Errorf("Error: <%s> when trying to migrate filter for ResourceProfiles",
- err.Error())
- }
if err = m.migrateStatQueueProfileFiltersV2(); err != nil {
return fmt.Errorf("Error: <%s> when trying to migrate filter for StatQueueProfiles",
err.Error())
}
- if err = m.migrateThresholdsProfileFiltersV2(); err != nil {
- return fmt.Errorf("Error: <%s> when trying to migrate filter for ThresholdProfiles",
- err.Error())
- }
- if err = m.migrateSupplierProfileFiltersV2(); err != nil {
- return fmt.Errorf("Error: <%s> when trying to migrate filter for SupplierProfiles",
- err.Error())
- }
- if err = m.migrateAttributeProfileFiltersV2(); err != nil {
- return fmt.Errorf("Error: <%s> when trying to migrate filter for AttributeProfiles",
- err.Error())
- }
if err = m.migrateChargerProfileFiltersV2(); err != nil {
return fmt.Errorf("Error: <%s> when trying to migrate filter for ChargerProfiles",
err.Error())
@@ -247,8 +235,12 @@ func (m *Migrator) migrateOthersV2() (err error) {
}
func (m *Migrator) migrateRequestFilterV2() (fltr *engine.Filter, err error) {
+ mInDB, err := m.GetINConn(utils.MetaFilters)
+ if err != nil {
+ return nil, err
+ }
var v1Fltr *v1Filter
- if v1Fltr, err = m.dmIN.getV1Filter(); err != nil {
+ if v1Fltr, err = mInDB.getV1Filter(); err != nil {
return nil, err
}
if err == utils.ErrNoMoreData {
@@ -259,8 +251,12 @@ func (m *Migrator) migrateRequestFilterV2() (fltr *engine.Filter, err error) {
}
func (m *Migrator) migrateRequestFilterV3() (fltr *engine.Filter, err error) {
+ mInDB, err := m.GetINConn(utils.MetaFilters)
+ if err != nil {
+ return nil, err
+ }
var v1Fltr *v1Filter
- if v1Fltr, err = m.dmIN.getV1Filter(); err != nil {
+ if v1Fltr, err = mInDB.getV1Filter(); err != nil {
return nil, err
}
if v1Fltr == nil {
@@ -353,38 +349,17 @@ func (m *Migrator) migrateFilters() (err error) {
return m.ensureIndexesDataDB(engine.ColFlt)
}
-func (m *Migrator) migrateResourceProfileFiltersV1() (err error) {
- var ids []string
- ids, err = m.dmIN.DataManager().DataDB().GetKeysForPrefix(context.TODO(), utils.ResourceProfilesPrefix)
+func (m *Migrator) migrateStatQueueProfileFiltersV1() (err error) {
+ mInDB, err := m.GetINConn(utils.MetaStatQueueProfiles)
if err != nil {
return err
}
- for _, id := range ids {
- tntID := strings.SplitN(strings.TrimPrefix(id, utils.ResourceProfilesPrefix), utils.InInFieldSep, 2)
- if len(tntID) < 2 {
- return fmt.Errorf("Invalid key <%s> when migrating filter for resourceProfile", id)
- }
- res, err := m.dmIN.DataManager().GetResourceProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
- if err != nil {
- return err
- }
- if m.dryRun || res == nil {
- continue
- }
- for i, fl := range res.FilterIDs {
- res.FilterIDs[i] = migrateInlineFilter(fl)
- }
- if err := m.dmOut.DataManager().SetResourceProfile(context.TODO(), res, true); err != nil {
- return err
- }
- m.stats[utils.RQF]++
+ dataDB, _, err := mInDB.DataManager().DBConns().GetConn(utils.MetaStatQueueProfiles)
+ if err != nil {
+ return err
}
- return
-}
-
-func (m *Migrator) migrateStatQueueProfileFiltersV1() (err error) {
var ids []string
- ids, err = m.dmIN.DataManager().DataDB().GetKeysForPrefix(context.TODO(), utils.StatQueueProfilePrefix)
+ ids, err = dataDB.GetKeysForPrefix(context.TODO(), utils.StatQueueProfilePrefix)
if err != nil {
return err
}
@@ -393,7 +368,11 @@ func (m *Migrator) migrateStatQueueProfileFiltersV1() (err error) {
if len(tntID) < 2 {
return fmt.Errorf("Invalid key <%s> when migrating filter for statQueueProfile", id)
}
- sgs, err := m.dmIN.DataManager().GetStatQueueProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
+ mInDB, err := m.GetINConn(utils.MetaStatQueueProfiles)
+ if err != nil {
+ return err
+ }
+ sgs, err := mInDB.DataManager().GetStatQueueProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
if err != nil {
return err
}
@@ -403,100 +382,11 @@ func (m *Migrator) migrateStatQueueProfileFiltersV1() (err error) {
for i, fl := range sgs.FilterIDs {
sgs.FilterIDs[i] = migrateInlineFilter(fl)
}
- if err = m.dmOut.DataManager().SetStatQueueProfile(context.TODO(), sgs, true); err != nil {
- return err
- }
- m.stats[utils.RQF]++
- }
- return
-}
-
-func (m *Migrator) migrateThresholdsProfileFiltersV1() (err error) {
- var ids []string
- ids, err = m.dmIN.DataManager().DataDB().GetKeysForPrefix(context.TODO(), utils.ThresholdProfilePrefix)
- if err != nil {
- return err
- }
- for _, id := range ids {
- tntID := strings.SplitN(strings.TrimPrefix(id, utils.ThresholdProfilePrefix), utils.InInFieldSep, 2)
- if len(tntID) < 2 {
- return fmt.Errorf("Invalid key <%s> when migrating filter for thresholdProfile", id)
- }
- ths, err := m.dmIN.DataManager().GetThresholdProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
+ mOutDB, err := m.GetOUTConn(utils.MetaStatQueueProfiles)
if err != nil {
return err
}
- if ths == nil || m.dryRun {
- continue
- }
- for i, fl := range ths.FilterIDs {
- ths.FilterIDs[i] = migrateInlineFilter(fl)
- }
- if err := m.dmOut.DataManager().SetThresholdProfile(context.TODO(), ths, true); err != nil {
- return err
- }
- m.stats[utils.RQF]++
- }
- return
-}
-
-func (m *Migrator) migrateSupplierProfileFiltersV1() (err error) {
- for {
- var spp *SupplierProfile
- spp, err = m.dmIN.getSupplier()
- if err == utils.ErrNoMoreData {
- err = nil
- break
- }
- if err != nil {
- return err
- }
- if spp == nil || m.dryRun {
- continue
- }
- for i, fl := range spp.FilterIDs {
- spp.FilterIDs[i] = migrateInlineFilter(fl)
- }
- if err := m.dmOut.setSupplier(spp); err != nil {
- return err
- }
- m.stats[utils.RQF]++
- }
- return
-}
-
-func (m *Migrator) migrateAttributeProfileFiltersV1() (err error) {
- var ids []string
- ids, err = m.dmIN.DataManager().DataDB().GetKeysForPrefix(context.TODO(), utils.AttributeProfilePrefix)
- if err != nil {
- return err
- }
- for _, id := range ids {
- tntID := strings.SplitN(strings.TrimPrefix(id, utils.AttributeProfilePrefix), utils.InInFieldSep, 2)
- if len(tntID) < 2 {
- return fmt.Errorf("Invalid key <%s> when migrating filter for attributeProfile", id)
- }
- attrPrf, err := m.dmIN.DataManager().GetAttributeProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
- if err != nil {
- return err
- }
- if attrPrf == nil || m.dryRun {
- continue
- }
- for i, fl := range attrPrf.FilterIDs {
- attrPrf.FilterIDs[i] = migrateInlineFilter(fl)
- }
- for i, attr := range attrPrf.Attributes {
- if attr.Path == utils.EmptyString {
- // in case of older version of attributes we do not have Path
- // stop the inline migration until the attributes are migrated
- return fmt.Errorf("error: when migrating filter for attribute profile: <%s>", attrPrf.TenantID())
- }
- for j, fl := range attr.FilterIDs {
- attrPrf.Attributes[i].FilterIDs[j] = migrateInlineFilter(fl)
- }
- }
- if err := m.dmOut.DataManager().SetAttributeProfile(context.TODO(), attrPrf, true); err != nil {
+ if err = mOutDB.DataManager().SetStatQueueProfile(context.TODO(), sgs, true); err != nil {
return err
}
m.stats[utils.RQF]++
@@ -505,8 +395,16 @@ func (m *Migrator) migrateAttributeProfileFiltersV1() (err error) {
}
func (m *Migrator) migrateChargerProfileFiltersV1() (err error) {
+ mInDB, err := m.GetINConn(utils.MetaChargerProfiles)
+ if err != nil {
+ return err
+ }
+ dataDB, _, err := mInDB.DataManager().DBConns().GetConn(utils.MetaChargerProfiles)
+ if err != nil {
+ return err
+ }
var ids []string
- ids, err = m.dmIN.DataManager().DataDB().GetKeysForPrefix(context.TODO(), utils.ChargerProfilePrefix)
+ ids, err = dataDB.GetKeysForPrefix(context.TODO(), utils.ChargerProfilePrefix)
if err != nil {
return err
}
@@ -515,7 +413,7 @@ func (m *Migrator) migrateChargerProfileFiltersV1() (err error) {
if len(tntID) < 2 {
return fmt.Errorf("Invalid key <%s> when migrating filter for chragerProfile", id)
}
- cpp, err := m.dmIN.DataManager().GetChargerProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
+ cpp, err := mInDB.DataManager().GetChargerProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
if err != nil {
return err
}
@@ -525,7 +423,11 @@ func (m *Migrator) migrateChargerProfileFiltersV1() (err error) {
for i, fl := range cpp.FilterIDs {
cpp.FilterIDs[i] = migrateInlineFilter(fl)
}
- if err := m.dmOut.DataManager().SetChargerProfile(context.TODO(), cpp, true); err != nil {
+ mOutDB, err := m.GetOUTConn(utils.MetaChargerProfiles)
+ if err != nil {
+ return err
+ }
+ if err := mOutDB.DataManager().SetChargerProfile(context.TODO(), cpp, true); err != nil {
return err
}
m.stats[utils.RQF]++
@@ -533,41 +435,17 @@ func (m *Migrator) migrateChargerProfileFiltersV1() (err error) {
return
}
-// migrate filters from v2 to v3 for items
-func (m *Migrator) migrateResourceProfileFiltersV2() (err error) {
- var ids []string
- ids, err = m.dmIN.DataManager().DataDB().GetKeysForPrefix(context.TODO(), utils.ResourceProfilesPrefix)
- if err != nil {
- return fmt.Errorf("error: <%s> when getting resource profile IDs", err.Error())
- }
- for _, id := range ids {
- tntID := strings.SplitN(strings.TrimPrefix(id, utils.ResourceProfilesPrefix), utils.InInFieldSep, 2)
- if len(tntID) < 2 {
- return fmt.Errorf("Invalid key <%s> when migrating filter for resourcerProfile", id)
- }
- res, err := m.dmIN.DataManager().GetResourceProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
- if err != nil {
- return fmt.Errorf("error: <%s> when getting resource profile with tenant: <%s> and id: <%s>",
- err.Error(), tntID[0], tntID[1])
- }
- if m.dryRun || res == nil {
- continue
- }
- for i, fl := range res.FilterIDs {
- res.FilterIDs[i] = migrateInlineFilterV2(fl)
- }
- if err := m.dmOut.DataManager().SetResourceProfile(context.TODO(), res, true); err != nil {
- return fmt.Errorf("error: <%s> when setting resource profile with tenant: <%s> and id: <%s>",
- err.Error(), tntID[0], tntID[1])
- }
- m.stats[utils.RQF]++
- }
- return
-}
-
func (m *Migrator) migrateStatQueueProfileFiltersV2() (err error) {
+ mInDB, err := m.GetINConn(utils.MetaStatQueueProfiles)
+ if err != nil {
+ return err
+ }
+ dataDB, _, err := mInDB.DataManager().DBConns().GetConn(utils.MetaStatQueueProfiles)
+ if err != nil {
+ return err
+ }
var ids []string
- ids, err = m.dmIN.DataManager().DataDB().GetKeysForPrefix(context.TODO(), utils.StatQueueProfilePrefix)
+ ids, err = dataDB.GetKeysForPrefix(context.TODO(), utils.StatQueueProfilePrefix)
if err != nil {
return fmt.Errorf("error: <%s> when getting statQueue profile IDs", err.Error())
}
@@ -576,7 +454,7 @@ func (m *Migrator) migrateStatQueueProfileFiltersV2() (err error) {
if len(tntID) < 2 {
return fmt.Errorf("Invalid key <%s> when migrating filter for statQueueProfile", id)
}
- sgs, err := m.dmIN.DataManager().GetStatQueueProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
+ sgs, err := mInDB.DataManager().GetStatQueueProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
if err != nil {
return fmt.Errorf("error: <%s> when getting statQueue profile with tenant: <%s> and id: <%s>",
err.Error(), tntID[0], tntID[1])
@@ -587,7 +465,11 @@ func (m *Migrator) migrateStatQueueProfileFiltersV2() (err error) {
for i, fl := range sgs.FilterIDs {
sgs.FilterIDs[i] = migrateInlineFilterV2(fl)
}
- if err = m.dmOut.DataManager().SetStatQueueProfile(context.TODO(), sgs, true); err != nil {
+ mOutDB, err := m.GetOUTConn(utils.MetaStatQueueProfiles)
+ if err != nil {
+ return err
+ }
+ if err = mOutDB.DataManager().SetStatQueueProfile(context.TODO(), sgs, true); err != nil {
return fmt.Errorf("error: <%s> when setting statQueue profile with tenant: <%s> and id: <%s>",
err.Error(), tntID[0], tntID[1])
}
@@ -596,107 +478,17 @@ func (m *Migrator) migrateStatQueueProfileFiltersV2() (err error) {
return
}
-func (m *Migrator) migrateThresholdsProfileFiltersV2() (err error) {
- var ids []string
- ids, err = m.dmIN.DataManager().DataDB().GetKeysForPrefix(context.TODO(), utils.ThresholdProfilePrefix)
- if err != nil {
- return fmt.Errorf("error: <%s> when getting threshold profile IDs", err)
- }
- for _, id := range ids {
- tntID := strings.SplitN(strings.TrimPrefix(id, utils.ThresholdProfilePrefix), utils.InInFieldSep, 2)
- if len(tntID) < 2 {
- return fmt.Errorf("Invalid key <%s> when migrating filter for thresholdProfile", id)
- }
- ths, err := m.dmIN.DataManager().GetThresholdProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
- if err != nil {
- return fmt.Errorf("error: <%s> when getting threshold profile with tenant: <%s> and id: <%s>",
- err.Error(), tntID[0], tntID[1])
- }
- if ths == nil || m.dryRun {
- continue
- }
- for i, fl := range ths.FilterIDs {
- ths.FilterIDs[i] = migrateInlineFilterV2(fl)
- }
- if err := m.dmOut.DataManager().SetThresholdProfile(context.TODO(), ths, true); err != nil {
- return fmt.Errorf("error: <%s> when setting threshold profile with tenant: <%s> and id: <%s>",
- err.Error(), tntID[0], tntID[1])
- }
- m.stats[utils.RQF]++
- }
- return
-}
-
-func (m *Migrator) migrateSupplierProfileFiltersV2() (err error) {
- for {
- var spp *SupplierProfile
- spp, err = m.dmIN.getSupplier()
- if err == utils.ErrNoMoreData {
- err = nil
- break
- }
- if err != nil {
- return err
- }
- if spp == nil || m.dryRun {
- continue
- }
- for i, fl := range spp.FilterIDs {
- spp.FilterIDs[i] = migrateInlineFilterV2(fl)
- }
- if err := m.dmOut.setSupplier(spp); err != nil {
- return fmt.Errorf("error: <%s> when setting supplier profile with tenant: <%s> and id: <%s>",
- err.Error(), spp.Tenant, spp.ID)
- }
- m.stats[utils.RQF]++
- }
- return
-}
-
-func (m *Migrator) migrateAttributeProfileFiltersV2() (err error) {
- var ids []string
- ids, err = m.dmIN.DataManager().DataDB().GetKeysForPrefix(context.TODO(), utils.AttributeProfilePrefix)
- if err != nil {
- return fmt.Errorf("error: <%s> when getting attribute profile IDs", err)
- }
- for _, id := range ids {
- tntID := strings.SplitN(strings.TrimPrefix(id, utils.AttributeProfilePrefix), utils.InInFieldSep, 2)
- if len(tntID) < 2 {
- return fmt.Errorf("Invalid key <%s> when migrating filter for attributeProfile", id)
- }
- attrPrf, err := m.dmIN.DataManager().GetAttributeProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
- if err != nil {
- return fmt.Errorf("error: <%s> when getting attribute profile with tenant: <%s> and id: <%s>",
- err.Error(), tntID[0], tntID[1])
- }
- if attrPrf == nil || m.dryRun {
- continue
- }
- for i, fl := range attrPrf.FilterIDs {
- attrPrf.FilterIDs[i] = migrateInlineFilterV2(fl)
- }
- for i, attr := range attrPrf.Attributes {
- if attr.Path == utils.EmptyString {
- // in case of older version of attributes we do not have Path
- // stop the inline migration until the attributes are migrated
- return fmt.Errorf("error: when migrating filter for attribute profile: <%s>", attrPrf.TenantID())
- }
- for j, fl := range attr.FilterIDs {
- attrPrf.Attributes[i].FilterIDs[j] = migrateInlineFilterV2(fl)
- }
- }
- if err := m.dmOut.DataManager().SetAttributeProfile(context.TODO(), attrPrf, true); err != nil {
- return fmt.Errorf("error: <%s> when setting attribute profile with tenant: <%s> and id: <%s>",
- err.Error(), tntID[0], tntID[1])
- }
- m.stats[utils.RQF]++
- }
- return
-}
-
func (m *Migrator) migrateChargerProfileFiltersV2() (err error) {
+ mInDB, err := m.GetINConn(utils.MetaChargerProfiles)
+ if err != nil {
+ return err
+ }
+ dataDB, _, err := mInDB.DataManager().DBConns().GetConn(utils.MetaChargerProfiles)
+ if err != nil {
+ return err
+ }
var ids []string
- ids, err = m.dmIN.DataManager().DataDB().GetKeysForPrefix(context.TODO(), utils.ChargerProfilePrefix)
+ ids, err = dataDB.GetKeysForPrefix(context.TODO(), utils.ChargerProfilePrefix)
if err != nil {
return fmt.Errorf("error: <%s> when getting charger profile IDs", err)
}
@@ -705,7 +497,7 @@ func (m *Migrator) migrateChargerProfileFiltersV2() (err error) {
if len(tntID) < 2 {
return fmt.Errorf("Invalid key <%s> when migrating filter for chargerProfile", id)
}
- cpp, err := m.dmIN.DataManager().GetChargerProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
+ cpp, err := mInDB.DataManager().GetChargerProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
if err != nil {
return fmt.Errorf("error: <%s> when getting charger profile with tenant: <%s> and id: <%s>",
err.Error(), tntID[0], tntID[1])
@@ -716,7 +508,11 @@ func (m *Migrator) migrateChargerProfileFiltersV2() (err error) {
for i, fl := range cpp.FilterIDs {
cpp.FilterIDs[i] = migrateInlineFilterV2(fl)
}
- if err := m.dmOut.DataManager().SetChargerProfile(context.TODO(), cpp, true); err != nil {
+ mOutDB, err := m.GetOUTConn(utils.MetaChargerProfiles)
+ if err != nil {
+ return err
+ }
+ if err := mOutDB.DataManager().SetChargerProfile(context.TODO(), cpp, true); err != nil {
return fmt.Errorf("error: <%s> when setting charger profile with tenant: <%s> and id: <%s>",
err.Error(), tntID[0], tntID[1])
}
@@ -743,7 +539,11 @@ type v1FilterRule struct {
func (m *Migrator) migrateRequestFilterV4(v4Fltr *engine.Filter) (fltr *engine.Filter, err error) {
if v4Fltr == nil {
// read data from DataDB
- v4Fltr, err = m.dmIN.getV4Filter()
+ mInDB, err := m.GetINConn(utils.MetaFilters)
+ if err != nil {
+ return nil, err
+ }
+ v4Fltr, err = mInDB.getV4Filter()
if err != nil {
return nil, err
}
@@ -841,13 +641,21 @@ func migrateInlineFilterV4(v4fltIDs []string) (fltrIDs []string, err error) {
// setFilterv5WithoutCompile we need a method that get's the filter from DataDB without compiling the filter rules
func (m *Migrator) setFilterv5WithoutCompile(fltr *engine.Filter) (err error) {
+ mOutDB, err := m.GetOUTConn(utils.MetaFilters)
+ if err != nil {
+ return err
+ }
+ dataDB, _, err := mOutDB.DataManager().DBConns().GetConn(utils.MetaFilters)
+ if err != nil {
+ return err
+ }
var oldFlt *engine.Filter
- if oldFlt, err = m.dmOut.DataManager().DataDB().GetFilterDrv(context.TODO(), fltr.Tenant, fltr.ID); err != nil &&
+ if oldFlt, err = dataDB.GetFilterDrv(context.TODO(), fltr.Tenant, fltr.ID); err != nil &&
err != utils.ErrNotFound {
return
}
- if err = m.dmOut.DataManager().DataDB().SetFilterDrv(context.TODO(), fltr); err != nil {
+ if err = dataDB.SetFilterDrv(context.TODO(), fltr); err != nil {
return
}
- return engine.UpdateFilterIndex(context.TODO(), m.dmOut.DataManager(), oldFlt, fltr)
+ return engine.UpdateFilterIndex(context.TODO(), mOutDB.DataManager(), oldFlt, fltr)
}
diff --git a/migrator/load_ids.go b/migrator/load_ids.go
index 68a5b57b3..392146ef2 100644
--- a/migrator/load_ids.go
+++ b/migrator/load_ids.go
@@ -29,8 +29,16 @@ func (m *Migrator) migrateLoadIDs() (err error) {
return
}
if vrs[utils.LoadIDs] != 1 {
- if err = m.dmOut.DataManager().DataDB().RemoveLoadIDsDrv(); err != nil {
- return
+ mOutDB, err := m.GetOUTConn(utils.MetaLoadIDs)
+ if err != nil {
+ return err
+ }
+ dataDB, _, err := mOutDB.DataManager().DBConns().GetConn(utils.MetaLoadIDs)
+ if err != nil {
+ return err
+ }
+ if err = dataDB.RemoveLoadIDsDrv(); err != nil {
+ return err
}
if err = m.setVersions(utils.LoadIDsVrs); err != nil {
return err
diff --git a/migrator/migrator.go b/migrator/migrator.go
index 906bbf7d0..9d19bd5f3 100644
--- a/migrator/migrator.go
+++ b/migrator/migrator.go
@@ -22,16 +22,19 @@ import (
"fmt"
"log"
+ "github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
)
-func NewMigrator(dmIN, dmOut MigratorDataDB,
+func NewMigrator(dataDBCfg *config.DbCfg, dmFrom, dmTo map[string]MigratorDataDB,
dryRun, sameDataDB bool) (m *Migrator, err error) {
stats := make(map[string]int)
+
m = &Migrator{
- dmOut: dmOut,
- dmIN: dmIN,
+ dataDBCfg: dataDBCfg,
+ dmTo: dmTo,
+ dmFrom: dmFrom,
dryRun: dryRun,
sameDataDB: sameDataDB,
stats: stats,
@@ -40,13 +43,44 @@ func NewMigrator(dmIN, dmOut MigratorDataDB,
}
type Migrator struct {
- dmIN MigratorDataDB
- dmOut MigratorDataDB
+ dataDBCfg *config.DbCfg
+ dmFrom map[string]MigratorDataDB
+ dmTo map[string]MigratorDataDB
dryRun bool
sameDataDB bool
stats map[string]int
}
+// GetINConn returns the IN MigratorDataDB where the provided itemID is stored.
+// Returns the *default MigratorDataDB conn if it can't find the item's corresponding DB
+func (m *Migrator) GetINConn(itemID string) (db MigratorDataDB, err error) {
+ var ok bool
+ dbConnID, ok := m.dataDBCfg.Items[itemID]
+ if !ok {
+ return nil, fmt.Errorf("couldn't find item with ID: <%v>", itemID)
+ }
+ if db, ok = m.dmFrom[dbConnID.DBConn]; !ok {
+ // return *default db if DBConn is not found
+ return m.dmFrom[utils.MetaDefault], nil
+ }
+ return
+}
+
+// GetOUTConn returns the OUT MigratorDataDB where the provided itemID is stored.
+// Returns the *default MigratorDataDB conn if it can't find the item's corresponding DB
+func (m *Migrator) GetOUTConn(itemID string) (db MigratorDataDB, err error) {
+ var ok bool
+ dbConnID, ok := m.dataDBCfg.Items[itemID]
+ if !ok {
+ return nil, fmt.Errorf("couldn't find item with ID: <%v>", itemID)
+ }
+ if db, ok = m.dmTo[dbConnID.DBConn]; !ok {
+ // return *default db if DBConn is not found
+ return m.dmTo[utils.MetaDefault], nil
+ }
+ return
+}
+
// Migrate implements the tasks to migrate, used as a dispatcher to the individual methods
func (m *Migrator) Migrate(taskIDs []string) (err error, stats map[string]int) {
stats = make(map[string]int)
@@ -62,43 +96,40 @@ func (m *Migrator) Migrate(taskIDs []string) (err error, stats map[string]int) {
log.Print("Cannot dryRun SetVersions!")
return
}
- err = engine.OverwriteDBVersions(m.dmOut.DataManager().DataDB())
- if err != nil {
- return utils.NewCGRError(utils.Migrator, utils.ServerErrorCaps, err.Error(),
- fmt.Sprintf("error: <%s> when seting versions for DataDB", err.Error())), nil
+ if dmTo, err := m.GetOUTConn(utils.CacheVersions); err != nil {
+ return err, nil
+ } else {
+ if dataDB, _, err := dmTo.DataManager().DBConns().GetConn(utils.CacheVersions); err != nil {
+ return err, nil
+ } else if err = engine.OverwriteDBVersions(dataDB); err != nil {
+ return utils.NewCGRError(utils.Migrator, utils.ServerErrorCaps, err.Error(),
+				fmt.Sprintf("error: <%s> when setting versions for DataDB", err.Error())), nil
+ }
}
case utils.MetaEnsureIndexes:
-
- if m.dmOut.DataManager().DataDB().GetStorageType() == utils.MetaMongo {
- mgo := m.dmOut.DataManager().DataDB().(*engine.MongoStorage)
- if err = mgo.EnsureIndexes(); err != nil {
- return
+		mongoDBFound := false // tracks whether any mongo DB was found for case *ensure_indexes
+ // since DataManager is the same in all OUT migratorDataDBs, taking the default
+ // migratorDataDB will suffice
+ for _, db := range m.dmTo[utils.MetaDefault].DataManager().DataDB() {
+ if db.GetStorageType() == utils.MetaMongo {
+ mgo := db.(*engine.MongoStorage)
+ if err = mgo.EnsureIndexes(); err != nil {
+ return
+ }
+ mongoDBFound = true
}
- } else {
+ }
+ if !mongoDBFound {
log.Printf("The DataDB type has to be %s .\n ", utils.MetaMongo)
}
case utils.MetaStats:
err = m.migrateStats()
- case utils.MetaThresholds:
- err = m.migrateThresholds()
- case utils.MetaAttributes:
- err = m.migrateAttributeProfile()
case utils.MetaFilters:
err = m.migrateFilters()
- case utils.MetaRoutes:
- err = m.migrateRouteProfiles()
case utils.MetaAccounts:
err = m.migrateAccounts()
//only Move
- case utils.MetaActionProfiles:
- err = m.migrateActionProfiles()
- case utils.MetaResources:
- err = m.migrateResources()
- case utils.MetaRateProfiles:
- err = m.migrateRateProfiles()
- case utils.MetaSubscribers:
- err = m.migrateSubscribers()
case utils.MetaChargers:
err = m.migrateChargers()
//TPs
@@ -109,24 +140,9 @@ func (m *Migrator) Migrate(taskIDs []string) (err error, stats map[string]int) {
if err := m.migrateStats(); err != nil {
log.Print("ERROR: ", utils.MetaStats, " ", err)
}
- if err := m.migrateThresholds(); err != nil {
- log.Print("ERROR: ", utils.MetaThresholds, " ", err)
- }
- if err := m.migrateRouteProfiles(); err != nil {
- log.Print("ERROR: ", utils.MetaRoutes, " ", err)
- }
- if err := m.migrateAttributeProfile(); err != nil {
- log.Print("ERROR: ", utils.MetaAttributes, " ", err)
- }
if err := m.migrateFilters(); err != nil {
log.Print("ERROR: ", utils.MetaFilters, " ", err)
}
- if err := m.migrateResources(); err != nil {
- log.Print("ERROR: ", utils.MetaResources, " ", err)
- }
- if err := m.migrateSubscribers(); err != nil {
- log.Print("ERROR: ", utils.MetaSubscribers, " ", err)
- }
if err = m.migrateLoadIDs(); err != nil {
log.Print("ERROR: ", utils.MetaLoadIDs, " ", err)
}
@@ -141,19 +157,29 @@ func (m *Migrator) Migrate(taskIDs []string) (err error, stats map[string]int) {
}
func (m *Migrator) ensureIndexesDataDB(cols ...string) error {
- if m.dmOut.DataManager().DataDB().GetStorageType() != utils.MetaMongo {
- return nil
+ // since DataManager is the same in all OUT migratorDataDBs, taking the default
+ // migratorDataDB will suffice
+ for _, db := range m.dmTo[utils.MetaDefault].DataManager().DataDB() {
+ if db.GetStorageType() == utils.MetaMongo {
+ mgo := db.(*engine.MongoStorage)
+ if err := mgo.EnsureIndexes(cols...); err != nil {
+ return err
+ }
+ }
}
- mgo := m.dmOut.DataManager().DataDB().(*engine.MongoStorage)
- return mgo.EnsureIndexes(cols...)
+ return nil
}
// closes all opened DBs
func (m *Migrator) Close() {
- if m.dmIN != nil {
- m.dmIN.close()
+ if m.dmFrom != nil {
+ for _, dm := range m.dmFrom {
+ dm.close()
+ }
}
- if m.dmOut != nil {
- m.dmOut.close()
+ if m.dmTo != nil {
+ for _, dm := range m.dmTo {
+ dm.close()
+ }
}
}
diff --git a/migrator/migrator_datadb.go b/migrator/migrator_datadb.go
index 32158a58f..e8c27ca97 100644
--- a/migrator/migrator_datadb.go
+++ b/migrator/migrator_datadb.go
@@ -28,37 +28,15 @@ type MigratorDataDB interface {
setV1Stats(x *v1Stat) (err error)
getV2Stats() (v2 *engine.StatQueue, err error)
setV2Stats(v2 *engine.StatQueue) (err error)
- getV1AttributeProfile() (v1attrPrf *v1AttributeProfile, err error)
- setV1AttributeProfile(x *v1AttributeProfile) (err error)
- getV2ThresholdProfile() (v2T *v2Threshold, err error)
- setV2ThresholdProfile(x *v2Threshold) (err error)
- remV2ThresholdProfile(tenant, id string) (err error)
- getV2AttributeProfile() (v2attrPrf *v2AttributeProfile, err error)
- setV2AttributeProfile(x *v2AttributeProfile) (err error)
- remV2AttributeProfile(tenant, id string) (err error)
- getV3AttributeProfile() (v3attrPrf *v3AttributeProfile, err error)
- setV3AttributeProfile(x *v3AttributeProfile) (err error)
- remV3AttributeProfile(tenant, id string) (err error)
-
- getV4AttributeProfile() (v4attrPrf *v4AttributeProfile, err error)
- setV4AttributeProfile(x *v4AttributeProfile) (err error)
- remV4AttributeProfile(tenant, id string) (err error)
- getV5AttributeProfile() (v5attrPrf *v6AttributeProfile, err error)
getV1Filter() (v1Fltr *v1Filter, err error)
setV1Filter(x *v1Filter) (err error)
remV1Filter(tenant, id string) (err error)
getV4Filter() (v1Fltr *engine.Filter, err error)
- getSupplier() (spl *SupplierProfile, err error)
- setSupplier(spl *SupplierProfile) (err error)
- remSupplier(tenant, id string) (err error)
-
getV1ChargerProfile() (v1chrPrf *utils.ChargerProfile, err error)
- getV1RouteProfile() (v1chrPrf *utils.RouteProfile, err error)
getV3Stats() (v1st *engine.StatQueueProfile, err error)
- getV3ThresholdProfile() (v2T *engine.ThresholdProfile, err error)
DataManager() *engine.DataManager
close()
diff --git a/migrator/migrator_utils.go b/migrator/migrator_utils.go
index 14f62901f..22b13037c 100644
--- a/migrator/migrator_utils.go
+++ b/migrator/migrator_utils.go
@@ -30,34 +30,51 @@ var (
SUPPLIER = "Supplier"
)
-func NewMigratorDataDB(db_type, host, port, name, user, pass,
- marshaler string, cfg *config.CGRConfig,
- opts *config.DataDBOpts, itmsCfg map[string]*config.ItemOpts) (db MigratorDataDB, err error) {
- dbCon, err := engine.NewDataDBConn(db_type, host,
- port, name, user, pass, marshaler, opts, itmsCfg)
- if err != nil {
- return nil, err
+func NewMigratorDataDBs(dbConnIDList []string, marshaler string,
+ cfg *config.CGRConfig) (db map[string]MigratorDataDB, err error) {
+ dataDBs := make(map[string]engine.DataDB, len(dbConnIDList))
+ for _, dbConnID := range dbConnIDList {
+ dbCon, err := engine.NewDataDBConn(cfg.DbCfg().DBConns[dbConnID].Type,
+ cfg.DbCfg().DBConns[dbConnID].Host, cfg.DbCfg().DBConns[dbConnID].Port,
+ cfg.DbCfg().DBConns[dbConnID].Name, cfg.DbCfg().DBConns[dbConnID].User,
+ cfg.DbCfg().DBConns[dbConnID].Password, marshaler,
+ cfg.DbCfg().DBConns[dbConnID].StringIndexedFields,
+ cfg.DbCfg().DBConns[dbConnID].PrefixIndexedFields,
+ cfg.MigratorCgrCfg().OutDBOpts, cfg.DbCfg().Items)
+ if err != nil {
+ return nil, err
+ }
+ dataDBs[dbConnID] = dbCon
}
- dm := engine.NewDataManager(dbCon, cfg, nil)
- var d MigratorDataDB
- switch db_type {
- case utils.MetaRedis:
- d = newRedisMigrator(dm)
- case utils.MetaMongo:
- d = newMongoMigrator(dm)
- db = d.(MigratorDataDB)
- case utils.MetaInternal:
- d = newInternalMigrator(dm)
- db = d.(MigratorDataDB)
- default:
- err = fmt.Errorf("unknown db '%s' valid options are '%s' or '%s or '%s'",
- db_type, utils.MetaRedis, utils.MetaMongo, utils.MetaInternal)
+ dbcManager := engine.NewDBConnManager(dataDBs, cfg.DbCfg())
+ dm := engine.NewDataManager(dbcManager, cfg, nil)
+ d := make(map[string]MigratorDataDB, len(dbConnIDList))
+ for _, dbConnID := range dbConnIDList {
+ switch cfg.DbCfg().DBConns[dbConnID].Type {
+ case utils.MetaRedis:
+ d[dbConnID] = newRedisMigrator(dm)
+ case utils.MetaMongo:
+ d[dbConnID] = newMongoMigrator(dm)
+ case utils.MetaInternal:
+ d[dbConnID] = newInternalMigrator(dm)
+ default:
+			err = fmt.Errorf("unknown db '%s' valid options are '%s' or '%s' or '%s'",
+				cfg.DbCfg().DBConns[dbConnID].Type, utils.MetaRedis, utils.MetaMongo, utils.MetaInternal)
+ }
}
return d, nil
}
func (m *Migrator) getVersions(str string) (vrs engine.Versions, err error) {
- vrs, err = m.dmIN.DataManager().DataDB().GetVersions(utils.EmptyString)
+ mInDB, err := m.GetINConn(utils.CacheVersions)
+ if err != nil {
+ return nil, err
+ }
+ dataDB, _, err := mInDB.DataManager().DBConns().GetConn(utils.CacheVersions)
+ if err != nil {
+ return nil, err
+ }
+ vrs, err = dataDB.GetVersions(utils.EmptyString)
if err != nil {
return nil, utils.NewCGRError(utils.Migrator,
utils.ServerErrorCaps,
@@ -73,8 +90,16 @@ func (m *Migrator) getVersions(str string) (vrs engine.Versions, err error) {
}
func (m *Migrator) setVersions(str string) (err error) {
+ mOutDB, err := m.GetOUTConn(utils.CacheVersions)
+ if err != nil {
+ return err
+ }
+ dataDB, _, err := mOutDB.DataManager().DBConns().GetConn(utils.CacheVersions)
+ if err != nil {
+ return err
+ }
vrs := engine.Versions{str: engine.CurrentDataDBVersions()[str]}
- err = m.dmOut.DataManager().DataDB().SetVersions(vrs, false)
+ err = dataDB.SetVersions(vrs, false)
if err != nil {
err = utils.NewCGRError(utils.Migrator,
utils.ServerErrorCaps,
diff --git a/migrator/rateprofiles.go b/migrator/rateprofiles.go
deleted file mode 100644
index ae4f2e03b..000000000
--- a/migrator/rateprofiles.go
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
-Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
-Copyright (C) ITsysCOM GmbH
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see
-*/
-
-package migrator
-
-import (
- "fmt"
- "strings"
-
- "github.com/cgrates/birpc/context"
- "github.com/cgrates/cgrates/engine"
- "github.com/cgrates/cgrates/utils"
-)
-
-func (m *Migrator) migrateCurrentRateProfiles() (err error) {
- var ids []string
- ids, err = m.dmIN.DataManager().DataDB().GetKeysForPrefix(context.TODO(), utils.RateProfilePrefix)
- if err != nil {
- return err
- }
- for _, id := range ids {
- tntID := strings.SplitN(strings.TrimPrefix(id, utils.RateProfilePrefix), utils.InInFieldSep, 2)
- if len(tntID) < 2 {
- return fmt.Errorf("Invalid key <%s> when migrating rate profiles", id)
- }
- rp, err := m.dmIN.DataManager().GetRateProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
- if err != nil {
- return err
- }
- if rp == nil || m.dryRun {
- continue
- }
- if err := m.dmOut.DataManager().SetRateProfile(context.TODO(), rp, false, true); err != nil {
- return err
- }
- if err := m.dmIN.DataManager().RemoveRateProfile(context.TODO(), tntID[0], tntID[1], false); err != nil {
- return err
- }
- m.stats[utils.RateProfiles]++
- }
- return
-}
-
-func (m *Migrator) migrateRateProfiles() (err error) {
- var vrs engine.Versions
- current := engine.CurrentDataDBVersions()
- if vrs, err = m.getVersions(utils.RateProfiles); err != nil {
- return
- }
-
- migrated := true
- for {
- version := vrs[utils.RateProfiles]
- for {
- switch version {
- default:
- return fmt.Errorf("Unsupported version %v", version)
- case current[utils.RateProfiles]:
- migrated = false
- if m.sameDataDB {
- break
- }
- if err = m.migrateCurrentRateProfiles(); err != nil {
- return
- }
- }
- if version == current[utils.RateProfiles] || err == utils.ErrNoMoreData {
- break
- }
- }
- if err == utils.ErrNoMoreData || !migrated {
- break
- }
- m.stats[utils.RateProfiles]++
- }
- // All done, update version wtih current one
- if err = m.setVersions(utils.RateProfiles); err != nil {
- return
- }
- return m.ensureIndexesDataDB(engine.ColRpp)
-}
diff --git a/migrator/resource.go b/migrator/resource.go
deleted file mode 100644
index 6687b048f..000000000
--- a/migrator/resource.go
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
-Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
-Copyright (C) ITsysCOM GmbH
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see
-*/
-
-package migrator
-
-import (
- "fmt"
- "strings"
-
- "github.com/cgrates/birpc/context"
- "github.com/cgrates/cgrates/engine"
- "github.com/cgrates/cgrates/utils"
-)
-
-func (m *Migrator) migrateCurrentResource() (err error) {
- var ids []string
- ids, err = m.dmIN.DataManager().DataDB().GetKeysForPrefix(context.TODO(), utils.ResourceProfilesPrefix)
- if err != nil {
- return err
- }
- for _, id := range ids {
- tntID := strings.SplitN(strings.TrimPrefix(id, utils.ResourceProfilesPrefix), utils.InInFieldSep, 2)
- if len(tntID) < 2 {
- return fmt.Errorf("Invalid key <%s> when migrating resource profiles", id)
- }
- res, err := m.dmIN.DataManager().GetResourceProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
- if err != nil {
- return err
- }
- if res == nil || m.dryRun {
- continue
- }
- if err := m.dmOut.DataManager().SetResourceProfile(context.TODO(), res, true); err != nil {
- return err
- }
- if err := m.dmIN.DataManager().RemoveResourceProfile(context.TODO(), tntID[0], tntID[1], false); err != nil {
- return err
- }
- m.stats[utils.ResourceStr]++
- }
- return
-}
-
-func (m *Migrator) migrateResources() (err error) {
- var vrs engine.Versions
- current := engine.CurrentDataDBVersions()
- if vrs, err = m.getVersions(utils.ResourceStr); err != nil {
- return
- }
-
- migrated := true
- for {
- version := vrs[utils.ResourceStr]
- for {
- switch version {
- default:
- return fmt.Errorf("Unsupported version %v", version)
- case current[utils.ResourceStr]:
- migrated = false
- if m.sameDataDB {
- break
- }
- if err = m.migrateCurrentResource(); err != nil {
- return
- }
- }
- if version == current[utils.ResourceStr] || err == utils.ErrNoMoreData {
- break
- }
- }
- if err == utils.ErrNoMoreData || !migrated {
- break
- }
- // if !m.dryRun {
- // if err = m.dmIN.DataManager().SetResourceProfile(v2, true); err != nil {
- // return
- // }
- // }
- m.stats[utils.ResourceStr]++
- }
- // All done, update version wtih current one
- if err = m.setVersions(utils.ResourceStr); err != nil {
- return
- }
- return m.ensureIndexesDataDB(engine.ColRsP)
-}
diff --git a/migrator/routes.go b/migrator/routes.go
deleted file mode 100644
index f21df3fec..000000000
--- a/migrator/routes.go
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
-Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
-Copyright (C) ITsysCOM GmbH
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see
-*/
-
-package migrator
-
-import (
- "errors"
- "fmt"
- "strings"
-
- "github.com/cgrates/birpc/context"
- "github.com/cgrates/cgrates/engine"
- "github.com/cgrates/cgrates/utils"
-)
-
-var (
- SupplierProfilePrefix = "spp_"
- ColSpp = "supplier_profiles"
-)
-
-type Supplier struct {
- ID string // SupplierID
- FilterIDs []string
- AccountIDs []string
- RatingPlanIDs []string // used when computing price
- ResourceIDs []string // queried in some strategies
- StatIDs []string // queried in some strategies
- Weight float64
- Blocker bool // do not process further supplier after this one
- SupplierParameters string
-}
-
-type SupplierProfile struct {
- Tenant string
- ID string // LCR Profile ID
- FilterIDs []string
- ActivationInterval *utils.ActivationInterval // Activation interval
- Sorting string // Sorting strategy
- SortingParameters []string
- Suppliers []*Supplier
- Weight float64
-}
-
-func (m *Migrator) removeSupplier() (err error) {
- for {
- var spp *SupplierProfile
- spp, err = m.dmIN.getSupplier()
- if err == utils.ErrNoMoreData {
- break
- }
- if err != nil {
- return
- }
- if err = m.dmIN.remSupplier(spp.Tenant, spp.ID); err != nil {
- return
- }
- }
- return
-
-}
-
-func (m *Migrator) migrateFromSupplierToRoute() (err error) {
- for {
- var spp *SupplierProfile
- spp, err = m.dmIN.getSupplier()
- if err == utils.ErrNoMoreData {
- break
- }
- if err != nil {
- return err
- }
- if spp == nil || m.dryRun {
- continue
- }
- if err := m.dmOut.DataManager().SetRouteProfile(context.TODO(), convertSupplierToRoute(spp), true); err != nil {
- return err
- }
- m.stats[utils.Routes]++
- }
- if m.dryRun {
- return
- }
- if err = m.removeSupplier(); err != nil && err != utils.ErrNoMoreData {
- return
- }
- // All done, update version with current one
- vrs := engine.Versions{utils.Routes: 1}
- if err = m.dmOut.DataManager().DataDB().SetVersions(vrs, false); err != nil {
- return utils.NewCGRError(utils.Migrator,
- utils.ServerErrorCaps,
- err.Error(),
- fmt.Sprintf("error: <%s> when updating RouteProfiles version into dataDB", err.Error()))
- }
- return
-}
-
-func (m *Migrator) migrateCurrentRouteProfile() (err error) {
- var ids []string
- ids, err = m.dmIN.DataManager().DataDB().GetKeysForPrefix(context.TODO(), utils.RouteProfilePrefix)
- if err != nil {
- return err
- }
- for _, id := range ids {
- tntID := strings.SplitN(strings.TrimPrefix(id, utils.RouteProfilePrefix), utils.InInFieldSep, 2)
- if len(tntID) < 2 {
- return fmt.Errorf("invalid key <%s> when migrating route profiles", id)
- }
- rPrf, err := m.dmIN.DataManager().GetRouteProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
- if err != nil {
- return err
- }
- if rPrf == nil || m.dryRun {
- continue
- }
- if err := m.dmOut.DataManager().SetRouteProfile(context.TODO(), rPrf, true); err != nil {
- return err
- }
- if err := m.dmIN.DataManager().RemoveRouteProfile(context.TODO(), tntID[0], tntID[1], true); err != nil {
- return err
- }
- m.stats[utils.Routes]++
- }
- return
-}
-
-func (m *Migrator) migrateRouteProfiles() (err error) {
- var vrs engine.Versions
- current := engine.CurrentDataDBVersions()
- if vrs, err = m.getVersions(utils.Routes); err != nil {
- return
- }
- routeVersion, has := vrs[utils.Routes]
- if !has {
- if vrs[utils.RQF] != current[utils.RQF] {
- return fmt.Errorf("please migrate the filters before migrating the routes")
- }
- if err = m.migrateFromSupplierToRoute(); err != nil {
- return
- }
- }
- migrated := true
- var v2 *utils.RouteProfile
- for {
- version := routeVersion
- for {
- switch version {
- default:
- return fmt.Errorf("Unsupported version %v", version)
- case current[utils.Routes]:
- migrated = false
- if m.sameDataDB {
- break
- }
- if err = m.migrateCurrentRouteProfile(); err != nil {
- return err
- }
- case 1:
- if v2, err = m.migrateV1ToV2Routes(); err != nil && err != utils.ErrNoMoreData {
- return
- } else if err == utils.ErrNoMoreData {
- break
- }
- version = 2
- }
- if version == current[utils.Routes] || err == utils.ErrNoMoreData {
- break
- }
- }
- if err == utils.ErrNoMoreData || !migrated {
- break
- }
- if !m.dryRun {
- if err = m.dmIN.DataManager().SetRouteProfile(context.TODO(), v2, true); err != nil {
- return
- }
- }
- m.stats[utils.Routes]++
- }
- // All done, update version with current one
- if err = m.setVersions(utils.Routes); err != nil {
- return
- }
-
- return m.ensureIndexesDataDB(engine.ColRts)
-}
-
-func convertSupplierToRoute(spp *SupplierProfile) (route *utils.RouteProfile) {
- route = &utils.RouteProfile{
- Tenant: spp.Tenant,
- ID: spp.ID,
- FilterIDs: spp.FilterIDs,
- Sorting: spp.Sorting,
- SortingParameters: spp.SortingParameters,
- Weights: utils.DynamicWeights{{Weight: spp.Weight}},
- }
- route.Routes = make([]*utils.Route, len(spp.Suppliers))
- for i, supl := range spp.Suppliers {
- route.Routes[i] = &utils.Route{
- ID: supl.ID,
- FilterIDs: supl.FilterIDs,
- AccountIDs: supl.AccountIDs,
- RateProfileIDs: supl.RatingPlanIDs,
- ResourceIDs: supl.ResourceIDs,
- StatIDs: supl.StatIDs,
- Weights: utils.DynamicWeights{{Weight: supl.Weight}},
- Blockers: utils.DynamicBlockers{{Blocker: supl.Blocker}},
- RouteParameters: supl.SupplierParameters,
- }
- }
- return
-}
-
-func (m *Migrator) migrateV1ToV2Routes() (v4Cpp *utils.RouteProfile, err error) {
- v4Cpp, err = m.dmIN.getV1RouteProfile()
- if err != nil {
- return nil, err
- } else if v4Cpp == nil {
- return nil, errors.New("Dispatcher NIL")
- }
- if v4Cpp.FilterIDs, err = migrateInlineFilterV4(v4Cpp.FilterIDs); err != nil {
- return nil, err
- }
- return
-}
diff --git a/migrator/stats.go b/migrator/stats.go
index d8063fa1e..cbab58415 100644
--- a/migrator/stats.go
+++ b/migrator/stats.go
@@ -59,9 +59,17 @@ type v1Stat struct {
}
func (m *Migrator) migrateCurrentStats() (err error) {
+ mInDB, err := m.GetINConn(utils.MetaStatQueueProfiles)
+ if err != nil {
+ return err
+ }
+ dataDB, _, err := mInDB.DataManager().DBConns().GetConn(utils.MetaStatQueueProfiles)
+ if err != nil {
+ return err
+ }
//StatQueueProfile
var ids []string
- if ids, err = m.dmIN.DataManager().DataDB().GetKeysForPrefix(context.Background(), utils.StatQueueProfilePrefix); err != nil {
+ if ids, err = dataDB.GetKeysForPrefix(context.Background(), utils.StatQueueProfilePrefix); err != nil {
return err
}
for _, id := range ids {
@@ -69,11 +77,11 @@ func (m *Migrator) migrateCurrentStats() (err error) {
if len(tntID) < 2 {
return fmt.Errorf("Invalid key <%s> when migrating stat queue profiles", id)
}
- sqp, err := m.dmIN.DataManager().GetStatQueueProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
+ sqp, err := mInDB.DataManager().GetStatQueueProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
if err != nil {
return err
}
- sgs, err := m.dmIN.DataManager().GetStatQueue(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
+ sgs, err := mInDB.DataManager().GetStatQueue(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
if err != nil {
return err
@@ -81,15 +89,23 @@ func (m *Migrator) migrateCurrentStats() (err error) {
if sqp == nil || m.dryRun {
continue
}
- if err := m.dmOut.DataManager().SetStatQueueProfile(context.TODO(), sqp, true); err != nil {
+ mOutDB, err := m.GetOUTConn(utils.MetaStatQueueProfiles)
+ if err != nil {
+ return err
+ }
+ if err := mOutDB.DataManager().SetStatQueueProfile(context.TODO(), sqp, true); err != nil {
return err
}
if sgs != nil {
- if err := m.dmOut.DataManager().SetStatQueue(context.TODO(), sgs); err != nil {
+ mOutDB, err := m.GetOUTConn(utils.MetaStatQueues)
+ if err != nil {
+ return err
+ }
+ if err := mOutDB.DataManager().SetStatQueue(context.TODO(), sgs); err != nil {
return err
}
}
- if err := m.dmIN.DataManager().RemoveStatQueueProfile(context.TODO(), tntID[0], tntID[1], false); err != nil {
+ if err := mInDB.DataManager().RemoveStatQueueProfile(context.TODO(), tntID[0], tntID[1], false); err != nil {
return err
}
m.stats[utils.Stats]++
@@ -98,8 +114,12 @@ func (m *Migrator) migrateCurrentStats() (err error) {
}
func (m *Migrator) migrateV1Stats() (filter *engine.Filter, v2Stats *engine.StatQueue, sts *engine.StatQueueProfile, err error) {
+ mInDB, err := m.GetINConn(utils.MetaStatQueueProfiles)
+ if err != nil {
+ return
+ }
var v1Sts *v1Stat
- v1Sts, err = m.dmIN.getV1Stats()
+ v1Sts, err = mInDB.getV1Stats()
if err != nil {
return nil, nil, nil, err
}
@@ -127,7 +147,11 @@ func remakeQueue(sq *engine.StatQueue) (out *engine.StatQueue) {
func (m *Migrator) migrateV2Stats(v2Stats *engine.StatQueue) (v3Stats *engine.StatQueue, err error) {
if v2Stats == nil {
// read from DB
- v2Stats, err = m.dmIN.getV2Stats()
+ mInDB, err := m.GetINConn(utils.MetaStatQueueProfiles)
+ if err != nil {
+ return nil, err
+ }
+ v2Stats, err = mInDB.getV2Stats()
if err != nil {
return nil, err
} else if v2Stats == nil {
@@ -196,17 +220,29 @@ func (m *Migrator) migrateStats() (err error) {
}
if !m.dryRun {
if vrs[utils.Stats] == 1 {
- if err = m.dmOut.DataManager().SetFilter(context.TODO(), filter, true); err != nil {
- return
+ mOutDB, err := m.GetOUTConn(utils.MetaFilters)
+ if err != nil {
+ return err
+ }
+ if err = mOutDB.DataManager().SetFilter(context.TODO(), filter, true); err != nil {
+ return err
}
}
+ mOutDB, err := m.GetOUTConn(utils.MetaStatQueueProfiles)
+ if err != nil {
+ return err
+ }
// Set the fresh-migrated Stats into DB
- if err = m.dmOut.DataManager().SetStatQueueProfile(context.TODO(), v4sts, true); err != nil {
- return
+ if err = mOutDB.DataManager().SetStatQueueProfile(context.TODO(), v4sts, true); err != nil {
+ return err
}
if v3Stats != nil {
- if err = m.dmOut.DataManager().SetStatQueue(context.TODO(), v3Stats); err != nil {
- return
+ mOutDB, err := m.GetOUTConn(utils.MetaStatQueues)
+ if err != nil {
+ return err
+ }
+ if err = mOutDB.DataManager().SetStatQueue(context.TODO(), v3Stats); err != nil {
+ return err
}
}
}
@@ -404,9 +440,13 @@ func (v1Sts v1Stat) AsStatQP() (filter *engine.Filter, sq *engine.StatQueue, stq
func (m *Migrator) migrateV3ToV4Stats(v3sts *engine.StatQueueProfile) (v4Cpp *engine.StatQueueProfile, err error) {
if v3sts == nil {
+ mInDB, err := m.GetINConn(utils.MetaStatQueueProfiles)
+ if err != nil {
+ return nil, err
+ }
// read data from DataDB
- if v3sts, err = m.dmIN.getV3Stats(); err != nil {
- return
+ if v3sts, err = mInDB.getV3Stats(); err != nil {
+ return nil, err
}
}
if v3sts.FilterIDs, err = migrateInlineFilterV4(v3sts.FilterIDs); err != nil {
diff --git a/migrator/storage_map_datadb.go b/migrator/storage_map_datadb.go
index 2e8f80e55..25be7fa68 100644
--- a/migrator/storage_map_datadb.go
+++ b/migrator/storage_map_datadb.go
@@ -31,10 +31,17 @@ type internalMigrator struct {
}
func newInternalMigrator(dm *engine.DataManager) (iDBMig *internalMigrator) {
- return &internalMigrator{
- dm: dm,
- iDB: dm.DataDB().(*engine.InternalDB),
+ var iDB *engine.InternalDB
+ for _, dbInf := range dm.DataDB() {
+ var canCast bool
+ if iDB, canCast = dbInf.(*engine.InternalDB); canCast {
+ return &internalMigrator{
+ dm: dm,
+ iDB: iDB,
+ }
+ }
}
+ return nil
}
func (iDBMig *internalMigrator) DataManager() *engine.DataManager {
@@ -65,88 +72,6 @@ func (iDBMig *internalMigrator) setV2Stats(v2 *engine.StatQueue) (err error) {
return utils.ErrNotImplemented
}
-// AttributeProfile methods
-// get
-func (iDBMig *internalMigrator) getV1AttributeProfile() (v1attrPrf *v1AttributeProfile, err error) {
- return nil, utils.ErrNotImplemented
-}
-
-// set
-func (iDBMig *internalMigrator) setV1AttributeProfile(x *v1AttributeProfile) (err error) {
- return utils.ErrNotImplemented
-}
-
-// ThresholdProfile methods
-// get
-func (iDBMig *internalMigrator) getV2ThresholdProfile() (v2T *v2Threshold, err error) {
- return nil, utils.ErrNotImplemented
-}
-
-func (iDBMig *internalMigrator) getV3ThresholdProfile() (v2T *engine.ThresholdProfile, err error) {
- return nil, utils.ErrNotImplemented
-}
-
-// set
-func (iDBMig *internalMigrator) setV2ThresholdProfile(x *v2Threshold) (err error) {
- return utils.ErrNotImplemented
-}
-
-// rem
-func (iDBMig *internalMigrator) remV2ThresholdProfile(tenant, id string) (err error) {
- return utils.ErrNotImplemented
-}
-
-// AttributeProfile methods
-// get
-func (iDBMig *internalMigrator) getV2AttributeProfile() (v2attrPrf *v2AttributeProfile, err error) {
- return nil, utils.ErrNotImplemented
-}
-
-// set
-func (iDBMig *internalMigrator) setV2AttributeProfile(x *v2AttributeProfile) (err error) {
- return utils.ErrNotImplemented
-}
-
-// rem
-func (iDBMig *internalMigrator) remV2AttributeProfile(tenant, id string) (err error) {
- return utils.ErrNotImplemented
-}
-
-// AttributeProfile methods
-// get
-func (iDBMig *internalMigrator) getV3AttributeProfile() (v3attrPrf *v3AttributeProfile, err error) {
- return nil, utils.ErrNotImplemented
-}
-
-// set
-func (iDBMig *internalMigrator) setV3AttributeProfile(x *v3AttributeProfile) (err error) {
- return utils.ErrNotImplemented
-}
-
-// rem
-func (iDBMig *internalMigrator) remV3AttributeProfile(tenant, id string) (err error) {
- return utils.ErrNotImplemented
-}
-
-// AttributeProfile methods
-// get
-func (iDBMig *internalMigrator) getV4AttributeProfile() (v4attrPrf *v4AttributeProfile, err error) {
- return nil, utils.ErrNotImplemented
-}
-func (iDBMig *internalMigrator) getV5AttributeProfile() (v4attrPrf *v6AttributeProfile, err error) {
- return nil, utils.ErrNotImplemented
-}
-
-// set
-func (iDBMig *internalMigrator) setV4AttributeProfile(x *v4AttributeProfile) (err error) {
- return utils.ErrNotImplemented
-}
-
-// rem
-func (iDBMig *internalMigrator) remV4AttributeProfile(tenant, id string) (err error) {
- return utils.ErrNotImplemented
-}
-
// Filter Methods
// get
func (iDBMig *internalMigrator) getV1Filter() (v1Fltr *v1Filter, err error) {
@@ -167,22 +92,6 @@ func (iDBMig *internalMigrator) remV1Filter(tenant, id string) (err error) {
return utils.ErrNotImplemented
}
-// Supplier Methods
-// get
-func (iDBMig *internalMigrator) getSupplier() (spl *SupplierProfile, err error) {
- return nil, utils.ErrNotImplemented
-}
-
-// set
-func (iDBMig *internalMigrator) setSupplier(spl *SupplierProfile) (err error) {
- return utils.ErrNotImplemented
-}
-
-// rem
-func (iDBMig *internalMigrator) remSupplier(tenant, id string) (err error) {
- return utils.ErrNotImplemented
-}
-
func (iDBMig *internalMigrator) close() {}
func (iDBMig *internalMigrator) getV1ChargerProfile() (v1chrPrf *utils.ChargerProfile, err error) {
diff --git a/migrator/storage_mongo_datadb.go b/migrator/storage_mongo_datadb.go
index 577bde7fd..a3e69442a 100644
--- a/migrator/storage_mongo_datadb.go
+++ b/migrator/storage_mongo_datadb.go
@@ -43,11 +43,18 @@ type mongoMigrator struct {
}
func newMongoMigrator(dm *engine.DataManager) (mgoMig *mongoMigrator) {
- return &mongoMigrator{
- dm: dm,
- mgoDB: dm.DataDB().(*engine.MongoStorage),
- cursor: nil,
+ var mgoDB *engine.MongoStorage
+ for _, dbInf := range dm.DataDB() {
+ var canCast bool
+ if mgoDB, canCast = dbInf.(*engine.MongoStorage); canCast {
+ return &mongoMigrator{
+ dm: dm,
+ mgoDB: mgoDB,
+ cursor: nil,
+ }
+ }
}
+ return nil
}
func (v1ms *mongoMigrator) close() {
@@ -130,203 +137,6 @@ func (v1ms *mongoMigrator) setV2Stats(v2 *engine.StatQueue) (err error) {
return
}
-// AttributeProfile methods
-// get
-func (v1ms *mongoMigrator) getV1AttributeProfile() (v1attrPrf *v1AttributeProfile, err error) {
- if v1ms.cursor == nil {
- v1ms.cursor, err = v1ms.mgoDB.DB().Collection(v1AttributeProfilesCol).Find(v1ms.mgoDB.GetContext(), bson.D{})
- if err != nil {
- return nil, err
- }
- }
- if !(*v1ms.cursor).Next(v1ms.mgoDB.GetContext()) {
- (*v1ms.cursor).Close(v1ms.mgoDB.GetContext())
- v1ms.cursor = nil
- return nil, utils.ErrNoMoreData
- }
- v1attrPrf = new(v1AttributeProfile)
- if err := (*v1ms.cursor).Decode(v1attrPrf); err != nil {
- return nil, err
- }
- return v1attrPrf, nil
-}
-
-// set
-func (v1ms *mongoMigrator) setV1AttributeProfile(x *v1AttributeProfile) (err error) {
- _, err = v1ms.mgoDB.DB().Collection(v1AttributeProfilesCol).InsertOne(v1ms.mgoDB.GetContext(), x)
- return
-}
-
-// ThresholdProfile methods
-// get
-func (v1ms *mongoMigrator) getV2ThresholdProfile() (v2T *v2Threshold, err error) {
- if v1ms.cursor == nil {
- v1ms.cursor, err = v1ms.mgoDB.DB().Collection(v2ThresholdProfileCol).Find(v1ms.mgoDB.GetContext(), bson.D{})
- if err != nil {
- return nil, err
- }
- }
- if !(*v1ms.cursor).Next(v1ms.mgoDB.GetContext()) {
- (*v1ms.cursor).Close(v1ms.mgoDB.GetContext())
- v1ms.cursor = nil
- return nil, utils.ErrNoMoreData
- }
- v2T = new(v2Threshold)
- if err := (*v1ms.cursor).Decode(v2T); err != nil {
- return nil, err
- }
- return v2T, nil
-}
-
-func (v1ms *mongoMigrator) getV3ThresholdProfile() (v2T *engine.ThresholdProfile, err error) {
- if v1ms.cursor == nil {
- v1ms.cursor, err = v1ms.mgoDB.DB().Collection(engine.ColTps).Find(v1ms.mgoDB.GetContext(), bson.D{})
- if err != nil {
- return nil, err
- }
- }
- if !(*v1ms.cursor).Next(v1ms.mgoDB.GetContext()) {
- (*v1ms.cursor).Close(v1ms.mgoDB.GetContext())
- v1ms.cursor = nil
- return nil, utils.ErrNoMoreData
- }
- v2T = new(engine.ThresholdProfile)
- if err := (*v1ms.cursor).Decode(v2T); err != nil {
- return nil, err
- }
- return v2T, nil
-}
-
-// set
-func (v1ms *mongoMigrator) setV2ThresholdProfile(x *v2Threshold) (err error) {
- _, err = v1ms.mgoDB.DB().Collection(v2ThresholdProfileCol).InsertOne(v1ms.mgoDB.GetContext(), x)
- return
-}
-
-// rem
-func (v1ms *mongoMigrator) remV2ThresholdProfile(tenant, id string) (err error) {
- _, err = v1ms.mgoDB.DB().Collection(v2ThresholdProfileCol).DeleteOne(v1ms.mgoDB.GetContext(), bson.M{"tenant": tenant, "id": id})
- return
-}
-
-// AttributeProfile methods
-// get
-func (v1ms *mongoMigrator) getV2AttributeProfile() (v2attrPrf *v2AttributeProfile, err error) {
- if v1ms.cursor == nil {
- v1ms.cursor, err = v1ms.mgoDB.DB().Collection(v1AttributeProfilesCol).Find(v1ms.mgoDB.GetContext(), bson.D{})
- if err != nil {
- return nil, err
- }
- }
- if !(*v1ms.cursor).Next(v1ms.mgoDB.GetContext()) {
- (*v1ms.cursor).Close(v1ms.mgoDB.GetContext())
- v1ms.cursor = nil
- return nil, utils.ErrNoMoreData
- }
- v2attrPrf = new(v2AttributeProfile)
- if err := (*v1ms.cursor).Decode(v2attrPrf); err != nil {
- return nil, err
- }
- return v2attrPrf, nil
-}
-
-// set
-func (v1ms *mongoMigrator) setV2AttributeProfile(x *v2AttributeProfile) (err error) {
- _, err = v1ms.mgoDB.DB().Collection(v1AttributeProfilesCol).InsertOne(v1ms.mgoDB.GetContext(), x)
- return
-}
-
-// rem
-func (v1ms *mongoMigrator) remV2AttributeProfile(tenant, id string) (err error) {
- _, err = v1ms.mgoDB.DB().Collection(v1AttributeProfilesCol).DeleteOne(v1ms.mgoDB.GetContext(), bson.M{"tenant": tenant, "id": id})
- return
-}
-
-// AttributeProfile methods
-// get
-func (v1ms *mongoMigrator) getV3AttributeProfile() (v3attrPrf *v3AttributeProfile, err error) {
- if v1ms.cursor == nil {
- v1ms.cursor, err = v1ms.mgoDB.DB().Collection(v1AttributeProfilesCol).Find(v1ms.mgoDB.GetContext(), bson.D{})
- if err != nil {
- return nil, err
- }
- }
- if !(*v1ms.cursor).Next(v1ms.mgoDB.GetContext()) {
- (*v1ms.cursor).Close(v1ms.mgoDB.GetContext())
- v1ms.cursor = nil
- return nil, utils.ErrNoMoreData
- }
- v3attrPrf = new(v3AttributeProfile)
- if err := (*v1ms.cursor).Decode(v3attrPrf); err != nil {
- return nil, err
- }
- return v3attrPrf, nil
-}
-
-// set
-func (v1ms *mongoMigrator) setV3AttributeProfile(x *v3AttributeProfile) (err error) {
- _, err = v1ms.mgoDB.DB().Collection(v1AttributeProfilesCol).InsertOne(v1ms.mgoDB.GetContext(), x)
- return
-}
-
-// rem
-func (v1ms *mongoMigrator) remV3AttributeProfile(tenant, id string) (err error) {
- _, err = v1ms.mgoDB.DB().Collection(v1AttributeProfilesCol).DeleteOne(v1ms.mgoDB.GetContext(), bson.M{"tenant": tenant, "id": id})
- return
-}
-
-// AttributeProfile methods
-// get
-func (v1ms *mongoMigrator) getV4AttributeProfile() (v4attrPrf *v4AttributeProfile, err error) {
- if v1ms.cursor == nil {
- v1ms.cursor, err = v1ms.mgoDB.DB().Collection(v1AttributeProfilesCol).Find(v1ms.mgoDB.GetContext(), bson.D{})
- if err != nil {
- return nil, err
- }
- }
- if !(*v1ms.cursor).Next(v1ms.mgoDB.GetContext()) {
- (*v1ms.cursor).Close(v1ms.mgoDB.GetContext())
- v1ms.cursor = nil
- return nil, utils.ErrNoMoreData
- }
- v4attrPrf = new(v4AttributeProfile)
- if err := (*v1ms.cursor).Decode(v4attrPrf); err != nil {
- return nil, err
- }
- return v4attrPrf, nil
-}
-
-func (v1ms *mongoMigrator) getV5AttributeProfile() (v5attrPrf *v6AttributeProfile, err error) {
- if v1ms.cursor == nil {
- v1ms.cursor, err = v1ms.mgoDB.DB().Collection(v1AttributeProfilesCol).Find(v1ms.mgoDB.GetContext(), bson.D{})
- if err != nil {
- return nil, err
- }
- }
- if !(*v1ms.cursor).Next(v1ms.mgoDB.GetContext()) {
- (*v1ms.cursor).Close(v1ms.mgoDB.GetContext())
- v1ms.cursor = nil
- return nil, utils.ErrNoMoreData
- }
- v5attrPrf = new(v6AttributeProfile)
- if err := (*v1ms.cursor).Decode(v5attrPrf); err != nil {
- return nil, err
- }
- return v5attrPrf, nil
-}
-
-// set
-func (v1ms *mongoMigrator) setV4AttributeProfile(x *v4AttributeProfile) (err error) {
- _, err = v1ms.mgoDB.DB().Collection(v1AttributeProfilesCol).InsertOne(v1ms.mgoDB.GetContext(), x)
- return
-}
-
-// rem
-func (v1ms *mongoMigrator) remV4AttributeProfile(tenant, id string) (err error) {
- _, err = v1ms.mgoDB.DB().Collection(v1AttributeProfilesCol).DeleteOne(v1ms.mgoDB.GetContext(), bson.M{"tenant": tenant, "id": id})
- return
-}
-
// Filter Methods
// get
func (v1ms *mongoMigrator) getV1Filter() (v1Fltr *v1Filter, err error) {
@@ -379,39 +189,6 @@ func (v1ms *mongoMigrator) remV1Filter(tenant, id string) (err error) {
return
}
-// Supplier Methods
-// get
-func (v1ms *mongoMigrator) getSupplier() (spl *SupplierProfile, err error) {
- if v1ms.cursor == nil {
- v1ms.cursor, err = v1ms.mgoDB.DB().Collection(ColSpp).Find(v1ms.mgoDB.GetContext(), bson.D{})
- if err != nil {
- return nil, err
- }
- }
- if !(*v1ms.cursor).Next(v1ms.mgoDB.GetContext()) {
- (*v1ms.cursor).Close(v1ms.mgoDB.GetContext())
- v1ms.cursor = nil
- return nil, utils.ErrNoMoreData
- }
- spl = new(SupplierProfile)
- if err := (*v1ms.cursor).Decode(spl); err != nil {
- return nil, err
- }
- return
-}
-
-// set
-func (v1ms *mongoMigrator) setSupplier(spl *SupplierProfile) (err error) {
- _, err = v1ms.mgoDB.DB().Collection(ColSpp).InsertOne(v1ms.mgoDB.GetContext(), spl)
- return
-}
-
-// rem
-func (v1ms *mongoMigrator) remSupplier(tenant, id string) (err error) {
- _, err = v1ms.mgoDB.DB().Collection(ColSpp).DeleteOne(v1ms.mgoDB.GetContext(), bson.M{"tenant": tenant, "id": id})
- return
-}
-
func (v1ms *mongoMigrator) getV1ChargerProfile() (v1chrPrf *utils.ChargerProfile, err error) {
if v1ms.cursor == nil {
v1ms.cursor, err = v1ms.mgoDB.DB().Collection(engine.ColCpp).Find(v1ms.mgoDB.GetContext(), bson.D{})
diff --git a/migrator/storage_redis.go b/migrator/storage_redis.go
index ea2ba70d4..4b22d7e20 100644
--- a/migrator/storage_redis.go
+++ b/migrator/storage_redis.go
@@ -36,10 +36,17 @@ var (
)
func newRedisMigrator(dm *engine.DataManager) (rM *redisMigrator) {
- return &redisMigrator{
- dm: dm,
- rds: dm.DataDB().(*engine.RedisStorage),
+ var rdsDB *engine.RedisStorage
+ for _, dbInf := range dm.DataDB() {
+ var canCast bool
+ if rdsDB, canCast = dbInf.(*engine.RedisStorage); canCast {
+ return &redisMigrator{
+ dm: dm,
+ rds: rdsDB,
+ }
+ }
}
+ return nil
}
func (v1rs *redisMigrator) close() {
@@ -157,292 +164,6 @@ func (v1rs *redisMigrator) setV2Stats(v2 *engine.StatQueue) (err error) {
return
}
-// AttributeProfile methods
-// get
-func (v1rs *redisMigrator) getV1AttributeProfile() (v1attrPrf *v1AttributeProfile, err error) {
- var v1attr *v1AttributeProfile
- if v1rs.qryIdx == nil {
- v1rs.dataKeys, err = v1rs.rds.GetKeysForPrefix(context.TODO(), utils.AttributeProfilePrefix)
- if err != nil {
- return
- } else if len(v1rs.dataKeys) == 0 {
- return nil, utils.ErrNoMoreData
- }
- v1rs.qryIdx = utils.IntPointer(0)
- }
- if *v1rs.qryIdx <= len(v1rs.dataKeys)-1 {
- var strVal []byte
- if err = v1rs.rds.Cmd(&strVal, "GET", v1rs.dataKeys[*v1rs.qryIdx]); err != nil {
- return nil, err
- }
- if err := v1rs.rds.Marshaler().Unmarshal(strVal, &v1attr); err != nil {
- return nil, err
- }
- *v1rs.qryIdx = *v1rs.qryIdx + 1
- } else {
- v1rs.qryIdx = nil
- return nil, utils.ErrNoMoreData
- }
- return v1attr, nil
-}
-
-// set
-func (v1rs *redisMigrator) setV1AttributeProfile(x *v1AttributeProfile) (err error) {
- key := utils.AttributeProfilePrefix + utils.ConcatenatedKey(x.Tenant, x.ID)
- bit, err := v1rs.rds.Marshaler().Marshal(x)
- if err != nil {
- return err
- }
- if err = v1rs.rds.Cmd(nil, "SET", key, string(bit)); err != nil {
- return err
- }
- return
-}
-
-// ThresholdProfile methods
-// get
-func (v1rs *redisMigrator) getV2ThresholdProfile() (v2T *v2Threshold, err error) {
- var v2Th *v2Threshold
- if v1rs.qryIdx == nil {
- v1rs.dataKeys, err = v1rs.rds.GetKeysForPrefix(context.TODO(), utils.ThresholdProfilePrefix)
- if err != nil {
- return
- } else if len(v1rs.dataKeys) == 0 {
- return nil, utils.ErrNoMoreData
- }
- v1rs.qryIdx = utils.IntPointer(0)
- }
- if *v1rs.qryIdx <= len(v1rs.dataKeys)-1 {
- var strVal []byte
- if err = v1rs.rds.Cmd(&strVal, "GET", v1rs.dataKeys[*v1rs.qryIdx]); err != nil {
- return nil, err
- }
- if err := v1rs.rds.Marshaler().Unmarshal(strVal, &v2Th); err != nil {
- return nil, err
- }
- *v1rs.qryIdx = *v1rs.qryIdx + 1
- } else {
- v1rs.qryIdx = nil
- return nil, utils.ErrNoMoreData
- }
- return v2Th, nil
-}
-
-func (v1rs *redisMigrator) getV3ThresholdProfile() (v2T *engine.ThresholdProfile, err error) {
- if v1rs.qryIdx == nil {
- v1rs.dataKeys, err = v1rs.rds.GetKeysForPrefix(context.TODO(), utils.ThresholdProfilePrefix)
- if err != nil {
- return
- } else if len(v1rs.dataKeys) == 0 {
- return nil, utils.ErrNoMoreData
- }
- v1rs.qryIdx = utils.IntPointer(0)
- }
- if *v1rs.qryIdx <= len(v1rs.dataKeys)-1 {
- var strVal []byte
- if err = v1rs.rds.Cmd(&strVal, "GET", v1rs.dataKeys[*v1rs.qryIdx]); err != nil {
- return nil, err
- }
- if err := v1rs.rds.Marshaler().Unmarshal(strVal, &v2T); err != nil {
- return nil, err
- }
- *v1rs.qryIdx = *v1rs.qryIdx + 1
- } else {
- v1rs.qryIdx = nil
- return nil, utils.ErrNoMoreData
- }
- return v2T, nil
-}
-
-// set
-func (v1rs *redisMigrator) setV2ThresholdProfile(x *v2Threshold) (err error) {
- key := utils.ThresholdProfilePrefix + utils.ConcatenatedKey(x.Tenant, x.ID)
- bit, err := v1rs.rds.Marshaler().Marshal(x)
- if err != nil {
- return err
- }
- if err = v1rs.rds.Cmd(nil, "SET", key, string(bit)); err != nil {
- return err
- }
- return
-}
-
-// rem
-func (v1rs *redisMigrator) remV2ThresholdProfile(tenant, id string) (err error) {
- key := utils.ThresholdProfilePrefix + utils.ConcatenatedKey(tenant, id)
- return v1rs.rds.Cmd(nil, "DEL", key)
-}
-
-// AttributeProfile methods
-// get
-func (v1rs *redisMigrator) getV2AttributeProfile() (v2attrPrf *v2AttributeProfile, err error) {
- var v2attr *v2AttributeProfile
- if v1rs.qryIdx == nil {
- v1rs.dataKeys, err = v1rs.rds.GetKeysForPrefix(context.TODO(), utils.AttributeProfilePrefix)
- if err != nil {
- return
- } else if len(v1rs.dataKeys) == 0 {
- return nil, utils.ErrNoMoreData
- }
- v1rs.qryIdx = utils.IntPointer(0)
- }
- if *v1rs.qryIdx <= len(v1rs.dataKeys)-1 {
- var strVal []byte
- if err = v1rs.rds.Cmd(&strVal, "GET", v1rs.dataKeys[*v1rs.qryIdx]); err != nil {
- return nil, err
- }
- if err := v1rs.rds.Marshaler().Unmarshal(strVal, &v2attr); err != nil {
- return nil, err
- }
- *v1rs.qryIdx = *v1rs.qryIdx + 1
- } else {
- v1rs.qryIdx = nil
- return nil, utils.ErrNoMoreData
- }
- return v2attr, nil
-}
-
-// set
-func (v1rs *redisMigrator) setV2AttributeProfile(x *v2AttributeProfile) (err error) {
- key := utils.AttributeProfilePrefix + utils.ConcatenatedKey(x.Tenant, x.ID)
- bit, err := v1rs.rds.Marshaler().Marshal(x)
- if err != nil {
- return err
- }
- if err = v1rs.rds.Cmd(nil, "SET", key, string(bit)); err != nil {
- return err
- }
- return
-}
-
-// rem
-func (v1rs *redisMigrator) remV2AttributeProfile(tenant, id string) (err error) {
- key := utils.AttributeProfilePrefix + utils.ConcatenatedKey(tenant, id)
- return v1rs.rds.Cmd(nil, "DEL", key)
-}
-
-// AttributeProfile methods
-// get
-func (v1rs *redisMigrator) getV3AttributeProfile() (v3attrPrf *v3AttributeProfile, err error) {
- var v3attr *v3AttributeProfile
- if v1rs.qryIdx == nil {
- v1rs.dataKeys, err = v1rs.rds.GetKeysForPrefix(context.TODO(), utils.AttributeProfilePrefix)
- if err != nil {
- return
- } else if len(v1rs.dataKeys) == 0 {
- return nil, utils.ErrNoMoreData
- }
- v1rs.qryIdx = utils.IntPointer(0)
- }
- if *v1rs.qryIdx <= len(v1rs.dataKeys)-1 {
- var strVal []byte
- if err = v1rs.rds.Cmd(&strVal, "GET", v1rs.dataKeys[*v1rs.qryIdx]); err != nil {
- return nil, err
- }
- if err := v1rs.rds.Marshaler().Unmarshal(strVal, &v3attr); err != nil {
- return nil, err
- }
- *v1rs.qryIdx = *v1rs.qryIdx + 1
- } else {
- v1rs.qryIdx = nil
- return nil, utils.ErrNoMoreData
- }
- return v3attr, nil
-}
-
-// set
-func (v1rs *redisMigrator) setV3AttributeProfile(x *v3AttributeProfile) (err error) {
- key := utils.AttributeProfilePrefix + utils.ConcatenatedKey(x.Tenant, x.ID)
- bit, err := v1rs.rds.Marshaler().Marshal(x)
- if err != nil {
- return err
- }
- if err = v1rs.rds.Cmd(nil, "SET", key, string(bit)); err != nil {
- return err
- }
- return
-}
-
-// rem
-func (v1rs *redisMigrator) remV3AttributeProfile(tenant, id string) (err error) {
- key := utils.AttributeProfilePrefix + utils.ConcatenatedKey(tenant, id)
- return v1rs.rds.Cmd(nil, "DEL", key)
-}
-
-// AttributeProfile methods
-// get
-func (v1rs *redisMigrator) getV4AttributeProfile() (v3attrPrf *v4AttributeProfile, err error) {
- var v4attr *v4AttributeProfile
- if v1rs.qryIdx == nil {
- v1rs.dataKeys, err = v1rs.rds.GetKeysForPrefix(context.TODO(), utils.AttributeProfilePrefix)
- if err != nil {
- return
- } else if len(v1rs.dataKeys) == 0 {
- return nil, utils.ErrNoMoreData
- }
- v1rs.qryIdx = utils.IntPointer(0)
- }
- if *v1rs.qryIdx <= len(v1rs.dataKeys)-1 {
- var strVal []byte
- if err = v1rs.rds.Cmd(&strVal, "GET", v1rs.dataKeys[*v1rs.qryIdx]); err != nil {
- return nil, err
- }
- if err := v1rs.rds.Marshaler().Unmarshal(strVal, &v4attr); err != nil {
- return nil, err
- }
- *v1rs.qryIdx = *v1rs.qryIdx + 1
- } else {
- v1rs.qryIdx = nil
- return nil, utils.ErrNoMoreData
- }
- return v4attr, nil
-}
-
-func (v1rs *redisMigrator) getV5AttributeProfile() (v5attr *v6AttributeProfile, err error) {
- if v1rs.qryIdx == nil {
- v1rs.dataKeys, err = v1rs.rds.GetKeysForPrefix(context.TODO(), utils.AttributeProfilePrefix)
- if err != nil {
- return
- } else if len(v1rs.dataKeys) == 0 {
- return nil, utils.ErrNoMoreData
- }
- v1rs.qryIdx = utils.IntPointer(0)
- }
- if *v1rs.qryIdx <= len(v1rs.dataKeys)-1 {
- var strVal []byte
- if err = v1rs.rds.Cmd(&strVal, "GET", v1rs.dataKeys[*v1rs.qryIdx]); err != nil {
- return nil, err
- }
- if err := v1rs.rds.Marshaler().Unmarshal(strVal, &v5attr); err != nil {
- return nil, err
- }
- *v1rs.qryIdx = *v1rs.qryIdx + 1
- } else {
- v1rs.qryIdx = nil
- return nil, utils.ErrNoMoreData
- }
- return
-}
-
-// set
-func (v1rs *redisMigrator) setV4AttributeProfile(x *v4AttributeProfile) (err error) {
- key := utils.AttributeProfilePrefix + utils.ConcatenatedKey(x.Tenant, x.ID)
- bit, err := v1rs.rds.Marshaler().Marshal(x)
- if err != nil {
- return err
- }
- if err = v1rs.rds.Cmd(nil, "SET", key, string(bit)); err != nil {
- return err
- }
- return
-}
-
-// rem
-func (v1rs *redisMigrator) remV4AttributeProfile(tenant, id string) (err error) {
- key := utils.AttributeProfilePrefix + utils.ConcatenatedKey(tenant, id)
- return v1rs.rds.Cmd(nil, "DEL", key)
-}
-
// Filter Methods
// get
func (v1rs *redisMigrator) getV1Filter() (v1Fltr *v1Filter, err error) {
@@ -513,52 +234,6 @@ func (v1rs *redisMigrator) remV1Filter(tenant, id string) (err error) {
return v1rs.rds.Cmd(nil, "DEL", key)
}
-// SupplierMethods
-func (v1rs *redisMigrator) getSupplier() (spl *SupplierProfile, err error) {
- if v1rs.qryIdx == nil {
- v1rs.dataKeys, err = v1rs.rds.GetKeysForPrefix(context.TODO(), SupplierProfilePrefix)
- if err != nil {
- return
- } else if len(v1rs.dataKeys) == 0 {
- return nil, utils.ErrNoMoreData
- }
- v1rs.qryIdx = utils.IntPointer(0)
- }
- if *v1rs.qryIdx <= len(v1rs.dataKeys)-1 {
- var strVal []byte
- if err = v1rs.rds.Cmd(&strVal, "GET", v1rs.dataKeys[*v1rs.qryIdx]); err != nil {
- return nil, err
- }
- if err := v1rs.rds.Marshaler().Unmarshal(strVal, &spl); err != nil {
- return nil, err
- }
- *v1rs.qryIdx = *v1rs.qryIdx + 1
- } else {
- v1rs.qryIdx = nil
- return nil, utils.ErrNoMoreData
- }
- return
-}
-
-// set
-func (v1rs *redisMigrator) setSupplier(spl *SupplierProfile) (err error) {
- key := SupplierProfilePrefix + utils.ConcatenatedKey(spl.Tenant, spl.ID)
- bit, err := v1rs.rds.Marshaler().Marshal(spl)
- if err != nil {
- return err
- }
- if err = v1rs.rds.Cmd(nil, "SET", key, string(bit)); err != nil {
- return err
- }
- return
-}
-
-// rem
-func (v1rs *redisMigrator) remSupplier(tenant, id string) (err error) {
- key := SupplierProfilePrefix + utils.ConcatenatedKey(tenant, id)
- return v1rs.rds.Cmd(nil, "DEL", key)
-}
-
func (v1rs *redisMigrator) getV1ChargerProfile() (v1chrPrf *utils.ChargerProfile, err error) {
if v1rs.qryIdx == nil {
v1rs.dataKeys, err = v1rs.rds.GetKeysForPrefix(context.TODO(), utils.ChargerProfilePrefix)
diff --git a/migrator/subscribers.go b/migrator/subscribers.go
deleted file mode 100644
index c0dad5660..000000000
--- a/migrator/subscribers.go
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
-Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
-Copyright (C) ITsysCOM GmbH
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see
-*/
-
-package migrator
-
-import (
- "github.com/cgrates/cgrates/engine"
- "github.com/cgrates/cgrates/utils"
-)
-
-// func (m *Migrator) migrateCurrentSubscribers() (err error) {
-// subs, err := m.dmIN.DataManager().GetSubscribers()
-// if err != nil {
-// return err
-// }
-// for id, sub := range subs {
-// if sub != nil {
-// if m.dryRun != true {
-// if err := m.dmOut.DataManager().SetSubscriber(id, sub); err != nil {
-// return err
-// }
-// m.stats[utils.Subscribers] += 1
-// }
-// }
-// }
-// return
-// }
-
-func (m *Migrator) migrateSubscribers() (err error) {
- var vrs engine.Versions
- current := engine.CurrentDataDBVersions()
- if vrs, err = m.getVersions(utils.Subscribers); err != nil {
- return
- }
- switch vrs[utils.Subscribers] {
- case current[utils.Subscribers]:
- if m.sameDataDB {
- return
- }
- return utils.ErrNotImplemented
- // return m.migrateCurrentSubscribers()
- }
- return
-}
diff --git a/migrator/thresholds.go b/migrator/thresholds.go
deleted file mode 100644
index 162fa98c2..000000000
--- a/migrator/thresholds.go
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
-Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
-Copyright (C) ITsysCOM GmbH
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see
-*/
-
-package migrator
-
-import (
- "fmt"
- "strings"
- "time"
-
- "github.com/cgrates/birpc/context"
- "github.com/cgrates/cgrates/engine"
- "github.com/cgrates/cgrates/utils"
-)
-
-func (m *Migrator) migrateCurrentThresholds() (err error) {
- var ids []string
- //ThresholdProfiles
- ids, err = m.dmIN.DataManager().DataDB().GetKeysForPrefix(context.TODO(), utils.ThresholdProfilePrefix)
- if err != nil {
- return err
- }
- for _, id := range ids {
- tntID := strings.SplitN(strings.TrimPrefix(id, utils.ThresholdProfilePrefix), utils.InInFieldSep, 2)
- if len(tntID) < 2 {
- return fmt.Errorf("Invalid key <%s> when migrating threshold profiles", id)
- }
- thps, err := m.dmIN.DataManager().GetThresholdProfile(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
- if err != nil {
- return err
- }
- ths, err := m.dmIN.DataManager().GetThreshold(context.TODO(), tntID[0], tntID[1], false, false, utils.NonTransactional)
- if err != nil {
- return err
- }
- if thps == nil || m.dryRun {
- continue
- }
- if err := m.dmOut.DataManager().SetThresholdProfile(context.TODO(), thps, true); err != nil {
- return err
- }
- // update the threshold in the new DB
- if ths != nil {
- if err := m.dmOut.DataManager().SetThreshold(context.TODO(), ths); err != nil {
- return err
- }
- }
- if err := m.dmIN.DataManager().RemoveThresholdProfile(context.TODO(), tntID[0], tntID[1], false); err != nil {
- return err
- }
- m.stats[utils.Thresholds]++
- }
- return
-}
-
-func (m *Migrator) removeV2Thresholds() (err error) {
- var v2T *v2Threshold
- for {
- v2T, err = m.dmIN.getV2ThresholdProfile()
- if err != nil && err != utils.ErrNoMoreData {
- return err
- }
- if err == utils.ErrNoMoreData {
- break
- }
- if err = m.dmIN.remV2ThresholdProfile(v2T.Tenant, v2T.ID); err != nil {
- return err
- }
- }
- return
-}
-
-func (m *Migrator) migrateV2Thresholds() (v3 *engine.ThresholdProfile, err error) {
- var v2T *v2Threshold
- if v2T, err = m.dmIN.getV2ThresholdProfile(); err != nil {
- return
- }
- if v2T == nil {
- return
- }
- v3 = v2T.V2toV3Threshold()
- return
-}
-
-func (m *Migrator) migrateThresholds() (err error) {
- var vrs engine.Versions
- current := engine.CurrentDataDBVersions()
- if vrs, err = m.getVersions(utils.Thresholds); err != nil {
- return
- }
- migrated := true
- migratedFrom := 0
- var th *engine.Threshold
- var filter *engine.Filter
- var v3 *engine.ThresholdProfile
- var v4 *engine.ThresholdProfile
- for {
- version := vrs[utils.Thresholds]
- migratedFrom = int(version)
- for {
- switch version {
- default:
- return fmt.Errorf("Unsupported version %v", version)
- case current[utils.Thresholds]:
- migrated = false
- if m.sameDataDB {
- break
- }
- if err = m.migrateCurrentThresholds(); err != nil {
- return
- }
- case 1:
- version = 3
- case 2:
- if v3, err = m.migrateV2Thresholds(); err != nil && err != utils.ErrNoMoreData {
- return
- }
- version = 3
- case 3:
- if v4, err = m.migrateV3ToV4Threshold(v3); err != nil && err != utils.ErrNoMoreData {
- return
- } else if err == utils.ErrNoMoreData {
- break
- }
- version = 4
- }
- if version == current[utils.Thresholds] || err == utils.ErrNoMoreData {
- break
- }
- }
- if err == utils.ErrNoMoreData || !migrated {
- break
- }
-
- if !m.dryRun {
- //set threshond
- if migratedFrom == 1 {
- if err = m.dmOut.DataManager().SetFilter(context.TODO(), filter, true); err != nil {
- return
- }
- }
- if err = m.dmOut.DataManager().SetThresholdProfile(context.TODO(), v4, true); err != nil {
- return
- }
- if migratedFrom == 1 { // do it after SetThresholdProfile to overwrite the created threshold
- if err = m.dmOut.DataManager().SetThreshold(context.TODO(), th); err != nil {
- return
- }
- }
- }
- m.stats[utils.Thresholds]++
- }
- if m.dryRun || !migrated {
- return nil
- }
- // remove old threshonds
- if !m.sameDataDB && migratedFrom == 2 {
- if err = m.removeV2Thresholds(); err != nil && err != utils.ErrNoMoreData {
- return
- }
- }
- // All done, update version wtih current one
- if err = m.setVersions(utils.Thresholds); err != nil {
- return
- }
- return m.ensureIndexesDataDB(engine.ColTps)
-}
-
-type v2Threshold struct {
- Tenant string
- ID string
- FilterIDs []string
- ActivationInterval *utils.ActivationInterval // Time when this limit becomes active and expires
- Recurrent bool
- MinHits int
- MinSleep time.Duration
- Blocker bool // blocker flag to stop processing on filters matched
- Weight float64 // Weight to sort the thresholds
- ActionIDs []string
- Async bool
-}
-
-func (v2T v2Threshold) V2toV3Threshold() (th *engine.ThresholdProfile) {
- th = &engine.ThresholdProfile{
- Tenant: v2T.Tenant,
- ID: v2T.ID,
- FilterIDs: v2T.FilterIDs,
- MinHits: v2T.MinHits,
- MinSleep: v2T.MinSleep,
- Blocker: v2T.Blocker,
- Weights: utils.DynamicWeights{
- {
- Weight: v2T.Weight,
- },
- },
- ActionProfileIDs: v2T.ActionIDs,
- Async: v2T.Async,
- }
- th.MaxHits = 1
- if v2T.Recurrent {
- th.MaxHits = -1
- }
- return
-}
-
-func (m *Migrator) migrateV3ToV4Threshold(v3sts *engine.ThresholdProfile) (v4Cpp *engine.ThresholdProfile, err error) {
- if v3sts == nil {
- // read data from DataDB
- if v3sts, err = m.dmIN.getV3ThresholdProfile(); err != nil {
- return
- }
- }
- if v3sts.FilterIDs, err = migrateInlineFilterV4(v3sts.FilterIDs); err != nil {
- return
- }
- return v3sts, nil
-}
diff --git a/services/accounts.go b/services/accounts.go
index 356a598eb..804f0e46e 100644
--- a/services/accounts.go
+++ b/services/accounts.go
@@ -56,7 +56,7 @@ func (acts *AccountService) Start(shutdown *utils.SyncedChan, registry *servmana
utils.ConnManager,
utils.CacheS,
utils.FilterS,
- utils.DataDB,
+ utils.DB,
},
registry, acts.cfg.GeneralCfg().ConnectTimeout)
if err != nil {
@@ -71,7 +71,7 @@ func (acts *AccountService) Start(shutdown *utils.SyncedChan, registry *servmana
return err
}
fs := srvDeps[utils.FilterS].(*FilterService).FilterS()
- dbs := srvDeps[utils.DataDB].(*DataDBService).DataManager()
+ dbs := srvDeps[utils.DB].(*DataDBService).DataManager()
acts.mu.Lock()
defer acts.mu.Unlock()
diff --git a/services/actions.go b/services/actions.go
index edab07e3f..7fa421e8b 100644
--- a/services/actions.go
+++ b/services/actions.go
@@ -56,7 +56,7 @@ func (acts *ActionService) Start(shutdown *utils.SyncedChan, registry *servmanag
utils.ConnManager,
utils.CacheS,
utils.FilterS,
- utils.DataDB,
+ utils.DB,
},
registry, acts.cfg.GeneralCfg().ConnectTimeout)
if err != nil {
@@ -71,7 +71,7 @@ func (acts *ActionService) Start(shutdown *utils.SyncedChan, registry *servmanag
return err
}
fs := srvDeps[utils.FilterS].(*FilterService).FilterS()
- dbs := srvDeps[utils.DataDB].(*DataDBService).DataManager()
+ dbs := srvDeps[utils.DB].(*DataDBService).DataManager()
acts.mu.Lock()
defer acts.mu.Unlock()
diff --git a/services/adminsv1.go b/services/adminsv1.go
index 2fa8d2da3..654a4ba9c 100644
--- a/services/adminsv1.go
+++ b/services/adminsv1.go
@@ -53,8 +53,7 @@ func (s *AdminSv1Service) Start(_ *utils.SyncedChan, registry *servmanager.Servi
utils.CommonListenerS,
utils.ConnManager,
utils.FilterS,
- utils.DataDB,
- utils.StorDB,
+ utils.DB,
},
registry, s.cfg.GeneralCfg().ConnectTimeout)
if err != nil {
@@ -63,13 +62,12 @@ func (s *AdminSv1Service) Start(_ *utils.SyncedChan, registry *servmanager.Servi
cl := srvDeps[utils.CommonListenerS].(*CommonListenerService).CLS()
cms := srvDeps[utils.ConnManager].(*ConnManagerService)
fs := srvDeps[utils.FilterS].(*FilterService).FilterS()
- dm := srvDeps[utils.DataDB].(*DataDBService).DataManager()
- sdb := srvDeps[utils.StorDB].(*StorDBService).DB()
+ dm := srvDeps[utils.DB].(*DataDBService).DataManager()
s.mu.Lock()
defer s.mu.Unlock()
- s.api = apis.NewAdminSv1(s.cfg, dm, cms.ConnManager(), fs, sdb)
+ s.api = apis.NewAdminSv1(s.cfg, dm, cms.ConnManager(), fs)
srv, _ := engine.NewService(s.api)
// srv, _ := birpc.NewService(s.api, "", false)
diff --git a/services/attributes.go b/services/attributes.go
index a57249cbf..8da6b3f47 100644
--- a/services/attributes.go
+++ b/services/attributes.go
@@ -51,7 +51,7 @@ func (attrS *AttributeService) Start(shutdown *utils.SyncedChan, registry *servm
utils.ConnManager,
utils.CacheS,
utils.FilterS,
- utils.DataDB,
+ utils.DB,
},
registry, attrS.cfg.GeneralCfg().ConnectTimeout)
if err != nil {
@@ -66,7 +66,7 @@ func (attrS *AttributeService) Start(shutdown *utils.SyncedChan, registry *servm
return
}
fs := srvDeps[utils.FilterS].(*FilterService).FilterS()
- dm := srvDeps[utils.DataDB].(*DataDBService).DataManager()
+ dm := srvDeps[utils.DB].(*DataDBService).DataManager()
attrS.mu.Lock()
defer attrS.mu.Unlock()
diff --git a/services/caches.go b/services/caches.go
index 88895767b..5687ed1a2 100644
--- a/services/caches.go
+++ b/services/caches.go
@@ -49,7 +49,7 @@ func (cS *CacheService) Start(shutdown *utils.SyncedChan, registry *servmanager.
srvDeps, err := WaitForServicesToReachState(utils.StateServiceUP,
[]string{
utils.CommonListenerS,
- utils.DataDB,
+ utils.DB,
utils.ConnManager,
utils.CoreS,
},
@@ -58,7 +58,7 @@ func (cS *CacheService) Start(shutdown *utils.SyncedChan, registry *servmanager.
return err
}
cl := srvDeps[utils.CommonListenerS].(*CommonListenerService).CLS()
- dbs := srvDeps[utils.DataDB].(*DataDBService)
+ dbs := srvDeps[utils.DB].(*DataDBService)
cms := srvDeps[utils.ConnManager].(*ConnManagerService)
cs := srvDeps[utils.CoreS].(*CoreService)
diff --git a/services/cdrs.go b/services/cdrs.go
index d56c73ff3..ddde5d831 100644
--- a/services/cdrs.go
+++ b/services/cdrs.go
@@ -52,8 +52,7 @@ func (cs *CDRService) Start(_ *utils.SyncedChan, registry *servmanager.ServiceRe
utils.CommonListenerS,
utils.ConnManager,
utils.FilterS,
- utils.DataDB,
- utils.StorDB,
+ utils.DB,
},
registry, cs.cfg.GeneralCfg().ConnectTimeout)
if err != nil {
@@ -62,13 +61,12 @@ func (cs *CDRService) Start(_ *utils.SyncedChan, registry *servmanager.ServiceRe
cl := srvDeps[utils.CommonListenerS].(*CommonListenerService).CLS()
cms := srvDeps[utils.ConnManager].(*ConnManagerService)
fs := srvDeps[utils.FilterS].(*FilterService).FilterS()
- dbs := srvDeps[utils.DataDB].(*DataDBService)
- sdbs := srvDeps[utils.StorDB].(*StorDBService).DB()
+ dbs := srvDeps[utils.DB].(*DataDBService)
cs.mu.Lock()
defer cs.mu.Unlock()
- cs.cdrS = cdrs.NewCDRServer(cs.cfg, dbs.DataManager(), fs, cms.ConnManager(), sdbs)
+ cs.cdrS = cdrs.NewCDRServer(cs.cfg, dbs.DataManager(), fs, cms.ConnManager())
runtime.Gosched()
srv, err := engine.NewServiceWithPing(cs.cdrS, utils.CDRsV1, utils.V1Prfx)
if err != nil {
diff --git a/services/chargers.go b/services/chargers.go
index 65042fc1d..19c8e568f 100644
--- a/services/chargers.go
+++ b/services/chargers.go
@@ -52,7 +52,7 @@ func (chrS *ChargerService) Start(shutdown *utils.SyncedChan, registry *servmana
utils.ConnManager,
utils.CacheS,
utils.FilterS,
- utils.DataDB,
+ utils.DB,
},
registry, chrS.cfg.GeneralCfg().ConnectTimeout)
if err != nil {
@@ -67,7 +67,7 @@ func (chrS *ChargerService) Start(shutdown *utils.SyncedChan, registry *servmana
return err
}
fs := srvDeps[utils.FilterS].(*FilterService)
- dbs := srvDeps[utils.DataDB].(*DataDBService)
+ dbs := srvDeps[utils.DB].(*DataDBService)
chrS.mu.Lock()
defer chrS.mu.Unlock()
diff --git a/services/datadb.go b/services/datadb.go
index 1c5d5adee..8a11781cb 100644
--- a/services/datadb.go
+++ b/services/datadb.go
@@ -41,7 +41,7 @@ func NewDataDBService(cfg *config.CGRConfig, setVersions bool) *DataDBService {
type DataDBService struct {
mu sync.RWMutex
cfg *config.CGRConfig
- oldDBCfg *config.DataDbCfg
+ oldDBCfg *config.DbCfg
dm *engine.DataManager
setVersions bool
stateDeps *StateDependencies // channel subscriptions for state changes
@@ -55,25 +55,39 @@ func (db *DataDBService) Start(_ *utils.SyncedChan, registry *servmanager.Servic
}
db.mu.Lock()
defer db.mu.Unlock()
- db.oldDBCfg = db.cfg.DataDbCfg().Clone()
- dbConn, err := engine.NewDataDBConn(db.cfg.DataDbCfg().Type,
- db.cfg.DataDbCfg().Host, db.cfg.DataDbCfg().Port,
- db.cfg.DataDbCfg().Name, db.cfg.DataDbCfg().User,
- db.cfg.DataDbCfg().Password, db.cfg.GeneralCfg().DBDataEncoding,
- db.cfg.DataDbCfg().Opts, db.cfg.DataDbCfg().Items)
- if err != nil { // Cannot configure getter database, show stopper
- utils.Logger.Crit(fmt.Sprintf("Could not configure dataDb: %s exiting!", err))
- return
+ db.oldDBCfg = db.cfg.DbCfg().Clone()
+ dbConnMap := new(engine.DBConnManager)
+ for dbConnKey, dbconn := range db.cfg.DbCfg().DBConns {
+ dbConn, err := engine.NewDataDBConn(dbconn.Type,
+ dbconn.Host, dbconn.Port, dbconn.Name, dbconn.User,
+ dbconn.Password, db.cfg.GeneralCfg().DBDataEncoding, dbconn.StringIndexedFields,
+ dbconn.PrefixIndexedFields, db.cfg.DbCfg().Opts, db.cfg.DbCfg().Items)
+ if err != nil { // Cannot configure getter database, show stopper
+ utils.Logger.Crit(fmt.Sprintf("Could not configure dataDb: %s exiting!", err))
+ return err
+ }
+ dbConnMap.AddDataDBDriver(dbConnKey, dbConn)
+ if dbconn.Type != utils.MetaInternal {
+ utils.Logger.Info(fmt.Sprintf(" connection established with <%s:%s> with DB name <%s>, Type <%s>", dbconn.Host, dbconn.Port, dbconn.Name, dbconn.Type))
+ } else {
+ utils.Logger.Info(" Internal DB established")
+ }
}
- db.dm = engine.NewDataManager(dbConn, db.cfg, cms.(*ConnManagerService).ConnManager())
-
+ db.dm = engine.NewDataManager(dbConnMap, db.cfg, cms.(*ConnManagerService).ConnManager())
if db.setVersions {
- err = engine.OverwriteDBVersions(dbConn)
+ dataDB, _, err := dbConnMap.GetConn(utils.CacheVersions)
+ if err != nil {
+ return err
+ }
+ if err = engine.OverwriteDBVersions(dataDB); err != nil {
+ return err
+ }
} else {
- err = engine.CheckVersions(db.dm.DataDB())
- }
- if err != nil {
- return err
+ for _, dataDB := range db.dm.DataDB() {
+ if err = engine.CheckVersions(dataDB); err != nil {
+ return err
+ }
+ }
}
return
}
@@ -83,26 +97,40 @@ func (db *DataDBService) Reload(_ *utils.SyncedChan, _ *servmanager.ServiceRegis
db.mu.Lock()
defer db.mu.Unlock()
if db.needsConnectionReload() {
- var d engine.DataDBDriver
- d, err = engine.NewDataDBConn(db.cfg.DataDbCfg().Type,
- db.cfg.DataDbCfg().Host, db.cfg.DataDbCfg().Port,
- db.cfg.DataDbCfg().Name, db.cfg.DataDbCfg().User,
- db.cfg.DataDbCfg().Password, db.cfg.GeneralCfg().DBDataEncoding,
- db.cfg.DataDbCfg().Opts, db.cfg.DataDbCfg().Items)
- if err != nil {
+ if err = db.dm.ReconnectAll(db.cfg); err != nil {
return
}
- db.dm.Reconnect(d)
- db.oldDBCfg = db.cfg.DataDbCfg().Clone()
+ db.oldDBCfg = db.cfg.DbCfg().Clone()
return
}
- if db.cfg.DataDbCfg().Type == utils.MetaMongo {
- mgo, canCast := db.dm.DataDB().(*engine.MongoStorage)
- if !canCast {
- return fmt.Errorf("can't conver DataDB of type %s to MongoStorage",
- db.cfg.DataDbCfg().Type)
+ for dbKey, dbConn := range db.cfg.DbCfg().DBConns {
+ switch dbConn.Type {
+ case utils.MetaMongo:
+ mgo, canCast := db.dm.DataDB()[dbKey].(*engine.MongoStorage)
+ if !canCast {
+ return fmt.Errorf("can't conver DataDB of type %s to MongoStorage",
+ dbConn.Type)
+ }
+ mgo.SetTTL(db.cfg.DbCfg().Opts.MongoQueryTimeout)
+ case utils.MetaPostgres, utils.MetaMySQL:
+ msql, canCast := db.dm.DataDB()[dbKey].(*engine.SQLStorage)
+ if !canCast {
+ return fmt.Errorf("can't convert DB of type %s to SQLStorage",
+ dbConn.Type)
+ }
+ msql.DB.SetMaxOpenConns(db.cfg.DbCfg().Opts.SQLMaxOpenConns)
+ msql.DB.SetMaxIdleConns(db.cfg.DbCfg().Opts.SQLMaxIdleConns)
+ msql.DB.SetConnMaxLifetime(db.cfg.DbCfg().Opts.SQLConnMaxLifetime)
+ case utils.MetaInternal:
+ idb, canCast := db.dm.DataDB()[dbKey].(*engine.InternalDB)
+ if !canCast {
+ return fmt.Errorf("can't convert DB of type %s to InternalDB",
+ dbConn.Type)
+ }
+ idb.SetStringIndexedFields(dbConn.StringIndexedFields)
+ idb.SetPrefixIndexedFields(dbConn.PrefixIndexedFields)
}
- mgo.SetTTL(db.cfg.DataDbCfg().Opts.MongoQueryTimeout)
+
}
return
}
@@ -128,13 +156,15 @@ func (db *DataDBService) Shutdown(registry *servmanager.ServiceRegistry) error {
}
db.mu.Lock()
defer db.mu.Unlock()
- db.dm.DataDB().Close()
+ for dataDBKey := range db.dm.DataDB() {
+ db.dm.DataDB()[dataDBKey].Close()
+ }
return nil
}
// ServiceName returns the service name
func (db *DataDBService) ServiceName() string {
- return utils.DataDB
+ return utils.DB
}
// ShouldRun returns if the service should be running
@@ -144,35 +174,60 @@ func (db *DataDBService) ShouldRun() bool { // db should allways run
// needsConnectionReload returns if the DB connection needs to reloaded
func (db *DataDBService) needsConnectionReload() bool {
- if db.oldDBCfg.Type != db.cfg.DataDbCfg().Type ||
- db.oldDBCfg.Host != db.cfg.DataDbCfg().Host ||
- db.oldDBCfg.Name != db.cfg.DataDbCfg().Name ||
- db.oldDBCfg.Port != db.cfg.DataDbCfg().Port ||
- db.oldDBCfg.User != db.cfg.DataDbCfg().User ||
- db.oldDBCfg.Password != db.cfg.DataDbCfg().Password {
+ if len(db.oldDBCfg.DBConns) != len(db.cfg.DbCfg().DBConns) {
return true
}
- if db.cfg.DataDbCfg().Type == utils.MetaInternal { // in case of internal recreate the db using the new config
- for key, itm := range db.oldDBCfg.Items {
- if db.cfg.DataDbCfg().Items[key].Limit != itm.Limit &&
- db.cfg.DataDbCfg().Items[key].StaticTTL != itm.StaticTTL &&
- db.cfg.DataDbCfg().Items[key].TTL != itm.TTL {
- return true
+ for dbConnKey, dbConn := range db.oldDBCfg.DBConns {
+ if _, has := db.cfg.DbCfg().DBConns[dbConnKey]; !has {
+ return true
+ }
+ if dbConn.Type != db.cfg.DbCfg().DBConns[dbConnKey].Type ||
+ dbConn.Host != db.cfg.DbCfg().DBConns[dbConnKey].Host ||
+ dbConn.Name != db.cfg.DbCfg().DBConns[dbConnKey].Name ||
+ dbConn.Port != db.cfg.DbCfg().DBConns[dbConnKey].Port ||
+ dbConn.User != db.cfg.DbCfg().DBConns[dbConnKey].User ||
+ dbConn.Password != db.cfg.DbCfg().DBConns[dbConnKey].Password ||
+ !utils.EqualUnorderedStringSlices(dbConn.StringIndexedFields,
+ db.cfg.DbCfg().DBConns[dbConnKey].StringIndexedFields) ||
+ !utils.EqualUnorderedStringSlices(dbConn.PrefixIndexedFields,
+ db.cfg.DbCfg().DBConns[dbConnKey].PrefixIndexedFields) {
+ return true
+ }
+ if db.cfg.DbCfg().DBConns[dbConnKey].Type == utils.MetaInternal { // in case of internal recreate the db using the new config
+ for key, itm := range db.oldDBCfg.Items {
+ if db.cfg.DbCfg().Items[key].Limit != itm.Limit &&
+ db.cfg.DbCfg().Items[key].StaticTTL != itm.StaticTTL &&
+ db.cfg.DbCfg().Items[key].TTL != itm.TTL &&
+ db.cfg.DbCfg().Items[key].DBConn != itm.DBConn {
+ return true
+ }
}
}
+ if db.oldDBCfg.DBConns[dbConnKey].Type == utils.MetaRedis &&
+ (db.oldDBCfg.Opts.RedisMaxConns != db.cfg.DbCfg().Opts.RedisMaxConns ||
+ db.oldDBCfg.Opts.RedisConnectAttempts != db.cfg.DbCfg().Opts.RedisConnectAttempts ||
+ db.oldDBCfg.Opts.RedisSentinel != db.cfg.DbCfg().Opts.RedisSentinel ||
+ db.oldDBCfg.Opts.RedisCluster != db.cfg.DbCfg().Opts.RedisCluster ||
+ db.oldDBCfg.Opts.RedisClusterSync != db.cfg.DbCfg().Opts.RedisClusterSync ||
+ db.oldDBCfg.Opts.RedisClusterOndownDelay != db.cfg.DbCfg().Opts.RedisClusterOndownDelay ||
+ db.oldDBCfg.Opts.RedisConnectTimeout != db.cfg.DbCfg().Opts.RedisConnectTimeout ||
+ db.oldDBCfg.Opts.RedisReadTimeout != db.cfg.DbCfg().Opts.RedisReadTimeout ||
+ db.oldDBCfg.Opts.RedisWriteTimeout != db.cfg.DbCfg().Opts.RedisWriteTimeout ||
+ db.oldDBCfg.Opts.RedisPoolPipelineWindow != db.cfg.DbCfg().Opts.RedisPoolPipelineWindow ||
+ db.oldDBCfg.Opts.RedisPoolPipelineLimit != db.cfg.DbCfg().Opts.RedisPoolPipelineLimit) {
+ return true
+ }
+ if db.cfg.DbCfg().DBConns[dbConnKey].Type == utils.MetaPostgres &&
+ (db.oldDBCfg.Opts.PgSSLMode != db.cfg.DbCfg().Opts.PgSSLMode ||
+ db.oldDBCfg.Opts.PgSSLCert != db.cfg.DbCfg().Opts.PgSSLCert ||
+ db.oldDBCfg.Opts.PgSSLKey != db.cfg.DbCfg().Opts.PgSSLKey ||
+ db.oldDBCfg.Opts.PgSSLPassword != db.cfg.DbCfg().Opts.PgSSLPassword ||
+ db.oldDBCfg.Opts.PgSSLCertMode != db.cfg.DbCfg().Opts.PgSSLCertMode ||
+ db.oldDBCfg.Opts.PgSSLRootCert != db.cfg.DbCfg().Opts.PgSSLRootCert) {
+ return true
+ }
}
- return db.oldDBCfg.Type == utils.MetaRedis &&
- (db.oldDBCfg.Opts.RedisMaxConns != db.cfg.DataDbCfg().Opts.RedisMaxConns ||
- db.oldDBCfg.Opts.RedisConnectAttempts != db.cfg.DataDbCfg().Opts.RedisConnectAttempts ||
- db.oldDBCfg.Opts.RedisSentinel != db.cfg.DataDbCfg().Opts.RedisSentinel ||
- db.oldDBCfg.Opts.RedisCluster != db.cfg.DataDbCfg().Opts.RedisCluster ||
- db.oldDBCfg.Opts.RedisClusterSync != db.cfg.DataDbCfg().Opts.RedisClusterSync ||
- db.oldDBCfg.Opts.RedisClusterOndownDelay != db.cfg.DataDbCfg().Opts.RedisClusterOndownDelay ||
- db.oldDBCfg.Opts.RedisConnectTimeout != db.cfg.DataDbCfg().Opts.RedisConnectTimeout ||
- db.oldDBCfg.Opts.RedisReadTimeout != db.cfg.DataDbCfg().Opts.RedisReadTimeout ||
- db.oldDBCfg.Opts.RedisWriteTimeout != db.cfg.DataDbCfg().Opts.RedisWriteTimeout ||
- db.oldDBCfg.Opts.RedisPoolPipelineWindow != db.cfg.DataDbCfg().Opts.RedisPoolPipelineWindow ||
- db.oldDBCfg.Opts.RedisPoolPipelineLimit != db.cfg.DataDbCfg().Opts.RedisPoolPipelineLimit)
+ return false
}
// DataManager returns the DataManager object.
diff --git a/services/ers.go b/services/ers.go
index 1ac0e9eec..5bbff9b6d 100644
--- a/services/ers.go
+++ b/services/ers.go
@@ -55,7 +55,7 @@ func (erS *EventReaderService) Start(shutdown *utils.SyncedChan, registry *servm
utils.CommonListenerS,
utils.ConnManager,
utils.FilterS,
- utils.DataDB,
+ utils.DB,
},
registry, erS.cfg.GeneralCfg().ConnectTimeout)
if err != nil {
@@ -64,7 +64,7 @@ func (erS *EventReaderService) Start(shutdown *utils.SyncedChan, registry *servm
cl := srvDeps[utils.CommonListenerS].(*CommonListenerService).CLS()
cms := srvDeps[utils.ConnManager].(*ConnManagerService)
fs := srvDeps[utils.FilterS].(*FilterService)
- dbs := srvDeps[utils.DataDB].(*DataDBService)
+ dbs := srvDeps[utils.DB].(*DataDBService)
erS.mu.Lock()
defer erS.mu.Unlock()
diff --git a/services/filters.go b/services/filters.go
index e7f113200..57099307c 100644
--- a/services/filters.go
+++ b/services/filters.go
@@ -49,7 +49,7 @@ func (s *FilterService) Start(shutdown *utils.SyncedChan, registry *servmanager.
[]string{
utils.ConnManager,
utils.CacheS,
- utils.DataDB,
+ utils.DB,
},
registry, s.cfg.GeneralCfg().ConnectTimeout)
if err != nil {
@@ -60,7 +60,7 @@ func (s *FilterService) Start(shutdown *utils.SyncedChan, registry *servmanager.
if err = cacheS.WaitToPrecache(shutdown, utils.CacheFilters); err != nil {
return err
}
- dbs := srvDeps[utils.DataDB].(*DataDBService)
+ dbs := srvDeps[utils.DB].(*DataDBService)
s.mu.Lock()
defer s.mu.Unlock()
diff --git a/services/ips.go b/services/ips.go
index 3dc60318c..81489d81a 100644
--- a/services/ips.go
+++ b/services/ips.go
@@ -53,7 +53,7 @@ func (s *IPService) Start(shutdown *utils.SyncedChan, registry *servmanager.Serv
utils.ConnManager,
utils.CacheS,
utils.FilterS,
- utils.DataDB,
+ utils.DB,
},
registry, s.cfg.GeneralCfg().ConnectTimeout)
if err != nil {
@@ -69,7 +69,7 @@ func (s *IPService) Start(shutdown *utils.SyncedChan, registry *servmanager.Serv
return err
}
fs := srvDeps[utils.FilterS].(*FilterService)
- dbs := srvDeps[utils.DataDB].(*DataDBService)
+ dbs := srvDeps[utils.DB].(*DataDBService)
s.mu.Lock()
defer s.mu.Unlock()
diff --git a/services/loaders.go b/services/loaders.go
index 847d6e746..e39e38aea 100644
--- a/services/loaders.go
+++ b/services/loaders.go
@@ -57,7 +57,7 @@ func (s *LoaderService) Start(_ *utils.SyncedChan, registry *servmanager.Service
utils.CommonListenerS,
utils.ConnManager,
utils.FilterS,
- utils.DataDB,
+ utils.DB,
},
registry, s.cfg.GeneralCfg().ConnectTimeout)
if err != nil {
@@ -66,7 +66,7 @@ func (s *LoaderService) Start(_ *utils.SyncedChan, registry *servmanager.Service
cl := srvDeps[utils.CommonListenerS].(*CommonListenerService).CLS()
cms := srvDeps[utils.ConnManager].(*ConnManagerService)
fs := srvDeps[utils.FilterS].(*FilterService).FilterS()
- dbs := srvDeps[utils.DataDB].(*DataDBService).DataManager()
+ dbs := srvDeps[utils.DB].(*DataDBService).DataManager()
s.mu.Lock()
defer s.mu.Unlock()
@@ -107,7 +107,7 @@ func (s *LoaderService) Reload(_ *utils.SyncedChan, registry *servmanager.Servic
[]string{
utils.ConnManager,
utils.FilterS,
- utils.DataDB,
+ utils.DB,
},
registry, s.cfg.GeneralCfg().ConnectTimeout)
if err != nil {
@@ -115,7 +115,7 @@ func (s *LoaderService) Reload(_ *utils.SyncedChan, registry *servmanager.Servic
}
cms := srvDeps[utils.ConnManager].(*ConnManagerService).ConnManager()
fs := srvDeps[utils.FilterS].(*FilterService).FilterS()
- dbs := srvDeps[utils.DataDB].(*DataDBService).DataManager()
+ dbs := srvDeps[utils.DB].(*DataDBService).DataManager()
close(s.stopChan)
s.stopChan = make(chan struct{})
diff --git a/services/rankings.go b/services/rankings.go
index 970621e78..2849edc2d 100644
--- a/services/rankings.go
+++ b/services/rankings.go
@@ -53,7 +53,7 @@ func (ran *RankingService) Start(shutdown *utils.SyncedChan, registry *servmanag
utils.ConnManager,
utils.CacheS,
utils.FilterS,
- utils.DataDB,
+ utils.DB,
},
registry, ran.cfg.GeneralCfg().ConnectTimeout)
if err != nil {
@@ -68,7 +68,7 @@ func (ran *RankingService) Start(shutdown *utils.SyncedChan, registry *servmanag
return err
}
fs := srvDeps[utils.FilterS].(*FilterService)
- dbs := srvDeps[utils.DataDB].(*DataDBService)
+ dbs := srvDeps[utils.DB].(*DataDBService)
ran.mu.Lock()
defer ran.mu.Unlock()
diff --git a/services/rates.go b/services/rates.go
index cbd9f5429..a957ef8f2 100644
--- a/services/rates.go
+++ b/services/rates.go
@@ -55,7 +55,7 @@ func (rs *RateService) Start(shutdown *utils.SyncedChan, registry *servmanager.S
utils.ConnManager,
utils.CacheS,
utils.FilterS,
- utils.DataDB,
+ utils.DB,
},
registry, rs.cfg.GeneralCfg().ConnectTimeout)
if err != nil {
@@ -71,7 +71,7 @@ func (rs *RateService) Start(shutdown *utils.SyncedChan, registry *servmanager.S
return err
}
fs := srvDeps[utils.FilterS].(*FilterService).FilterS()
- dbs := srvDeps[utils.DataDB].(*DataDBService).DataManager()
+ dbs := srvDeps[utils.DB].(*DataDBService).DataManager()
rs.mu.Lock()
rs.rateS = rates.NewRateS(rs.cfg, fs, dbs)
diff --git a/services/resources.go b/services/resources.go
index 504bf5544..75a19e98e 100644
--- a/services/resources.go
+++ b/services/resources.go
@@ -53,7 +53,7 @@ func (reS *ResourceService) Start(shutdown *utils.SyncedChan, registry *servmana
utils.ConnManager,
utils.CacheS,
utils.FilterS,
- utils.DataDB,
+ utils.DB,
},
registry, reS.cfg.GeneralCfg().ConnectTimeout)
if err != nil {
@@ -69,7 +69,7 @@ func (reS *ResourceService) Start(shutdown *utils.SyncedChan, registry *servmana
return
}
fs := srvDeps[utils.FilterS].(*FilterService)
- dbs := srvDeps[utils.DataDB].(*DataDBService)
+ dbs := srvDeps[utils.DB].(*DataDBService)
reS.mu.Lock()
defer reS.mu.Unlock()
diff --git a/services/routes.go b/services/routes.go
index 3d0a8c3ae..8a6908263 100644
--- a/services/routes.go
+++ b/services/routes.go
@@ -52,7 +52,7 @@ func (routeS *RouteService) Start(shutdown *utils.SyncedChan, registry *servmana
utils.ConnManager,
utils.CacheS,
utils.FilterS,
- utils.DataDB,
+ utils.DB,
},
registry, routeS.cfg.GeneralCfg().ConnectTimeout)
if err != nil {
@@ -67,7 +67,7 @@ func (routeS *RouteService) Start(shutdown *utils.SyncedChan, registry *servmana
return
}
fs := srvDeps[utils.FilterS].(*FilterService)
- dbs := srvDeps[utils.DataDB].(*DataDBService)
+ dbs := srvDeps[utils.DB].(*DataDBService)
routeS.mu.Lock()
defer routeS.mu.Unlock()
diff --git a/services/sessions.go b/services/sessions.go
index dd14d33a7..d7c02817a 100644
--- a/services/sessions.go
+++ b/services/sessions.go
@@ -56,7 +56,7 @@ func (smg *SessionService) Start(shutdown *utils.SyncedChan, registry *servmanag
utils.CommonListenerS,
utils.ConnManager,
utils.FilterS,
- utils.DataDB,
+ utils.DB,
},
registry, smg.cfg.GeneralCfg().ConnectTimeout)
if err != nil {
@@ -65,7 +65,7 @@ func (smg *SessionService) Start(shutdown *utils.SyncedChan, registry *servmanag
cl := srvDeps[utils.CommonListenerS].(*CommonListenerService).CLS()
cms := srvDeps[utils.ConnManager].(*ConnManagerService)
fs := srvDeps[utils.FilterS].(*FilterService).FilterS()
- dbs := srvDeps[utils.DataDB].(*DataDBService).DataManager()
+ dbs := srvDeps[utils.DB].(*DataDBService).DataManager()
smg.mu.Lock()
defer smg.mu.Unlock()
diff --git a/services/stats.go b/services/stats.go
index 8888712c4..ccada8eeb 100644
--- a/services/stats.go
+++ b/services/stats.go
@@ -52,7 +52,7 @@ func (sts *StatService) Start(shutdown *utils.SyncedChan, registry *servmanager.
utils.ConnManager,
utils.CacheS,
utils.FilterS,
- utils.DataDB,
+ utils.DB,
},
registry, sts.cfg.GeneralCfg().ConnectTimeout)
if err != nil {
@@ -68,7 +68,7 @@ func (sts *StatService) Start(shutdown *utils.SyncedChan, registry *servmanager.
return
}
fs := srvDeps[utils.FilterS].(*FilterService)
- dbs := srvDeps[utils.DataDB].(*DataDBService)
+ dbs := srvDeps[utils.DB].(*DataDBService)
sts.mu.Lock()
defer sts.mu.Unlock()
diff --git a/services/stordb.go b/services/stordb.go
deleted file mode 100644
index 798c74264..000000000
--- a/services/stordb.go
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
-Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
-Copyright (C) ITsysCOM GmbH
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see
-*/
-
-package services
-
-import (
- "fmt"
- "sync"
-
- "github.com/cgrates/cgrates/config"
- "github.com/cgrates/cgrates/engine"
- "github.com/cgrates/cgrates/servmanager"
- "github.com/cgrates/cgrates/utils"
-)
-
-// NewStorDBService returns the StorDB Service
-func NewStorDBService(cfg *config.CGRConfig, setVersions bool) *StorDBService {
- return &StorDBService{
- cfg: cfg,
- setVersions: setVersions,
- stateDeps: NewStateDependencies([]string{utils.StateServiceUP, utils.StateServiceDOWN}),
- }
-}
-
-// StorDBService implements Service interface
-type StorDBService struct {
- mu sync.RWMutex
- cfg *config.CGRConfig
- oldDBCfg *config.StorDbCfg
- db engine.StorDB
- setVersions bool
- stateDeps *StateDependencies // channel subscriptions for state changes
-}
-
-// Start should handle the service start
-func (db *StorDBService) Start(_ *utils.SyncedChan, _ *servmanager.ServiceRegistry) (err error) {
- db.mu.Lock()
- defer db.mu.Unlock()
- db.oldDBCfg = db.cfg.StorDbCfg().Clone()
- dbConn, err := engine.NewStorDBConn(db.cfg.StorDbCfg().Type, db.cfg.StorDbCfg().Host,
- db.cfg.StorDbCfg().Port, db.cfg.StorDbCfg().Name, db.cfg.StorDbCfg().User,
- db.cfg.StorDbCfg().Password, db.cfg.GeneralCfg().DBDataEncoding,
- db.cfg.StorDbCfg().StringIndexedFields, db.cfg.StorDbCfg().PrefixIndexedFields,
- db.cfg.StorDbCfg().Opts, db.cfg.StorDbCfg().Items)
- if err != nil { // Cannot configure getter database, show stopper
- utils.Logger.Crit(fmt.Sprintf("Could not configure storDB: %s exiting!", err))
- return
- }
- db.db = dbConn
-
- if db.setVersions {
- err = engine.OverwriteDBVersions(dbConn)
- } else {
- err = engine.CheckVersions(db.db)
- }
- if err != nil {
- return err
- }
- return
-}
-
-// Reload handles the change of config
-func (db *StorDBService) Reload(_ *utils.SyncedChan, _ *servmanager.ServiceRegistry) (err error) {
- db.mu.Lock()
- defer db.mu.Unlock()
- if db.needsConnectionReload() {
- var d engine.StorDB
- if d, err = engine.NewStorDBConn(db.cfg.StorDbCfg().Type, db.cfg.StorDbCfg().Host,
- db.cfg.StorDbCfg().Port, db.cfg.StorDbCfg().Name, db.cfg.StorDbCfg().User,
- db.cfg.StorDbCfg().Password, db.cfg.GeneralCfg().DBDataEncoding,
- db.cfg.StorDbCfg().StringIndexedFields, db.cfg.StorDbCfg().PrefixIndexedFields,
- db.cfg.StorDbCfg().Opts, db.cfg.StorDbCfg().Items); err != nil {
- return
- }
- db.db.Close()
- db.db = d
- db.oldDBCfg = db.cfg.StorDbCfg().Clone()
- return
- }
- if db.cfg.StorDbCfg().Type == utils.MetaMongo {
- mgo, canCast := db.db.(*engine.MongoStorage)
- if !canCast {
- return fmt.Errorf("can't conver StorDB of type %s to MongoStorage",
- db.cfg.StorDbCfg().Type)
- }
- mgo.SetTTL(db.cfg.StorDbCfg().Opts.MongoQueryTimeout)
- } else if db.cfg.StorDbCfg().Type == utils.MetaPostgres ||
- db.cfg.StorDbCfg().Type == utils.MetaMySQL {
- msql, canCast := db.db.(*engine.SQLStorage)
- if !canCast {
- return fmt.Errorf("can't conver StorDB of type %s to SQLStorage",
- db.cfg.StorDbCfg().Type)
- }
- msql.DB.SetMaxOpenConns(db.cfg.StorDbCfg().Opts.SQLMaxOpenConns)
- msql.DB.SetMaxIdleConns(db.cfg.StorDbCfg().Opts.SQLMaxIdleConns)
- msql.DB.SetConnMaxLifetime(db.cfg.StorDbCfg().Opts.SQLConnMaxLifetime)
- } else if db.cfg.StorDbCfg().Type == utils.MetaInternal {
- idb, canCast := db.db.(*engine.InternalDB)
- if !canCast {
- return fmt.Errorf("can't conver StorDB of type %s to InternalDB",
- db.cfg.StorDbCfg().Type)
- }
- idb.SetStringIndexedFields(db.cfg.StorDbCfg().StringIndexedFields)
- idb.SetPrefixIndexedFields(db.cfg.StorDbCfg().PrefixIndexedFields)
- }
- return
-}
-
-// Shutdown stops the service
-func (db *StorDBService) Shutdown(_ *servmanager.ServiceRegistry) (_ error) {
- db.mu.Lock()
- defer db.mu.Unlock()
- db.db.Close()
- db.db = nil
- return
-}
-
-// isRunning returns if the service is running (not thread safe)
-func (db *StorDBService) isRunning() bool {
- return db.db != nil
-}
-
-// ServiceName returns the service name
-func (db *StorDBService) ServiceName() string {
- return utils.StorDB
-}
-
-// ShouldRun returns if the service should be running
-func (db *StorDBService) ShouldRun() bool {
- return db.cfg.CdrsCfg().Enabled || db.cfg.AdminSCfg().Enabled
-}
-
-// needsConnectionReload returns if the DB connection needs to reloaded
-func (db *StorDBService) needsConnectionReload() bool {
- if db.oldDBCfg.Type != db.cfg.StorDbCfg().Type ||
- db.oldDBCfg.Host != db.cfg.StorDbCfg().Host ||
- db.oldDBCfg.Name != db.cfg.StorDbCfg().Name ||
- db.oldDBCfg.Port != db.cfg.StorDbCfg().Port ||
- db.oldDBCfg.User != db.cfg.StorDbCfg().User ||
- db.oldDBCfg.Password != db.cfg.StorDbCfg().Password {
- return true
- }
- return db.cfg.StorDbCfg().Type == utils.MetaPostgres &&
- (db.oldDBCfg.Opts.PgSSLMode != db.cfg.StorDbCfg().Opts.PgSSLMode ||
- db.oldDBCfg.Opts.PgSSLCert != db.cfg.StorDbCfg().Opts.PgSSLCert ||
- db.oldDBCfg.Opts.PgSSLKey != db.cfg.StorDbCfg().Opts.PgSSLKey ||
- db.oldDBCfg.Opts.PgSSLPassword != db.cfg.StorDbCfg().Opts.PgSSLPassword ||
- db.oldDBCfg.Opts.PgSSLCertMode != db.cfg.StorDbCfg().Opts.PgSSLCertMode ||
- db.oldDBCfg.Opts.PgSSLRootCert != db.cfg.StorDbCfg().Opts.PgSSLRootCert)
-}
-
-// DB returns the db connection object.
-func (db *StorDBService) DB() engine.StorDB {
- return db.db
-}
-
-// StateChan returns signaling channel of specific state
-func (db *StorDBService) StateChan(stateID string) chan struct{} {
- return db.stateDeps.StateChan(stateID)
-}
diff --git a/services/thresholds.go b/services/thresholds.go
index 5db0202d2..aea5fa73f 100644
--- a/services/thresholds.go
+++ b/services/thresholds.go
@@ -52,7 +52,7 @@ func (thrs *ThresholdService) Start(shutdown *utils.SyncedChan, registry *servma
utils.ConnManager,
utils.CacheS,
utils.FilterS,
- utils.DataDB,
+ utils.DB,
},
registry, thrs.cfg.GeneralCfg().ConnectTimeout)
if err != nil {
@@ -68,7 +68,7 @@ func (thrs *ThresholdService) Start(shutdown *utils.SyncedChan, registry *servma
return
}
fs := srvDeps[utils.FilterS].(*FilterService)
- dbs := srvDeps[utils.DataDB].(*DataDBService)
+ dbs := srvDeps[utils.DB].(*DataDBService)
thrs.mu.Lock()
defer thrs.mu.Unlock()
diff --git a/services/tpes.go b/services/tpes.go
index 26969e102..f87123e6c 100644
--- a/services/tpes.go
+++ b/services/tpes.go
@@ -52,7 +52,7 @@ func (ts *TPeService) Start(_ *utils.SyncedChan, registry *servmanager.ServiceRe
[]string{
utils.CommonListenerS,
utils.ConnManager,
- utils.DataDB,
+ utils.DB,
},
registry, ts.cfg.GeneralCfg().ConnectTimeout)
if err != nil {
@@ -60,7 +60,7 @@ func (ts *TPeService) Start(_ *utils.SyncedChan, registry *servmanager.ServiceRe
}
cl := srvDeps[utils.CommonListenerS].(*CommonListenerService).CLS()
cm := srvDeps[utils.ConnManager].(*ConnManagerService).ConnManager()
- dbs := srvDeps[utils.DataDB].(*DataDBService).DataManager()
+ dbs := srvDeps[utils.DB].(*DataDBService).DataManager()
ts.mu.Lock()
defer ts.mu.Unlock()
diff --git a/services/trends.go b/services/trends.go
index ee116c3e2..a9d704735 100644
--- a/services/trends.go
+++ b/services/trends.go
@@ -52,7 +52,7 @@ func (trs *TrendService) Start(shutdown *utils.SyncedChan, registry *servmanager
utils.ConnManager,
utils.CacheS,
utils.FilterS,
- utils.DataDB,
+ utils.DB,
},
registry, trs.cfg.GeneralCfg().ConnectTimeout)
if err != nil {
@@ -67,7 +67,7 @@ func (trs *TrendService) Start(shutdown *utils.SyncedChan, registry *servmanager
return err
}
fs := srvDeps[utils.FilterS].(*FilterService)
- dbs := srvDeps[utils.DataDB].(*DataDBService)
+ dbs := srvDeps[utils.DB].(*DataDBService)
trs.mu.Lock()
defer trs.mu.Unlock()
diff --git a/tpes/tpes.go b/tpes/tpes.go
index eaf01426e..0e567d724 100644
--- a/tpes/tpes.go
+++ b/tpes/tpes.go
@@ -65,38 +65,55 @@ type ArgsExportTP struct {
}
func getTariffPlansKeys(ctx *context.Context, dm *engine.DataManager, tnt, expType string) (profileIDs []string, err error) {
+ var itemID string
var prfx string
switch expType {
case utils.MetaAttributes:
prfx = utils.AttributeProfilePrefix + tnt + utils.ConcatenatedKeySep
+ itemID = utils.MetaAttributeProfiles
case utils.MetaActions:
prfx = utils.ActionProfilePrefix + tnt + utils.ConcatenatedKeySep
+ itemID = utils.MetaActionProfiles
case utils.MetaAccounts:
prfx = utils.AccountPrefix + tnt + utils.ConcatenatedKeySep
+ itemID = utils.MetaAccounts
case utils.MetaChargers:
prfx = utils.ChargerProfilePrefix + tnt + utils.ConcatenatedKeySep
+ itemID = utils.MetaChargerProfiles
case utils.MetaFilters:
prfx = utils.FilterPrefix + tnt + utils.ConcatenatedKeySep
+ itemID = utils.MetaFilters
case utils.MetaRates:
prfx = utils.RateProfilePrefix + tnt + utils.ConcatenatedKeySep
+ itemID = utils.MetaRateProfiles
case utils.MetaResources:
prfx = utils.ResourceProfilesPrefix + tnt + utils.ConcatenatedKeySep
+ itemID = utils.MetaResourceProfiles
case utils.MetaRoutes:
prfx = utils.RouteProfilePrefix + tnt + utils.ConcatenatedKeySep
+ itemID = utils.MetaRouteProfiles
case utils.MetaStats:
prfx = utils.StatQueueProfilePrefix + tnt + utils.ConcatenatedKeySep
+ itemID = utils.MetaStatQueueProfiles
case utils.MetaThresholds:
prfx = utils.ThresholdProfilePrefix + tnt + utils.ConcatenatedKeySep
+ itemID = utils.MetaThresholdProfiles
case utils.MetaRankings:
prfx = utils.RankingProfilePrefix + tnt + utils.ConcatenatedKeySep
+ itemID = utils.MetaRankingProfiles
case utils.MetaTrends:
prfx = utils.TrendProfilePrefix + tnt + utils.ConcatenatedKeySep
+ itemID = utils.MetaTrendProfiles
default:
return nil, fmt.Errorf("Unsuported exporter type")
}
// dbKeys will contain the full name of the key, but we will need just the IDs e.g. "alp_cgrates.org:ATTR_1" -- just ATTR_1
+ dataDB, _, err := dm.DBConns().GetConn(itemID)
+ if err != nil {
+ return nil, err
+ }
var dbKeys []string
- if dbKeys, err = dm.DataDB().GetKeysForPrefix(ctx, prfx); err != nil {
+ if dbKeys, err = dataDB.GetKeysForPrefix(ctx, prfx); err != nil {
return nil, err
}
profileIDs = make([]string, 0, len(dbKeys))
diff --git a/utils/consts.go b/utils/consts.go
index bc93144b1..629fdaf25 100644
--- a/utils/consts.go
+++ b/utils/consts.go
@@ -234,9 +234,12 @@ const (
CSV = "csv"
FWV = "fwv"
MetaMongo = "*mongo"
+ Mongo = "mongo"
MetaRedis = "*redis"
+ Redis = "redis"
MetaPostgres = "*postgres"
MetaInternal = "*internal"
+ Internal = "internal"
MetaLocalHost = "*localhost"
MetaBiJSONLocalHost = "*bijson_localhost"
MetaRatingSubjectPrefix = "*zero"
@@ -359,8 +362,8 @@ const (
MetaDumpToFile = "*dump_to_file"
MetaDumpToJSON = "*dump_to_json"
NonTransactional = ""
- DataDB = "data_db"
- StorDB = "stor_db"
+ DB = "db"
+ StorDB = "StorDB"
NotFoundCaps = "NOT_FOUND"
ServerErrorCaps = "SERVER_ERROR"
MandatoryIEMissingCaps = "MANDATORY_IE_MISSING"
@@ -2081,6 +2084,7 @@ const (
// DataDbCfg
const (
+ DataDbConnsCfg = "db_conns"
DataDbTypeCfg = "db_type"
DataDbHostCfg = "db_host"
DataDbPortCfg = "db_port"
@@ -2122,6 +2126,7 @@ const (
TTLCfg = "ttl"
LimitCfg = "limit"
StaticTTLCfg = "static_ttl"
+ DBConnCfg = "dbConn"
)
// Tls
@@ -2490,16 +2495,10 @@ const (
// MigratorCgrCfg
const (
- OutDataDBTypeCfg = "out_datadb_type"
- OutDataDBHostCfg = "out_datadb_host"
- OutDataDBPortCfg = "out_datadb_port"
- OutDataDBNameCfg = "out_datadb_name"
- OutDataDBUserCfg = "out_datadb_user"
- OutDataDBPasswordCfg = "out_datadb_password"
- OutDataDBEncodingCfg = "out_datadb_encoding"
- OutDataDBRedisSentinel = "out_redis_sentinel"
- OutDataDBOptsCfg = "out_datadb_opts"
- UsersFiltersCfg = "users_filters"
+ OutDBRedisSentinel = "out_redis_sentinel"
+ OutDBOptsCfg = "out_db_opts"
+ UsersFiltersCfg = "users_filters"
+ FromItemsCfg = "fromItems"
)
// MailerCfg
diff --git a/utils/coreutils.go b/utils/coreutils.go
index eb72a75cc..ac84d806a 100644
--- a/utils/coreutils.go
+++ b/utils/coreutils.go
@@ -1029,3 +1029,19 @@ func ParseBinarySize(size string) (int64, error) {
return 0, fmt.Errorf("unknown unit: %s", unit)
}
+
+// EqualUnorderedStringSlices will return true if the length of the slices and strings
+// inside the slices are equal, no matter the order
+func EqualUnorderedStringSlices(slice1, slice2 []string) bool {
+ if len(slice1) != len(slice2) {
+ return false
+ }
+ // Make copies to avoid modifying originals
+ s1 := slices.Clone(slice1)
+ s2 := slices.Clone(slice2)
+ // Sort both slices
+ slices.Sort(s1)
+ slices.Sort(s2)
+ // Compare sorted slices
+ return slices.Equal(s1, s2)
+}