From a168f262e24f4368f1eef2e344813fc1b0103cb1 Mon Sep 17 00:00:00 2001 From: arberkatellari Date: Fri, 28 Mar 2025 19:36:41 +0200 Subject: [PATCH] Add dump and restore functionality for internal DBs --- apier/v1/apier.go | 59 ++ apier/v1/filter_indexes.go | 51 +- config/config_defaults.go | 12 + config/configsanity.go | 23 +- config/datadbcfg.go | 144 ++-- config/libconfig_json.go | 66 +- config/stordbcfg.go | 122 ++- data/ansible/roles/go/defaults/main.yaml | 2 +- .../samples/fs_offline_internal/cgrates.json | 130 +++ .../samples/fs_offline_mysql/cgrates.json | 110 +++ .../samples/offline_internal/cgrates.json | 128 +++ .../offline_internal_limit/cgrates.json | 130 +++ .../samples/offline_internal_ms/cgrates.json | 128 +++ .../offline_internal_ms_limit/cgrates.json | 130 +++ .../offline_internal_ms_rewrite/cgrates.json | 130 +++ .../cgrates.json | 132 +++ .../cgrates.json | 130 +++ .../cgrates.json | 126 +++ .../offline_internal_rewrite/cgrates.json | 130 +++ .../cgrates.json | 132 +++ .../offline_internal_rewrite_ms/cgrates.json | 130 +++ .../cgrates.json | 132 +++ .../cgrates.json | 93 +++ .../samples/rerate_cdrs_internal/cgrates.json | 44 +- .../samples/rerate_cdrs_mysql/cgrates.json | 29 +- .../cgrates.json | 17 +- data/docker/integration/Dockerfile | 2 +- docs/installation.rst | 4 +- ees/ees.go | 2 +- ees/libcdre.go | 4 +- engine/action.go | 13 +- engine/action_plan.go | 9 +- engine/caches.go | 76 +- engine/cdr.go | 5 + engine/connmanager.go | 2 +- engine/datadbmock.go | 16 + engine/datamanager.go | 19 +- engine/destinations.go | 20 + engine/dispatcherprfl.go | 44 + engine/filters.go | 56 ++ engine/globalvars.go | 2 +- engine/libattributes.go | 50 ++ engine/libchargers.go | 27 + engine/libindex_health.go | 8 +- engine/librankings.go | 38 + engine/libstats.go | 85 ++ engine/libtrends.go | 10 + engine/rateinterval.go | 16 +- engine/ratingplan.go | 62 ++ engine/ratingprofile.go | 52 ++ engine/resources.go | 79 ++ engine/routes.go | 76 ++ 
engine/storage_interface.go | 6 + engine/storage_internal_datadb.go | 50 +- engine/storage_internal_stordb.go | 15 + engine/storage_mongo_datadb.go | 15 + engine/storage_mongo_stordb.go | 15 + engine/storage_redis.go | 17 +- engine/storage_sql.go | 15 + engine/storage_utils.go | 45 +- engine/thresholds.go | 56 ++ engine/tpreader_test.go | 20 +- ers/ers.go | 2 +- go.mod | 17 +- go.sum | 30 +- .../copr.fedorainfracloud.org/cgrates.spec | 2 +- packages/redhat_fedora/cgrates.spec | 2 +- utils/apitpdata.go | 767 ++++++++++++++++++ utils/consts.go | 54 +- utils/rsrfilters.go | 15 + 70 files changed, 4077 insertions(+), 303 deletions(-) create mode 100644 data/conf/samples/fs_offline_internal/cgrates.json create mode 100644 data/conf/samples/fs_offline_mysql/cgrates.json create mode 100644 data/conf/samples/offline_internal/cgrates.json create mode 100644 data/conf/samples/offline_internal_limit/cgrates.json create mode 100644 data/conf/samples/offline_internal_ms/cgrates.json create mode 100644 data/conf/samples/offline_internal_ms_limit/cgrates.json create mode 100644 data/conf/samples/offline_internal_ms_rewrite/cgrates.json create mode 100644 data/conf/samples/offline_internal_ms_rewrite_limit/cgrates.json create mode 100644 data/conf/samples/offline_internal_ms_rewrite_ms/cgrates.json create mode 100644 data/conf/samples/offline_internal_ms_rewrite_ms_limit/cgrates.json create mode 100644 data/conf/samples/offline_internal_rewrite/cgrates.json create mode 100644 data/conf/samples/offline_internal_rewrite_limit/cgrates.json create mode 100644 data/conf/samples/offline_internal_rewrite_ms/cgrates.json create mode 100644 data/conf/samples/offline_internal_rewrite_ms_limit/cgrates.json create mode 100644 data/conf/samples/reprocess_cdrs_stats_ees_internal/cgrates.json diff --git a/apier/v1/apier.go b/apier/v1/apier.go index bf0abb450..b2b74a81b 100644 --- a/apier/v1/apier.go +++ b/apier/v1/apier.go @@ -2076,3 +2076,62 @@ func (apierSv1 *APIerSv1) TimingIsActiveAt(ctx 
*context.Context, params TimePara } return } + +// DumpDataDB will dump all of datadb from memory to a file +func (apierSv1 *APIerSv1) DumpDataDB(ctx *context.Context, ignr *string, reply *string) (err error) { + if err = apierSv1.DataManager.DataDB().DumpDataDB(); err != nil { + return + } + *reply = utils.OK + return +} + +// Will rewrite every dump file of DataDB +func (apierSv1 *APIerSv1) RewriteDataDB(ctx *context.Context, ignr *string, reply *string) (err error) { + if err = apierSv1.DataManager.DataDB().RewriteDataDB(); err != nil { + return + } + *reply = utils.OK + return +} + +// DumpStorDB will dump all of stordb from memory to a file +func (apierSv1 *APIerSv1) DumpStorDB(ctx *context.Context, ignr *string, reply *string) (err error) { + if err = apierSv1.StorDb.DumpStorDB(); err != nil { + return + } + *reply = utils.OK + return +} + +// Will rewrite every dump file of StorDB +func (apierSv1 *APIerSv1) RewriteStorDB(ctx *context.Context, ignr *string, reply *string) (err error) { + if err = apierSv1.StorDb.RewriteStorDB(); err != nil { + return + } + *reply = utils.OK + return +} + +type DumpBackupParams struct { + BackupFolderPath string // The path to the folder where the backup will be created + Zip bool // creates a zip compressing the backup +} + +// BackupDataDB will momentarely stop any dumping and rewriting in dataDB, until dump folder is backed up in folder path backupFolderPath. Making zip true will create a zip file in the path instead +func (apierSv1 *APIerSv1) BackupDataDB(ctx *context.Context, params DumpBackupParams, reply *string) (err error) { + if err = apierSv1.DataManager.DataDB().BackupDataDB(params.BackupFolderPath, params.Zip); err != nil { + return + } + *reply = utils.OK + return +} + +// BackupStorDB will momentarely stop any dumping and rewriting in storDB, until dump folder is backed up in folder path backupFolderPath. 
Making zip true will create a zip file in the path instead +func (apierSv1 *APIerSv1) BackupStorDB(ctx *context.Context, params DumpBackupParams, reply *string) (err error) { + if err = apierSv1.StorDb.BackupStorDB(params.BackupFolderPath, params.Zip); err != nil { + return + } + *reply = utils.OK + return +} diff --git a/apier/v1/filter_indexes.go b/apier/v1/filter_indexes.go index 095fb05c3..bed00e241 100644 --- a/apier/v1/filter_indexes.go +++ b/apier/v1/filter_indexes.go @@ -582,8 +582,7 @@ func (apierSv1 *APIerSv1) GetAccountActionPlansIndexHealth(ctx *context.Context, func (apierSv1 *APIerSv1) GetReverseDestinationsIndexHealth(ctx *context.Context, args *engine.IndexHealthArgsWith2Ch, reply *engine.ReverseDestinationsIHReply) error { rp, err := engine.GetReverseDestinationsIndexHealth(apierSv1.DataManager, args.ObjectCacheLimit, args.IndexCacheLimit, - args.ObjectCacheTTL, args.IndexCacheTTL, - args.ObjectCacheStaticTTL, args.IndexCacheStaticTTL) + args.ObjectCacheTTL, args.IndexCacheTTL, args.ObjectCacheStaticTTL, args.IndexCacheStaticTTL) if err != nil { return err } @@ -594,11 +593,11 @@ func (apierSv1 *APIerSv1) GetReverseDestinationsIndexHealth(ctx *context.Context func (apierSv1 *APIerSv1) GetReverseFilterHealth(ctx *context.Context, args *engine.IndexHealthArgsWith3Ch, reply *map[string]*engine.ReverseFilterIHReply) (err error) { objCaches := make(map[string]*ltcache.Cache) for indxType := range utils.CacheIndexesToPrefix { - objCaches[indxType] = ltcache.NewCache(-1, 0, false, nil) + objCaches[indxType] = ltcache.NewCache(-1, 0, false, false, nil) } *reply, err = engine.GetRevFltrIdxHealth(apierSv1.DataManager, - ltcache.NewCache(args.FilterCacheLimit, args.FilterCacheTTL, args.FilterCacheStaticTTL, nil), - ltcache.NewCache(args.IndexCacheLimit, args.IndexCacheTTL, args.IndexCacheStaticTTL, nil), + ltcache.NewCache(args.FilterCacheLimit, args.FilterCacheTTL, args.FilterCacheStaticTTL, false, nil), + ltcache.NewCache(args.IndexCacheLimit, 
args.IndexCacheTTL, args.IndexCacheStaticTTL, false, nil), objCaches, ) return @@ -606,9 +605,9 @@ func (apierSv1 *APIerSv1) GetReverseFilterHealth(ctx *context.Context, args *eng func (apierSv1 *APIerSv1) GetThresholdsIndexesHealth(ctx *context.Context, args *engine.IndexHealthArgsWith3Ch, reply *engine.FilterIHReply) error { rp, err := engine.GetFltrIdxHealth(apierSv1.DataManager, - ltcache.NewCache(args.FilterCacheLimit, args.FilterCacheTTL, args.FilterCacheStaticTTL, nil), - ltcache.NewCache(args.IndexCacheLimit, args.IndexCacheTTL, args.IndexCacheStaticTTL, nil), - ltcache.NewCache(args.ObjectCacheLimit, args.ObjectCacheTTL, args.ObjectCacheStaticTTL, nil), + ltcache.NewCache(args.FilterCacheLimit, args.FilterCacheTTL, args.FilterCacheStaticTTL, false, nil), + ltcache.NewCache(args.IndexCacheLimit, args.IndexCacheTTL, args.IndexCacheStaticTTL, false, nil), + ltcache.NewCache(args.ObjectCacheLimit, args.ObjectCacheTTL, args.ObjectCacheStaticTTL, false, nil), utils.CacheThresholdFilterIndexes, ) if err != nil { @@ -620,9 +619,9 @@ func (apierSv1 *APIerSv1) GetThresholdsIndexesHealth(ctx *context.Context, args func (apierSv1 *APIerSv1) GetResourcesIndexesHealth(ctx *context.Context, args *engine.IndexHealthArgsWith3Ch, reply *engine.FilterIHReply) error { rp, err := engine.GetFltrIdxHealth(apierSv1.DataManager, - ltcache.NewCache(args.FilterCacheLimit, args.FilterCacheTTL, args.FilterCacheStaticTTL, nil), - ltcache.NewCache(args.IndexCacheLimit, args.IndexCacheTTL, args.IndexCacheStaticTTL, nil), - ltcache.NewCache(args.ObjectCacheLimit, args.ObjectCacheTTL, args.ObjectCacheStaticTTL, nil), + ltcache.NewCache(args.FilterCacheLimit, args.FilterCacheTTL, args.FilterCacheStaticTTL, false, nil), + ltcache.NewCache(args.IndexCacheLimit, args.IndexCacheTTL, args.IndexCacheStaticTTL, false, nil), + ltcache.NewCache(args.ObjectCacheLimit, args.ObjectCacheTTL, args.ObjectCacheStaticTTL, false, nil), utils.CacheResourceFilterIndexes, ) if err != nil { @@ -634,9 +633,9 @@ 
func (apierSv1 *APIerSv1) GetResourcesIndexesHealth(ctx *context.Context, args * func (apierSv1 *APIerSv1) GetStatsIndexesHealth(ctx *context.Context, args *engine.IndexHealthArgsWith3Ch, reply *engine.FilterIHReply) error { rp, err := engine.GetFltrIdxHealth(apierSv1.DataManager, - ltcache.NewCache(args.FilterCacheLimit, args.FilterCacheTTL, args.FilterCacheStaticTTL, nil), - ltcache.NewCache(args.IndexCacheLimit, args.IndexCacheTTL, args.IndexCacheStaticTTL, nil), - ltcache.NewCache(args.ObjectCacheLimit, args.ObjectCacheTTL, args.ObjectCacheStaticTTL, nil), + ltcache.NewCache(args.FilterCacheLimit, args.FilterCacheTTL, args.FilterCacheStaticTTL, false, nil), + ltcache.NewCache(args.IndexCacheLimit, args.IndexCacheTTL, args.IndexCacheStaticTTL, false, nil), + ltcache.NewCache(args.ObjectCacheLimit, args.ObjectCacheTTL, args.ObjectCacheStaticTTL, false, nil), utils.CacheStatFilterIndexes, ) if err != nil { @@ -648,9 +647,9 @@ func (apierSv1 *APIerSv1) GetStatsIndexesHealth(ctx *context.Context, args *engi func (apierSv1 *APIerSv1) GetRoutesIndexesHealth(ctx *context.Context, args *engine.IndexHealthArgsWith3Ch, reply *engine.FilterIHReply) error { rp, err := engine.GetFltrIdxHealth(apierSv1.DataManager, - ltcache.NewCache(args.FilterCacheLimit, args.FilterCacheTTL, args.FilterCacheStaticTTL, nil), - ltcache.NewCache(args.IndexCacheLimit, args.IndexCacheTTL, args.IndexCacheStaticTTL, nil), - ltcache.NewCache(args.ObjectCacheLimit, args.ObjectCacheTTL, args.ObjectCacheStaticTTL, nil), + ltcache.NewCache(args.FilterCacheLimit, args.FilterCacheTTL, args.FilterCacheStaticTTL, false, nil), + ltcache.NewCache(args.IndexCacheLimit, args.IndexCacheTTL, args.IndexCacheStaticTTL, false, nil), + ltcache.NewCache(args.ObjectCacheLimit, args.ObjectCacheTTL, args.ObjectCacheStaticTTL, false, nil), utils.CacheRouteFilterIndexes, ) if err != nil { @@ -662,9 +661,9 @@ func (apierSv1 *APIerSv1) GetRoutesIndexesHealth(ctx *context.Context, args *eng func (apierSv1 *APIerSv1) 
GetAttributesIndexesHealth(ctx *context.Context, args *engine.IndexHealthArgsWith3Ch, reply *engine.FilterIHReply) error { rp, err := engine.GetFltrIdxHealth(apierSv1.DataManager, - ltcache.NewCache(args.FilterCacheLimit, args.FilterCacheTTL, args.FilterCacheStaticTTL, nil), - ltcache.NewCache(args.IndexCacheLimit, args.IndexCacheTTL, args.IndexCacheStaticTTL, nil), - ltcache.NewCache(args.ObjectCacheLimit, args.ObjectCacheTTL, args.ObjectCacheStaticTTL, nil), + ltcache.NewCache(args.FilterCacheLimit, args.FilterCacheTTL, args.FilterCacheStaticTTL, false, nil), + ltcache.NewCache(args.IndexCacheLimit, args.IndexCacheTTL, args.IndexCacheStaticTTL, false, nil), + ltcache.NewCache(args.ObjectCacheLimit, args.ObjectCacheTTL, args.ObjectCacheStaticTTL, false, nil), utils.CacheAttributeFilterIndexes, ) if err != nil { @@ -676,9 +675,9 @@ func (apierSv1 *APIerSv1) GetAttributesIndexesHealth(ctx *context.Context, args func (apierSv1 *APIerSv1) GetChargersIndexesHealth(ctx *context.Context, args *engine.IndexHealthArgsWith3Ch, reply *engine.FilterIHReply) error { rp, err := engine.GetFltrIdxHealth(apierSv1.DataManager, - ltcache.NewCache(args.FilterCacheLimit, args.FilterCacheTTL, args.FilterCacheStaticTTL, nil), - ltcache.NewCache(args.IndexCacheLimit, args.IndexCacheTTL, args.IndexCacheStaticTTL, nil), - ltcache.NewCache(args.ObjectCacheLimit, args.ObjectCacheTTL, args.ObjectCacheStaticTTL, nil), + ltcache.NewCache(args.FilterCacheLimit, args.FilterCacheTTL, args.FilterCacheStaticTTL, false, nil), + ltcache.NewCache(args.IndexCacheLimit, args.IndexCacheTTL, args.IndexCacheStaticTTL, false, nil), + ltcache.NewCache(args.ObjectCacheLimit, args.ObjectCacheTTL, args.ObjectCacheStaticTTL, false, nil), utils.CacheChargerFilterIndexes, ) if err != nil { @@ -690,9 +689,9 @@ func (apierSv1 *APIerSv1) GetChargersIndexesHealth(ctx *context.Context, args *e func (apierSv1 *APIerSv1) GetDispatchersIndexesHealth(ctx *context.Context, args *engine.IndexHealthArgsWith3Ch, reply 
*engine.FilterIHReply) error { rp, err := engine.GetFltrIdxHealth(apierSv1.DataManager, - ltcache.NewCache(args.FilterCacheLimit, args.FilterCacheTTL, args.FilterCacheStaticTTL, nil), - ltcache.NewCache(args.IndexCacheLimit, args.IndexCacheTTL, args.IndexCacheStaticTTL, nil), - ltcache.NewCache(args.ObjectCacheLimit, args.ObjectCacheTTL, args.ObjectCacheStaticTTL, nil), + ltcache.NewCache(args.FilterCacheLimit, args.FilterCacheTTL, args.FilterCacheStaticTTL, false, nil), + ltcache.NewCache(args.IndexCacheLimit, args.IndexCacheTTL, args.IndexCacheStaticTTL, false, nil), + ltcache.NewCache(args.ObjectCacheLimit, args.ObjectCacheTTL, args.ObjectCacheStaticTTL, false, nil), utils.CacheDispatcherFilterIndexes, ) if err != nil { diff --git a/config/config_defaults.go b/config/config_defaults.go index 3be2dd8bf..a4b312bf5 100644 --- a/config/config_defaults.go +++ b/config/config_defaults.go @@ -140,6 +140,12 @@ const CGRATES_CFG_JSON = ` "*sessions_backup": {"limit": -1, "ttl": "", "static_ttl": false, "remote":false, "replicate":false}, }, "opts":{ + "internalDBDumpPath": "/var/lib/cgrates/internal_db/datadb", // the path where datadb will be dumped + "internalDBBackupPath": "/var/lib/cgrates/internal_db/backup/datadb", // default path taken by APIerSv1.BackupDataDBDump when "BackupFolderPath" is not provided + "internalDBStartTimeout": "5m", // the amount of wait time until timeout for DB startup + "internalDBDumpInterval": "0s", // dump datadb regularly to a file: "0" - disables it; "-1" - dump on each set/remove; <""|$dur> + "internalDBRewriteInterval": "0s", // rewrite dump files regularly: "0" - disables it; "-1" - rewrite on engine start; "-2" - rewrite on engine shutdown; <""|$dur> + "internalDBWriteLimit": 100, // maximum size in MiB that can be written in a singular dump file "redisMaxConns": 10, // the connection pool size "redisConnectAttempts": 20, // the maximum amount of dial attempts "redisSentinel": "", // the name of sentinel when used @@ -171,6 +177,12 
@@ const CGRATES_CFG_JSON = ` "string_indexed_fields": [], // indexes on cdrs table to speed up queries, used in case of *mongo and *internal "prefix_indexed_fields":[], // prefix indexes on cdrs table to speed up queries, used in case of *internal "opts": { + "internalDBDumpPath": "/var/lib/cgrates/internal_db/stordb", // the path where stordb will be dumped + "internalDBBackupPath": "/var/lib/cgrates/internal_db/backup/stordb", // default path taken by APIerSv1.BackupStorDBDump when "BackupFolderPath" is not provided + "internalDBStartTimeout": "5m", // the amount of wait time until timeout for DB startup + "internalDBDumpInterval": "0s", // dump datadb regularly to a file: "0" - disables it; "-1" - dump on each set/remove; <""|$dur> + "internalDBRewriteInterval": "0s", // rewrite dump files regularly: "0" - disables it; "-1" - rewrite on engine start; "-2" - rewrite on engine shutdown; <""|$dur> + "internalDBWriteLimit": 100, // maximum size in MiB that can be written in a singular dump file "sqlMaxOpenConns": 100, // maximum database connections opened, not applying for mongo "sqlMaxIdleConns": 10, // maximum database connections idle, not applying for mongo "sqlLogLevel": 3, // sql logger verbosity: 1=Silent, 2=Error, 3=Warn, 4=Info diff --git a/config/configsanity.go b/config/configsanity.go index 43d7e6391..3c54b8549 100644 --- a/config/configsanity.go +++ b/config/configsanity.go @@ -1011,6 +1011,13 @@ func (cfg *CGRConfig) checkConfigSanity() error { } } // StorDB sanity checks + if cfg.storDbCfg.Type == utils.MetaInternal && + (cfg.storDbCfg.Opts.InternalDBDumpInterval != 0 || + cfg.storDbCfg.Opts.InternalDBRewriteInterval != 0) && + cfg.storDbCfg.Opts.InternalDBWriteLimit <= 0 { + return fmt.Errorf("<%s> internalDBWriteLimit field cannot be equal or smaller than 0: <%v>", utils.StorDB, + cfg.storDbCfg.Opts.InternalDBWriteLimit) + } if cfg.storDbCfg.Type == utils.MetaPostgres { if !slices.Contains([]string{utils.PgSSLModeDisable, utils.PgSSLModeAllow, 
utils.PgSSLModePrefer, utils.PgSSLModeRequire, utils.PgSSLModeVerifyCA, @@ -1029,18 +1036,12 @@ func (cfg *CGRConfig) checkConfigSanity() error { return fmt.Errorf("<%s> %s needs to be 0 when DataBD is *internal, received : %d", utils.CacheS, key, config.Limit) } } - if cfg.resourceSCfg.Enabled && cfg.resourceSCfg.StoreInterval != -1 { - return fmt.Errorf("<%s> the StoreInterval field needs to be -1 when DataBD is *internal, received : %d", utils.ResourceS, cfg.resourceSCfg.StoreInterval) + if (cfg.dataDbCfg.Opts.InternalDBDumpInterval != 0 || + cfg.dataDbCfg.Opts.InternalDBRewriteInterval != 0) && + cfg.dataDbCfg.Opts.InternalDBWriteLimit <= 0 { + return fmt.Errorf("<%s> internalDBWriteLimit field cannot be equal or smaller than 0: <%v>", utils.DataDB, + cfg.dataDbCfg.Opts.InternalDBWriteLimit) } - if cfg.statsCfg.Enabled && cfg.statsCfg.StoreInterval != -1 { - return fmt.Errorf("<%s> the StoreInterval field needs to be -1 when DataBD is *internal, received : %d", utils.StatS, cfg.statsCfg.StoreInterval) - } - if cfg.thresholdSCfg.Enabled && cfg.thresholdSCfg.StoreInterval != -1 { - return fmt.Errorf("<%s> the StoreInterval field needs to be -1 when DataBD is *internal, received : %d", utils.ThresholdS, cfg.thresholdSCfg.StoreInterval) - } - // if cfg.sessionSCfg.Enabled && cfg.sessionSCfg.BackupInterval != -1 { - // return fmt.Errorf("<%s> the BackupInterval field needs to be -1 when DataBD is *internal, received : %d", utils.SessionS, cfg.sessionSCfg.BackupInterval) - // } } for item, val := range cfg.dataDbCfg.Items { if val.Remote && len(cfg.dataDbCfg.RmtConns) == 0 { diff --git a/config/datadbcfg.go b/config/datadbcfg.go index 46132fb42..8a623c396 100644 --- a/config/datadbcfg.go +++ b/config/datadbcfg.go @@ -28,23 +28,29 @@ import ( ) type DataDBOpts struct { - RedisMaxConns int - RedisConnectAttempts int - RedisSentinel string - RedisCluster bool - RedisClusterSync time.Duration - RedisClusterOndownDelay time.Duration - RedisConnectTimeout time.Duration - 
RedisReadTimeout time.Duration - RedisWriteTimeout time.Duration - RedisPoolPipelineWindow time.Duration - RedisPoolPipelineLimit int - RedisTLS bool - RedisClientCertificate string - RedisClientKey string - RedisCACertificate string - MongoQueryTimeout time.Duration - MongoConnScheme string + InternalDBDumpPath string // Path to the dump file + InternalDBBackupPath string // Path where db dump will backup + InternalDBStartTimeout time.Duration // Transcache recover from dump files timeout duration + InternalDBDumpInterval time.Duration // Regurarly dump database to file + InternalDBRewriteInterval time.Duration // Regurarly rewrite dump files + InternalDBWriteLimit int // maximum size in MiB that can be written in a singular dump file + RedisMaxConns int + RedisConnectAttempts int + RedisSentinel string + RedisCluster bool + RedisClusterSync time.Duration + RedisClusterOndownDelay time.Duration + RedisConnectTimeout time.Duration + RedisReadTimeout time.Duration + RedisWriteTimeout time.Duration + RedisPoolPipelineWindow time.Duration + RedisPoolPipelineLimit int + RedisTLS bool + RedisClientCertificate string + RedisClientKey string + RedisCACertificate string + MongoQueryTimeout time.Duration + MongoConnScheme string } // DataDbCfg Database config @@ -68,6 +74,30 @@ func (dbOpts *DataDBOpts) loadFromJSONCfg(jsnCfg *DBOptsJson) (err error) { if jsnCfg == nil { return } + if jsnCfg.InternalDBDumpPath != nil { + dbOpts.InternalDBDumpPath = *jsnCfg.InternalDBDumpPath + } + if jsnCfg.InternalDBBackupPath != nil { + dbOpts.InternalDBBackupPath = *jsnCfg.InternalDBBackupPath + } + if jsnCfg.InternalDBStartTimeout != nil { + if dbOpts.InternalDBStartTimeout, err = utils.ParseDurationWithNanosecs(*jsnCfg.InternalDBStartTimeout); err != nil { + return err + } + } + if jsnCfg.InternalDBDumpInterval != nil { + if dbOpts.InternalDBDumpInterval, err = utils.ParseDurationWithNanosecs(*jsnCfg.InternalDBDumpInterval); err != nil { + return err + } + } + if 
jsnCfg.InternalDBRewriteInterval != nil { + if dbOpts.InternalDBRewriteInterval, err = utils.ParseDurationWithNanosecs(*jsnCfg.InternalDBRewriteInterval); err != nil { + return err + } + } + if jsnCfg.InternalDBWriteLimit != nil { + dbOpts.InternalDBWriteLimit = *jsnCfg.InternalDBWriteLimit + } if jsnCfg.RedisMaxConns != nil { dbOpts.RedisMaxConns = *jsnCfg.RedisMaxConns } @@ -216,23 +246,29 @@ func (dbcfg *DataDbCfg) loadFromJSONCfg(jsnDbCfg *DbJsonCfg) (err error) { func (dbOpts *DataDBOpts) Clone() *DataDBOpts { return &DataDBOpts{ - RedisMaxConns: dbOpts.RedisMaxConns, - RedisConnectAttempts: dbOpts.RedisConnectAttempts, - RedisSentinel: dbOpts.RedisSentinel, - RedisCluster: dbOpts.RedisCluster, - RedisClusterSync: dbOpts.RedisClusterSync, - RedisClusterOndownDelay: dbOpts.RedisClusterOndownDelay, - RedisConnectTimeout: dbOpts.RedisConnectTimeout, - RedisReadTimeout: dbOpts.RedisReadTimeout, - RedisWriteTimeout: dbOpts.RedisWriteTimeout, - RedisPoolPipelineWindow: dbOpts.RedisPoolPipelineWindow, - RedisPoolPipelineLimit: dbOpts.RedisPoolPipelineLimit, - RedisTLS: dbOpts.RedisTLS, - RedisClientCertificate: dbOpts.RedisClientCertificate, - RedisClientKey: dbOpts.RedisClientKey, - RedisCACertificate: dbOpts.RedisCACertificate, - MongoQueryTimeout: dbOpts.MongoQueryTimeout, - MongoConnScheme: dbOpts.MongoConnScheme, + InternalDBDumpPath: dbOpts.InternalDBDumpPath, + InternalDBBackupPath: dbOpts.InternalDBBackupPath, + InternalDBStartTimeout: dbOpts.InternalDBStartTimeout, + InternalDBDumpInterval: dbOpts.InternalDBDumpInterval, + InternalDBRewriteInterval: dbOpts.InternalDBRewriteInterval, + InternalDBWriteLimit: dbOpts.InternalDBWriteLimit, + RedisMaxConns: dbOpts.RedisMaxConns, + RedisConnectAttempts: dbOpts.RedisConnectAttempts, + RedisSentinel: dbOpts.RedisSentinel, + RedisCluster: dbOpts.RedisCluster, + RedisClusterSync: dbOpts.RedisClusterSync, + RedisClusterOndownDelay: dbOpts.RedisClusterOndownDelay, + RedisConnectTimeout: dbOpts.RedisConnectTimeout, + 
RedisReadTimeout: dbOpts.RedisReadTimeout, + RedisWriteTimeout: dbOpts.RedisWriteTimeout, + RedisPoolPipelineWindow: dbOpts.RedisPoolPipelineWindow, + RedisPoolPipelineLimit: dbOpts.RedisPoolPipelineLimit, + RedisTLS: dbOpts.RedisTLS, + RedisClientCertificate: dbOpts.RedisClientCertificate, + RedisClientKey: dbOpts.RedisClientKey, + RedisCACertificate: dbOpts.RedisCACertificate, + MongoQueryTimeout: dbOpts.MongoQueryTimeout, + MongoConnScheme: dbOpts.MongoConnScheme, } } @@ -268,23 +304,29 @@ func (dbcfg *DataDbCfg) Clone() (cln *DataDbCfg) { // AsMapInterface returns the config as a map[string]any func (dbcfg *DataDbCfg) AsMapInterface() (mp map[string]any) { opts := map[string]any{ - utils.RedisMaxConnsCfg: dbcfg.Opts.RedisMaxConns, - utils.RedisConnectAttemptsCfg: dbcfg.Opts.RedisConnectAttempts, - utils.RedisSentinelNameCfg: dbcfg.Opts.RedisSentinel, - utils.RedisClusterCfg: dbcfg.Opts.RedisCluster, - utils.RedisClusterSyncCfg: dbcfg.Opts.RedisClusterSync.String(), - utils.RedisClusterOnDownDelayCfg: dbcfg.Opts.RedisClusterOndownDelay.String(), - utils.RedisConnectTimeoutCfg: dbcfg.Opts.RedisConnectTimeout.String(), - utils.RedisReadTimeoutCfg: dbcfg.Opts.RedisReadTimeout.String(), - utils.RedisWriteTimeoutCfg: dbcfg.Opts.RedisWriteTimeout.String(), - utils.RedisPoolPipelineWindowCfg: dbcfg.Opts.RedisPoolPipelineWindow.String(), - utils.RedisPoolPipelineLimitCfg: dbcfg.Opts.RedisPoolPipelineLimit, - utils.RedisTLS: dbcfg.Opts.RedisTLS, - utils.RedisClientCertificate: dbcfg.Opts.RedisClientCertificate, - utils.RedisClientKey: dbcfg.Opts.RedisClientKey, - utils.RedisCACertificate: dbcfg.Opts.RedisCACertificate, - utils.MongoQueryTimeoutCfg: dbcfg.Opts.MongoQueryTimeout.String(), - utils.MongoConnSchemeCfg: dbcfg.Opts.MongoConnScheme, + utils.InternalDBDumpPathCfg: dbcfg.Opts.InternalDBDumpPath, + utils.InternalDBBackupPathCfg: dbcfg.Opts.InternalDBBackupPath, + utils.InternalDBStartTimeoutCfg: dbcfg.Opts.InternalDBStartTimeout, + utils.InternalDBDumpIntervalCfg: 
dbcfg.Opts.InternalDBDumpInterval, + utils.InternalDBRewriteIntervalCfg: dbcfg.Opts.InternalDBRewriteInterval, + utils.InternalDBWriteLimitCfg: dbcfg.Opts.InternalDBWriteLimit, + utils.RedisMaxConnsCfg: dbcfg.Opts.RedisMaxConns, + utils.RedisConnectAttemptsCfg: dbcfg.Opts.RedisConnectAttempts, + utils.RedisSentinelNameCfg: dbcfg.Opts.RedisSentinel, + utils.RedisClusterCfg: dbcfg.Opts.RedisCluster, + utils.RedisClusterSyncCfg: dbcfg.Opts.RedisClusterSync.String(), + utils.RedisClusterOnDownDelayCfg: dbcfg.Opts.RedisClusterOndownDelay.String(), + utils.RedisConnectTimeoutCfg: dbcfg.Opts.RedisConnectTimeout.String(), + utils.RedisReadTimeoutCfg: dbcfg.Opts.RedisReadTimeout.String(), + utils.RedisWriteTimeoutCfg: dbcfg.Opts.RedisWriteTimeout.String(), + utils.RedisPoolPipelineWindowCfg: dbcfg.Opts.RedisPoolPipelineWindow.String(), + utils.RedisPoolPipelineLimitCfg: dbcfg.Opts.RedisPoolPipelineLimit, + utils.RedisTLS: dbcfg.Opts.RedisTLS, + utils.RedisClientCertificate: dbcfg.Opts.RedisClientCertificate, + utils.RedisClientKey: dbcfg.Opts.RedisClientKey, + utils.RedisCACertificate: dbcfg.Opts.RedisCACertificate, + utils.MongoQueryTimeoutCfg: dbcfg.Opts.MongoQueryTimeout.String(), + utils.MongoConnSchemeCfg: dbcfg.Opts.MongoConnScheme, } mp = map[string]any{ utils.DataDbTypeCfg: dbcfg.Type, diff --git a/config/libconfig_json.go b/config/libconfig_json.go index a50ab8c41..53dd7a675 100644 --- a/config/libconfig_json.go +++ b/config/libconfig_json.go @@ -105,36 +105,42 @@ type TlsJsonCfg struct { } type DBOptsJson struct { - RedisMaxConns *int `json:"redisMaxConns"` - RedisConnectAttempts *int `json:"redisConnectAttempts"` - RedisSentinel *string `json:"redisSentinel"` - RedisCluster *bool `json:"redisCluster"` - RedisClusterSync *string `json:"redisClusterSync"` - RedisClusterOndownDelay *string `json:"redisClusterOndownDelay"` - RedisConnectTimeout *string `json:"redisConnectTimeout"` - RedisReadTimeout *string `json:"redisReadTimeout"` - RedisWriteTimeout *string 
`json:"redisWriteTimeout"` - RedisPoolPipelineWindow *string `json:"redisPoolPipelineWindow"` - RedisPoolPipelineLimit *int `json:"redisPoolPipelineLimit"` - RedisTLS *bool `json:"redisTLS"` - RedisClientCertificate *string `json:"redisClientCertificate"` - RedisClientKey *string `json:"redisClientKey"` - RedisCACertificate *string `json:"redisCACertificate"` - MongoQueryTimeout *string `json:"mongoQueryTimeout"` - MongoConnScheme *string `json:"mongoConnScheme"` - SQLMaxOpenConns *int `json:"sqlMaxOpenConns"` - SQLMaxIdleConns *int `json:"sqlMaxIdleConns"` - SQLConnMaxLifetime *string `json:"sqlConnMaxLifetime"` - SQLLogLevel *int `json:"sqlLogLevel"` - MySQLDSNParams map[string]string `json:"mysqlDSNParams"` - PgSSLMode *string `json:"pgSSLMode"` - PgSSLCert *string `json:"pgSSLCert"` - PgSSLKey *string `json:"pgSSLKey"` - PgSSLPassword *string `json:"pgSSLPassword"` - PgSSLCertMode *string `json:"pgSSLCertMode"` - PgSSLRootCert *string `json:"pgSSLRootCert"` - PgSchema *string `json:"pgSchema"` - MySQLLocation *string `json:"mysqlLocation"` + InternalDBDumpPath *string `json:"internalDBDumpPath"` + InternalDBBackupPath *string `json:"internalDBBackupPath"` + InternalDBStartTimeout *string `json:"internalDBStartTimeout"` + InternalDBDumpInterval *string `json:"internalDBDumpInterval"` + InternalDBRewriteInterval *string `json:"internalDBRewriteInterval"` + InternalDBWriteLimit *int `json:"internalDBWriteLimit"` + RedisMaxConns *int `json:"redisMaxConns"` + RedisConnectAttempts *int `json:"redisConnectAttempts"` + RedisSentinel *string `json:"redisSentinel"` + RedisCluster *bool `json:"redisCluster"` + RedisClusterSync *string `json:"redisClusterSync"` + RedisClusterOndownDelay *string `json:"redisClusterOndownDelay"` + RedisConnectTimeout *string `json:"redisConnectTimeout"` + RedisReadTimeout *string `json:"redisReadTimeout"` + RedisWriteTimeout *string `json:"redisWriteTimeout"` + RedisPoolPipelineWindow *string `json:"redisPoolPipelineWindow"` + 
RedisPoolPipelineLimit *int `json:"redisPoolPipelineLimit"` + RedisTLS *bool `json:"redisTLS"` + RedisClientCertificate *string `json:"redisClientCertificate"` + RedisClientKey *string `json:"redisClientKey"` + RedisCACertificate *string `json:"redisCACertificate"` + MongoQueryTimeout *string `json:"mongoQueryTimeout"` + MongoConnScheme *string `json:"mongoConnScheme"` + SQLMaxOpenConns *int `json:"sqlMaxOpenConns"` + SQLMaxIdleConns *int `json:"sqlMaxIdleConns"` + SQLConnMaxLifetime *string `json:"sqlConnMaxLifetime"` + SQLLogLevel *int `json:"sqlLogLevel"` + MySQLDSNParams map[string]string `json:"mysqlDSNParams"` + PgSSLMode *string `json:"pgSSLMode"` + PgSSLCert *string `json:"pgSSLCert"` + PgSSLKey *string `json:"pgSSLKey"` + PgSSLPassword *string `json:"pgSSLPassword"` + PgSSLCertMode *string `json:"pgSSLCertMode"` + PgSSLRootCert *string `json:"pgSSLRootCert"` + PgSchema *string `json:"pgSchema"` + MySQLLocation *string `json:"mysqlLocation"` } // Database config diff --git a/config/stordbcfg.go b/config/stordbcfg.go index 89371535b..a5e093986 100644 --- a/config/stordbcfg.go +++ b/config/stordbcfg.go @@ -28,21 +28,27 @@ import ( ) type StorDBOpts struct { - SQLMaxOpenConns int - SQLMaxIdleConns int - SQLConnMaxLifetime time.Duration - SQLLogLevel int - MongoQueryTimeout time.Duration - MongoConnScheme string - PgSSLMode string - PgSSLCert string - PgSSLKey string - PgSSLPassword string - PgSSLCertMode string - PgSSLRootCert string - PgSchema string - MySQLLocation string - MySQLDSNParams map[string]string + InternalDBDumpPath string // Path to the dump file + InternalDBBackupPath string // Path where db dump will backup + InternalDBStartTimeout time.Duration // Transcache recover from dump files timeout duration + InternalDBDumpInterval time.Duration // Regurarly dump database to file + InternalDBRewriteInterval time.Duration // Regurarly rewrite dump files + InternalDBWriteLimit int // maximum size in MiB that can be written in a singular dump file + 
SQLMaxOpenConns int + SQLMaxIdleConns int + SQLConnMaxLifetime time.Duration + SQLLogLevel int + MongoQueryTimeout time.Duration + MongoConnScheme string + PgSSLMode string + PgSSLCert string + PgSSLKey string + PgSSLPassword string + PgSSLCertMode string + PgSSLRootCert string + PgSchema string + MySQLLocation string + MySQLDSNParams map[string]string } // StorDbCfg StroreDb config @@ -65,6 +71,30 @@ func (dbOpts *StorDBOpts) loadFromJSONCfg(jsnCfg *DBOptsJson) (err error) { if jsnCfg == nil { return } + if jsnCfg.InternalDBDumpPath != nil { + dbOpts.InternalDBDumpPath = *jsnCfg.InternalDBDumpPath + } + if jsnCfg.InternalDBBackupPath != nil { + dbOpts.InternalDBBackupPath = *jsnCfg.InternalDBBackupPath + } + if jsnCfg.InternalDBStartTimeout != nil { + if dbOpts.InternalDBStartTimeout, err = utils.ParseDurationWithNanosecs(*jsnCfg.InternalDBStartTimeout); err != nil { + return err + } + } + if jsnCfg.InternalDBDumpInterval != nil { + if dbOpts.InternalDBDumpInterval, err = utils.ParseDurationWithNanosecs(*jsnCfg.InternalDBDumpInterval); err != nil { + return err + } + } + if jsnCfg.InternalDBRewriteInterval != nil { + if dbOpts.InternalDBRewriteInterval, err = utils.ParseDurationWithNanosecs(*jsnCfg.InternalDBRewriteInterval); err != nil { + return err + } + } + if jsnCfg.InternalDBWriteLimit != nil { + dbOpts.InternalDBWriteLimit = *jsnCfg.InternalDBWriteLimit + } if jsnCfg.SQLMaxOpenConns != nil { dbOpts.SQLMaxOpenConns = *jsnCfg.SQLMaxOpenConns } @@ -190,21 +220,27 @@ func (dbcfg *StorDbCfg) loadFromJSONCfg(jsnDbCfg *DbJsonCfg) (err error) { func (dbOpts *StorDBOpts) Clone() *StorDBOpts { return &StorDBOpts{ - SQLMaxOpenConns: dbOpts.SQLMaxOpenConns, - SQLMaxIdleConns: dbOpts.SQLMaxIdleConns, - SQLConnMaxLifetime: dbOpts.SQLConnMaxLifetime, - SQLLogLevel: dbOpts.SQLLogLevel, - MySQLDSNParams: dbOpts.MySQLDSNParams, - MongoQueryTimeout: dbOpts.MongoQueryTimeout, - MongoConnScheme: dbOpts.MongoConnScheme, - PgSSLMode: dbOpts.PgSSLMode, - PgSSLCert: 
dbOpts.PgSSLCert, - PgSSLKey: dbOpts.PgSSLKey, - PgSSLPassword: dbOpts.PgSSLPassword, - PgSSLCertMode: dbOpts.PgSSLCertMode, - PgSSLRootCert: dbOpts.PgSSLRootCert, - PgSchema: dbOpts.PgSchema, - MySQLLocation: dbOpts.MySQLLocation, + InternalDBDumpPath: dbOpts.InternalDBDumpPath, + InternalDBBackupPath: dbOpts.InternalDBBackupPath, + InternalDBStartTimeout: dbOpts.InternalDBStartTimeout, + InternalDBDumpInterval: dbOpts.InternalDBDumpInterval, + InternalDBRewriteInterval: dbOpts.InternalDBRewriteInterval, + InternalDBWriteLimit: dbOpts.InternalDBWriteLimit, + SQLMaxOpenConns: dbOpts.SQLMaxOpenConns, + SQLMaxIdleConns: dbOpts.SQLMaxIdleConns, + SQLConnMaxLifetime: dbOpts.SQLConnMaxLifetime, + SQLLogLevel: dbOpts.SQLLogLevel, + MySQLDSNParams: dbOpts.MySQLDSNParams, + MongoQueryTimeout: dbOpts.MongoQueryTimeout, + MongoConnScheme: dbOpts.MongoConnScheme, + PgSSLMode: dbOpts.PgSSLMode, + PgSSLCert: dbOpts.PgSSLCert, + PgSSLKey: dbOpts.PgSSLKey, + PgSSLPassword: dbOpts.PgSSLPassword, + PgSSLCertMode: dbOpts.PgSSLCertMode, + PgSSLRootCert: dbOpts.PgSSLRootCert, + PgSchema: dbOpts.PgSchema, + MySQLLocation: dbOpts.MySQLLocation, } } @@ -246,16 +282,22 @@ func (dbcfg *StorDbCfg) Clone() (cln *StorDbCfg) { // AsMapInterface returns the config as a map[string]any func (dbcfg *StorDbCfg) AsMapInterface() (mp map[string]any) { opts := map[string]any{ - utils.SQLMaxOpenConnsCfg: dbcfg.Opts.SQLMaxOpenConns, - utils.SQLMaxIdleConnsCfg: dbcfg.Opts.SQLMaxIdleConns, - utils.SQLConnMaxLifetime: dbcfg.Opts.SQLConnMaxLifetime.String(), - utils.SQLLogLevel: dbcfg.Opts.SQLLogLevel, - utils.MYSQLDSNParams: dbcfg.Opts.MySQLDSNParams, - utils.MongoQueryTimeoutCfg: dbcfg.Opts.MongoQueryTimeout.String(), - utils.MongoConnSchemeCfg: dbcfg.Opts.MongoConnScheme, - utils.PgSSLModeCfg: dbcfg.Opts.PgSSLMode, - utils.PgSchema: dbcfg.Opts.PgSchema, - utils.MysqlLocation: dbcfg.Opts.MySQLLocation, + utils.InternalDBDumpPathCfg: dbcfg.Opts.InternalDBDumpPath, + utils.InternalDBBackupPathCfg: 
dbcfg.Opts.InternalDBBackupPath, + utils.InternalDBStartTimeoutCfg: dbcfg.Opts.InternalDBStartTimeout, + utils.InternalDBDumpIntervalCfg: dbcfg.Opts.InternalDBDumpInterval, + utils.InternalDBRewriteIntervalCfg: dbcfg.Opts.InternalDBRewriteInterval, + utils.InternalDBWriteLimitCfg: dbcfg.Opts.InternalDBWriteLimit, + utils.SQLMaxOpenConnsCfg: dbcfg.Opts.SQLMaxOpenConns, + utils.SQLMaxIdleConnsCfg: dbcfg.Opts.SQLMaxIdleConns, + utils.SQLConnMaxLifetime: dbcfg.Opts.SQLConnMaxLifetime.String(), + utils.SQLLogLevel: dbcfg.Opts.SQLLogLevel, + utils.MYSQLDSNParams: dbcfg.Opts.MySQLDSNParams, + utils.MongoQueryTimeoutCfg: dbcfg.Opts.MongoQueryTimeout.String(), + utils.MongoConnSchemeCfg: dbcfg.Opts.MongoConnScheme, + utils.PgSSLModeCfg: dbcfg.Opts.PgSSLMode, + utils.PgSchema: dbcfg.Opts.PgSchema, + utils.MysqlLocation: dbcfg.Opts.MySQLLocation, } if dbcfg.Opts.PgSSLCert != "" { opts[utils.PgSSLCertCfg] = dbcfg.Opts.PgSSLCert diff --git a/data/ansible/roles/go/defaults/main.yaml b/data/ansible/roles/go/defaults/main.yaml index b999bb5be..a8b2a6cf6 100644 --- a/data/ansible/roles/go/defaults/main.yaml +++ b/data/ansible/roles/go/defaults/main.yaml @@ -1,5 +1,5 @@ --- -go_version: 1.24.0 +go_version: 1.24.1 go_platform: linux go_arch: amd64 go_tarball: go{{ go_version }}.{{ go_platform }}-{{ go_arch }}.tar.gz diff --git a/data/conf/samples/fs_offline_internal/cgrates.json b/data/conf/samples/fs_offline_internal/cgrates.json new file mode 100644 index 000000000..1096a20c7 --- /dev/null +++ b/data/conf/samples/fs_offline_internal/cgrates.json @@ -0,0 +1,130 @@ +{ + +// Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments +// Copyright (C) ITsysCOM GmbH + +"general": { + "log_level": 7, + "node_id":"CGRFreeswitch", +}, + + +"listen": { + "rpc_json": ":2012", + "rpc_gob": ":2013", + "http": ":2080", +}, + + +"data_db": { + "db_type": "*internal", + "opts":{ + "internalDBDumpInterval": "-1", + "internalDBRewriteInterval": "-2", + "internalDBWriteLimit": 10 
+ } +}, + + +"stor_db": { + "db_type": "*internal", + "opts":{ + "internalDBDumpInterval": "-1", + "internalDBRewriteInterval": "-2", + "internalDBWriteLimit": 10 + } +}, + +"schedulers": { + "enabled": true, +}, + + +"rals": { + "enabled": true, + "thresholds_conns": ["*localhost"], + "stats_conns": ["*localhost"], +}, + + +"cdrs": { + "enabled": true, + "stats_conns": ["*localhost"], + "chargers_conns": ["*internal"], + "rals_conns": ["*localhost"], + "sessions_cost_retries": 5, +}, + + +"chargers": { + "enabled": true, + "attributes_conns": ["*internal"], +}, + + +"sessions": { + "enabled": true, + "rals_conns": ["*localhost"], + "cdrs_conns": ["*localhost"], + "resources_conns": ["*localhost"], + "routes_conns": ["*localhost"], + "attributes_conns": ["*localhost"], + "stats_conns": ["*localhost"], + "thresholds_conns": ["*localhost"], + "chargers_conns": ["*internal"], + "debit_interval": "5s", + "channel_sync_interval":"7s", + "backup_interval": "500ms", +}, + + +"freeswitch_agent": { + "enabled": true, + "event_socket_conns":[ + {"address": "127.0.0.1:8021", "password": "ClueCon", "reconnects": -1,"alias":""} + ], + "sessions_conns": ["*birpc_internal"], + "create_cdr": true +}, + + +"attributes": { + "enabled": true, + "string_indexed_fields": ["*req.Account"], +}, + + +"resources": { + "enabled": true, + "string_indexed_fields": ["*req.Account"], +}, + + +"stats": { + "enabled": true, + "string_indexed_fields": ["*req.Account","*req.RunID","*req.Destination"], +}, + + +"thresholds": { + "enabled": true, + "string_indexed_fields": ["*req.Account"], +}, + + +"routes": { + "enabled": true, + "resources_conns": ["*internal"], + "stats_conns": ["*internal"], + "rals_conns": ["*internal"], + "string_indexed_fields": ["*req.Account"], +}, + + +"apiers": { + "enabled": true, + "scheduler_conns": ["*internal"], +}, + + +} diff --git a/data/conf/samples/fs_offline_mysql/cgrates.json b/data/conf/samples/fs_offline_mysql/cgrates.json new file mode 100644 index 
000000000..af3c9a805 --- /dev/null +++ b/data/conf/samples/fs_offline_mysql/cgrates.json @@ -0,0 +1,110 @@ +{ + +// Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments +// Copyright (C) ITsysCOM GmbH + +"general": { + "log_level": 7, + "node_id":"CGRFreeswitch", +}, + + +"listen": { + "rpc_json": ":2012", + "rpc_gob": ":2013", + "http": ":2080", +}, + +"schedulers": { + "enabled": true, +}, + + +"rals": { + "enabled": true, + "thresholds_conns": ["*localhost"], + "stats_conns": ["*localhost"], +}, + + +"cdrs": { + "enabled": true, + "stats_conns": ["*localhost"], + "chargers_conns": ["*internal"], + "rals_conns": ["*localhost"], + "sessions_cost_retries": 5, +}, + + +"chargers": { + "enabled": true, + "attributes_conns": ["*internal"], +}, + + +"sessions": { + "enabled": true, + "rals_conns": ["*localhost"], + "cdrs_conns": ["*localhost"], + "resources_conns": ["*localhost"], + "routes_conns": ["*localhost"], + "attributes_conns": ["*localhost"], + "stats_conns": ["*localhost"], + "thresholds_conns": ["*localhost"], + "chargers_conns": ["*internal"], + "debit_interval": "5s", + "channel_sync_interval":"7s", + "backup_interval": "500ms", +}, + + +"freeswitch_agent": { + "enabled": true, + "event_socket_conns":[ + {"address": "127.0.0.1:8021", "password": "ClueCon", "reconnects": -1,"alias":""} + ], + "sessions_conns": ["*birpc_internal"], + "create_cdr": true +}, + + +"attributes": { + "enabled": true, + "string_indexed_fields": ["*req.Account"], +}, + + +"resources": { + "enabled": true, + "string_indexed_fields": ["*req.Account"], +}, + + +"stats": { + "enabled": true, + "string_indexed_fields": ["*req.Account","*req.RunID","*req.Destination"], +}, + + +"thresholds": { + "enabled": true, + "string_indexed_fields": ["*req.Account"], +}, + + +"routes": { + "enabled": true, + "resources_conns": ["*internal"], + "stats_conns": ["*internal"], + "rals_conns": ["*internal"], + "string_indexed_fields": ["*req.Account"], +}, + + +"apiers": { + 
"enabled": true, + "scheduler_conns": ["*internal"], +}, + + +} diff --git a/data/conf/samples/offline_internal/cgrates.json b/data/conf/samples/offline_internal/cgrates.json new file mode 100644 index 000000000..b788309e1 --- /dev/null +++ b/data/conf/samples/offline_internal/cgrates.json @@ -0,0 +1,128 @@ +{ + + + + "general": { + "node_id":"offlineWithBackup", + "log_level": 7, + }, + + + "listen": { + "rpc_json": ":2012", + "rpc_gob": ":2013", + "http": ":2080" + }, + + // "cores": { + // "shutdown_timeout": "10s" + // }, + + + "data_db": { + "db_type": "*internal", + "items":{ + "*charger_profiles": {"limit": -1, "ttl": "1s", "static_ttl": false, "remote":false, "replicate":false} + }, + "opts":{ + "internalDBDumpInterval": "-1", + } + }, + + + "stor_db": { + "db_type": "*internal", + "opts":{ + "internalDBDumpInterval": "-1", + } + }, + + "rals": { + "enabled": true, + "thresholds_conns": ["*internal"], + "max_increments":3000000 + }, + + + "schedulers": { + "enabled": true, + "cdrs_conns": ["*internal"], + "stats_conns": ["*internal"] + }, + + + "cdrs": { + "enabled": true, + "chargers_conns":["*internal"] + }, + + + "attributes": { + "enabled": true, + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "apiers_conns": ["*internal"] + }, + + + "chargers": { + "enabled": true, + "attributes_conns": ["*internal"] + }, + + + "resources": { + "enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + + "stats": { + "enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + "thresholds": { + "enabled": true, + "store_interval": "-1" + }, + + + "routes": { + "enabled": true, + "prefix_indexed_fields":["*req.Destination"], + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "rals_conns": ["*internal"] + }, + + + "sessions": { + "enabled": true, + "routes_conns": ["*internal"], + "resources_conns": ["*internal"], + "attributes_conns": ["*internal"], + "rals_conns": 
["*internal"], + "cdrs_conns": ["*internal"], + "chargers_conns": ["*internal"], + "backup_interval": "-1" + }, + + + "apiers": { + "enabled": true, + "scheduler_conns": ["*internal"] + }, + + + "filters": { + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "apiers_conns": ["*internal"] + } + + + } + \ No newline at end of file diff --git a/data/conf/samples/offline_internal_limit/cgrates.json b/data/conf/samples/offline_internal_limit/cgrates.json new file mode 100644 index 000000000..d8323eea7 --- /dev/null +++ b/data/conf/samples/offline_internal_limit/cgrates.json @@ -0,0 +1,130 @@ +{ + + + + "general": { + "node_id":"offlineWithBackup", + "log_level": 7, + }, + + + "listen": { + "rpc_json": ":2012", + "rpc_gob": ":2013", + "http": ":2080" + }, + + // "cores": { + // "shutdown_timeout": "10s" + // }, + + + "data_db": { + "db_type": "*internal", + "items":{ + "*charger_profiles": {"limit": -1, "ttl": "1s", "static_ttl": false, "remote":false, "replicate":false} + }, + "opts":{ + "internalDBDumpInterval": "-1", + "internalDBWriteLimit": 1 + } + }, + + + "stor_db": { + "db_type": "*internal", + "opts":{ + "internalDBDumpInterval": "-1", + "internalDBWriteLimit": 1 + } + }, + + "rals": { + "enabled": true, + "thresholds_conns": ["*internal"], + "max_increments":3000000 + }, + + + "schedulers": { + "enabled": true, + "cdrs_conns": ["*internal"], + "stats_conns": ["*internal"] + }, + + + "cdrs": { + "enabled": true, + "chargers_conns":["*internal"] + }, + + + "attributes": { + "enabled": true, + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "apiers_conns": ["*internal"] + }, + + + "chargers": { + "enabled": true, + "attributes_conns": ["*internal"] + }, + + + "resources": { + "enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + + "stats": { + "enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + "thresholds": { + "enabled": true, + "store_interval": "-1" + 
}, + + + "routes": { + "enabled": true, + "prefix_indexed_fields":["*req.Destination"], + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "rals_conns": ["*internal"] + }, + + + "sessions": { + "enabled": true, + "routes_conns": ["*internal"], + "resources_conns": ["*internal"], + "attributes_conns": ["*internal"], + "rals_conns": ["*internal"], + "cdrs_conns": ["*internal"], + "chargers_conns": ["*internal"], + "backup_interval": "-1" + }, + + + "apiers": { + "enabled": true, + "scheduler_conns": ["*internal"] + }, + + + "filters": { + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "apiers_conns": ["*internal"] + } + + + } + \ No newline at end of file diff --git a/data/conf/samples/offline_internal_ms/cgrates.json b/data/conf/samples/offline_internal_ms/cgrates.json new file mode 100644 index 000000000..333057381 --- /dev/null +++ b/data/conf/samples/offline_internal_ms/cgrates.json @@ -0,0 +1,128 @@ +{ + + + + "general": { + "node_id":"offlineWithBackup", + "log_level": 7, + }, + + + "listen": { + "rpc_json": ":2012", + "rpc_gob": ":2013", + "http": ":2080" + }, + + // "cores": { + // "shutdown_timeout": "10s" + // }, + + + "data_db": { + "db_type": "*internal", + "items":{ + "*charger_profiles": {"limit": -1, "ttl": "1s", "static_ttl": false, "remote":false, "replicate":false} + }, + "opts":{ + "internalDBDumpInterval": "500ms", + } + }, + + + "stor_db": { + "db_type": "*internal", + "opts":{ + "internalDBDumpInterval": "500ms", + } + }, + + "rals": { + "enabled": true, + "thresholds_conns": ["*internal"], + "max_increments":3000000 + }, + + + "schedulers": { + "enabled": true, + "cdrs_conns": ["*internal"], + "stats_conns": ["*internal"] + }, + + + "cdrs": { + "enabled": true, + "chargers_conns":["*internal"] + }, + + + "attributes": { + "enabled": true, + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "apiers_conns": ["*internal"] + }, + + + "chargers": { + "enabled": true, + "attributes_conns": 
["*internal"] + }, + + + "resources": { + "enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + + "stats": { + "enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + "thresholds": { + "enabled": true, + "store_interval": "-1" + }, + + + "routes": { + "enabled": true, + "prefix_indexed_fields":["*req.Destination"], + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "rals_conns": ["*internal"] + }, + + + "sessions": { + "enabled": true, + "routes_conns": ["*internal"], + "resources_conns": ["*internal"], + "attributes_conns": ["*internal"], + "rals_conns": ["*internal"], + "cdrs_conns": ["*internal"], + "chargers_conns": ["*internal"], + "backup_interval": "-1" + }, + + + "apiers": { + "enabled": true, + "scheduler_conns": ["*internal"] + }, + + + "filters": { + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "apiers_conns": ["*internal"] + } + + + } + \ No newline at end of file diff --git a/data/conf/samples/offline_internal_ms_limit/cgrates.json b/data/conf/samples/offline_internal_ms_limit/cgrates.json new file mode 100644 index 000000000..f4cbc4040 --- /dev/null +++ b/data/conf/samples/offline_internal_ms_limit/cgrates.json @@ -0,0 +1,130 @@ +{ + + + + "general": { + "node_id":"offlineWithBackup", + "log_level": 7, + }, + + + "listen": { + "rpc_json": ":2012", + "rpc_gob": ":2013", + "http": ":2080" + }, + + // "cores": { + // "shutdown_timeout": "10s" + // }, + + + "data_db": { + "db_type": "*internal", + "items":{ + "*charger_profiles": {"limit": -1, "ttl": "1s", "static_ttl": false, "remote":false, "replicate":false} + }, + "opts":{ + "internalDBDumpInterval": "500ms", + "internalDBWriteLimit": 1 + } + }, + + + "stor_db": { + "db_type": "*internal", + "opts":{ + "internalDBDumpInterval": "500ms", + "internalDBWriteLimit": 1 + } + }, + + "rals": { + "enabled": true, + "thresholds_conns": ["*internal"], + "max_increments":3000000 + }, + + + "schedulers": { 
+ "enabled": true, + "cdrs_conns": ["*internal"], + "stats_conns": ["*internal"] + }, + + + "cdrs": { + "enabled": true, + "chargers_conns":["*internal"] + }, + + + "attributes": { + "enabled": true, + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "apiers_conns": ["*internal"] + }, + + + "chargers": { + "enabled": true, + "attributes_conns": ["*internal"] + }, + + + "resources": { + "enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + + "stats": { + "enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + "thresholds": { + "enabled": true, + "store_interval": "-1" + }, + + + "routes": { + "enabled": true, + "prefix_indexed_fields":["*req.Destination"], + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "rals_conns": ["*internal"] + }, + + + "sessions": { + "enabled": true, + "routes_conns": ["*internal"], + "resources_conns": ["*internal"], + "attributes_conns": ["*internal"], + "rals_conns": ["*internal"], + "cdrs_conns": ["*internal"], + "chargers_conns": ["*internal"], + "backup_interval": "-1" + }, + + + "apiers": { + "enabled": true, + "scheduler_conns": ["*internal"] + }, + + + "filters": { + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "apiers_conns": ["*internal"] + } + + + } + \ No newline at end of file diff --git a/data/conf/samples/offline_internal_ms_rewrite/cgrates.json b/data/conf/samples/offline_internal_ms_rewrite/cgrates.json new file mode 100644 index 000000000..2f531c70e --- /dev/null +++ b/data/conf/samples/offline_internal_ms_rewrite/cgrates.json @@ -0,0 +1,130 @@ +{ + + + + "general": { + "node_id":"offlineWithBackup", + "log_level": 7, + }, + + + "listen": { + "rpc_json": ":2012", + "rpc_gob": ":2013", + "http": ":2080" + }, + + // "cores": { + // "shutdown_timeout": "10s" + // }, + + + "data_db": { + "db_type": "*internal", + "items":{ + "*charger_profiles": {"limit": -1, "ttl": "1s", "static_ttl": false, 
"remote":false, "replicate":false} + }, + "opts":{ + "internalDBDumpInterval": "500ms", + "internalDBRewriteInterval": "-1", + } + }, + + + "stor_db": { + "db_type": "*internal", + "opts":{ + "internalDBDumpInterval": "500ms", + "internalDBRewriteInterval": "-1", + } + }, + + "rals": { + "enabled": true, + "thresholds_conns": ["*internal"], + "max_increments":3000000 + }, + + + "schedulers": { + "enabled": true, + "cdrs_conns": ["*internal"], + "stats_conns": ["*internal"] + }, + + + "cdrs": { + "enabled": true, + "chargers_conns":["*internal"] + }, + + + "attributes": { + "enabled": true, + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "apiers_conns": ["*internal"] + }, + + + "chargers": { + "enabled": true, + "attributes_conns": ["*internal"] + }, + + + "resources": { + "enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + + "stats": { + "enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + "thresholds": { + "enabled": true, + "store_interval": "-1" + }, + + + "routes": { + "enabled": true, + "prefix_indexed_fields":["*req.Destination"], + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "rals_conns": ["*internal"] + }, + + + "sessions": { + "enabled": true, + "routes_conns": ["*internal"], + "resources_conns": ["*internal"], + "attributes_conns": ["*internal"], + "rals_conns": ["*internal"], + "cdrs_conns": ["*internal"], + "chargers_conns": ["*internal"], + "backup_interval": "-1" + }, + + + "apiers": { + "enabled": true, + "scheduler_conns": ["*internal"] + }, + + + "filters": { + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "apiers_conns": ["*internal"] + } + + + } + \ No newline at end of file diff --git a/data/conf/samples/offline_internal_ms_rewrite_limit/cgrates.json b/data/conf/samples/offline_internal_ms_rewrite_limit/cgrates.json new file mode 100644 index 000000000..60cd5ed05 --- /dev/null +++ 
b/data/conf/samples/offline_internal_ms_rewrite_limit/cgrates.json @@ -0,0 +1,132 @@ +{ + + + + "general": { + "node_id":"offlineWithBackup", + "log_level": 7, + }, + + + "listen": { + "rpc_json": ":2012", + "rpc_gob": ":2013", + "http": ":2080" + }, + + // "cores": { + // "shutdown_timeout": "10s" + // }, + + + "data_db": { + "db_type": "*internal", + "items":{ + "*charger_profiles": {"limit": -1, "ttl": "1s", "static_ttl": false, "remote":false, "replicate":false} + }, + "opts":{ + "internalDBDumpInterval": "500ms", + "internalDBRewriteInterval": "-1", + "internalDBWriteLimit": 1 + } + }, + + + "stor_db": { + "db_type": "*internal", + "opts":{ + "internalDBDumpInterval": "500ms", + "internalDBRewriteInterval": "-1", + "internalDBWriteLimit": 1 + } + }, + + "rals": { + "enabled": true, + "thresholds_conns": ["*internal"], + "max_increments":3000000 + }, + + + "schedulers": { + "enabled": true, + "cdrs_conns": ["*internal"], + "stats_conns": ["*internal"] + }, + + + "cdrs": { + "enabled": true, + "chargers_conns":["*internal"] + }, + + + "attributes": { + "enabled": true, + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "apiers_conns": ["*internal"] + }, + + + "chargers": { + "enabled": true, + "attributes_conns": ["*internal"] + }, + + + "resources": { + "enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + + "stats": { + "enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + "thresholds": { + "enabled": true, + "store_interval": "-1" + }, + + + "routes": { + "enabled": true, + "prefix_indexed_fields":["*req.Destination"], + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "rals_conns": ["*internal"] + }, + + + "sessions": { + "enabled": true, + "routes_conns": ["*internal"], + "resources_conns": ["*internal"], + "attributes_conns": ["*internal"], + "rals_conns": ["*internal"], + "cdrs_conns": ["*internal"], + "chargers_conns": ["*internal"], + 
"backup_interval": "-1" + }, + + + "apiers": { + "enabled": true, + "scheduler_conns": ["*internal"] + }, + + + "filters": { + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "apiers_conns": ["*internal"] + } + + + } + \ No newline at end of file diff --git a/data/conf/samples/offline_internal_ms_rewrite_ms/cgrates.json b/data/conf/samples/offline_internal_ms_rewrite_ms/cgrates.json new file mode 100644 index 000000000..f7d22cff0 --- /dev/null +++ b/data/conf/samples/offline_internal_ms_rewrite_ms/cgrates.json @@ -0,0 +1,130 @@ +{ + + + + "general": { + "node_id":"offlineWithBackup", + "log_level": 7, + }, + + + "listen": { + "rpc_json": ":2012", + "rpc_gob": ":2013", + "http": ":2080" + }, + + // "cores": { + // "shutdown_timeout": "10s" + // }, + + + "data_db": { + "db_type": "*internal", + "items":{ + "*charger_profiles": {"limit": -1, "ttl": "1s", "static_ttl": false, "remote":false, "replicate":false} + }, + "opts":{ + "internalDBDumpInterval": "500ms", + "internalDBRewriteInterval": "500ms", + } + }, + + + "stor_db": { + "db_type": "*internal", + "opts":{ + "internalDBDumpInterval": "500ms", + "internalDBRewriteInterval": "500ms", + } + }, + + "rals": { + "enabled": true, + "thresholds_conns": ["*internal"], + "max_increments":3000000 + }, + + + "schedulers": { + "enabled": true, + "cdrs_conns": ["*internal"], + "stats_conns": ["*internal"] + }, + + + "cdrs": { + "enabled": true, + "chargers_conns":["*internal"] + }, + + + "attributes": { + "enabled": true, + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "apiers_conns": ["*internal"] + }, + + + "chargers": { + "enabled": true, + "attributes_conns": ["*internal"] + }, + + + "resources": { + "enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + + "stats": { + "enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + "thresholds": { + "enabled": true, + "store_interval": "-1" + }, + + + "routes": { + 
"enabled": true, + "prefix_indexed_fields":["*req.Destination"], + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "rals_conns": ["*internal"] + }, + + + "sessions": { + "enabled": true, + "routes_conns": ["*internal"], + "resources_conns": ["*internal"], + "attributes_conns": ["*internal"], + "rals_conns": ["*internal"], + "cdrs_conns": ["*internal"], + "chargers_conns": ["*internal"], + "backup_interval": "-1" + }, + + + "apiers": { + "enabled": true, + "scheduler_conns": ["*internal"] + }, + + + "filters": { + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "apiers_conns": ["*internal"] + } + + + } + \ No newline at end of file diff --git a/data/conf/samples/offline_internal_ms_rewrite_ms_limit/cgrates.json b/data/conf/samples/offline_internal_ms_rewrite_ms_limit/cgrates.json new file mode 100644 index 000000000..d4720f8e7 --- /dev/null +++ b/data/conf/samples/offline_internal_ms_rewrite_ms_limit/cgrates.json @@ -0,0 +1,126 @@ +{ + + + + "general": { + "node_id":"offlineWithBackup", + "log_level": 7, + }, + + + "listen": { + "rpc_json": ":2012", + "rpc_gob": ":2013", + "http": ":2080" + }, + + "data_db": { + "db_type": "*internal", + "items":{ + "*charger_profiles": {"limit": -1, "ttl": "1s", "static_ttl": false, "remote":false, "replicate":false} + }, + "opts":{ + "internalDBDumpInterval": "500ms", + "internalDBRewriteInterval": "500ms", + "internalDBWriteLimit": 1 + } + }, + + "stor_db": { + "db_type": "*internal", + "opts":{ + "internalDBDumpInterval": "500ms", + "internalDBRewriteInterval": "500ms", + "internalDBWriteLimit": 1 + } + }, + + "rals": { + "enabled": true, + "thresholds_conns": ["*internal"], + "max_increments":3000000 + }, + + + "schedulers": { + "enabled": true, + "cdrs_conns": ["*internal"], + "stats_conns": ["*internal"] + }, + + + "cdrs": { + "enabled": true, + "chargers_conns":["*internal"] + }, + + + "attributes": { + "enabled": true, + "stats_conns": ["*internal"], + "resources_conns": 
["*internal"], + "apiers_conns": ["*internal"] + }, + + + "chargers": { + "enabled": true, + "attributes_conns": ["*internal"] + }, + + + "resources": { + "enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + + "stats": { + "enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + "thresholds": { + "enabled": true, + "store_interval": "-1" + }, + + + "routes": { + "enabled": true, + "prefix_indexed_fields":["*req.Destination"], + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "rals_conns": ["*internal"] + }, + + + "sessions": { + "enabled": true, + "routes_conns": ["*internal"], + "resources_conns": ["*internal"], + "attributes_conns": ["*internal"], + "rals_conns": ["*internal"], + "cdrs_conns": ["*internal"], + "chargers_conns": ["*internal"], + "backup_interval": "-1" + }, + + + "apiers": { + "enabled": true, + "scheduler_conns": ["*internal"] + }, + + + "filters": { + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "apiers_conns": ["*internal"] + } + + + } + \ No newline at end of file diff --git a/data/conf/samples/offline_internal_rewrite/cgrates.json b/data/conf/samples/offline_internal_rewrite/cgrates.json new file mode 100644 index 000000000..dbefc94bc --- /dev/null +++ b/data/conf/samples/offline_internal_rewrite/cgrates.json @@ -0,0 +1,130 @@ +{ + + + + "general": { + "node_id":"offlineWithBackup", + "log_level": 7, + }, + + + "listen": { + "rpc_json": ":2012", + "rpc_gob": ":2013", + "http": ":2080" + }, + + // "cores": { + // "shutdown_timeout": "10s" + // }, + + + "data_db": { + "db_type": "*internal", + "items":{ + "*charger_profiles": {"limit": -1, "ttl": "1s", "static_ttl": false, "remote":false, "replicate":false} + }, + "opts":{ + "internalDBDumpInterval": "-1", + "internalDBRewriteInterval": "-1", + } + }, + + + "stor_db": { + "db_type": "*internal", + "opts":{ + "internalDBDumpInterval": "-1", + "internalDBRewriteInterval": "-1", + } + }, + 
+ "rals": { + "enabled": true, + "thresholds_conns": ["*internal"], + "max_increments":3000000 + }, + + + "schedulers": { + "enabled": true, + "cdrs_conns": ["*internal"], + "stats_conns": ["*internal"] + }, + + + "cdrs": { + "enabled": true, + "chargers_conns":["*internal"] + }, + + + "attributes": { + "enabled": true, + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "apiers_conns": ["*internal"] + }, + + + "chargers": { + "enabled": true, + "attributes_conns": ["*internal"] + }, + + + "resources": { + "enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + + "stats": { + "enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + "thresholds": { + "enabled": true, + "store_interval": "-1" + }, + + + "routes": { + "enabled": true, + "prefix_indexed_fields":["*req.Destination"], + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "rals_conns": ["*internal"] + }, + + + "sessions": { + "enabled": true, + "routes_conns": ["*internal"], + "resources_conns": ["*internal"], + "attributes_conns": ["*internal"], + "rals_conns": ["*internal"], + "cdrs_conns": ["*internal"], + "chargers_conns": ["*internal"], + "backup_interval": "-1" + }, + + + "apiers": { + "enabled": true, + "scheduler_conns": ["*internal"] + }, + + + "filters": { + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "apiers_conns": ["*internal"] + } + + + } + \ No newline at end of file diff --git a/data/conf/samples/offline_internal_rewrite_limit/cgrates.json b/data/conf/samples/offline_internal_rewrite_limit/cgrates.json new file mode 100644 index 000000000..a17ab3856 --- /dev/null +++ b/data/conf/samples/offline_internal_rewrite_limit/cgrates.json @@ -0,0 +1,132 @@ +{ + + + + "general": { + "node_id":"offlineWithBackup", + "log_level": 7, + }, + + + "listen": { + "rpc_json": ":2012", + "rpc_gob": ":2013", + "http": ":2080" + }, + + // "cores": { + // "shutdown_timeout": "10s" + // }, + + 
+ "data_db": { + "db_type": "*internal", + "items":{ + "*charger_profiles": {"limit": -1, "ttl": "1s", "static_ttl": false, "remote":false, "replicate":false} + }, + "opts":{ + "internalDBDumpInterval": "-1", + "internalDBRewriteInterval": "-1", + "internalDBWriteLimit": 1 + } + }, + + + "stor_db": { + "db_type": "*internal", + "opts":{ + "internalDBDumpInterval": "-1", + "internalDBRewriteInterval": "-1", + "internalDBWriteLimit": 1 + } + }, + + "rals": { + "enabled": true, + "thresholds_conns": ["*internal"], + "max_increments":3000000 + }, + + + "schedulers": { + "enabled": true, + "cdrs_conns": ["*internal"], + "stats_conns": ["*internal"] + }, + + + "cdrs": { + "enabled": true, + "chargers_conns":["*internal"] + }, + + + "attributes": { + "enabled": true, + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "apiers_conns": ["*internal"] + }, + + + "chargers": { + "enabled": true, + "attributes_conns": ["*internal"] + }, + + + "resources": { + "enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + + "stats": { + "enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + "thresholds": { + "enabled": true, + "store_interval": "-1" + }, + + + "routes": { + "enabled": true, + "prefix_indexed_fields":["*req.Destination"], + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "rals_conns": ["*internal"] + }, + + + "sessions": { + "enabled": true, + "routes_conns": ["*internal"], + "resources_conns": ["*internal"], + "attributes_conns": ["*internal"], + "rals_conns": ["*internal"], + "cdrs_conns": ["*internal"], + "chargers_conns": ["*internal"], + "backup_interval": "-1" + }, + + + "apiers": { + "enabled": true, + "scheduler_conns": ["*internal"] + }, + + + "filters": { + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "apiers_conns": ["*internal"] + } + + + } + \ No newline at end of file diff --git 
a/data/conf/samples/offline_internal_rewrite_ms/cgrates.json b/data/conf/samples/offline_internal_rewrite_ms/cgrates.json new file mode 100644 index 000000000..737e9cba1 --- /dev/null +++ b/data/conf/samples/offline_internal_rewrite_ms/cgrates.json @@ -0,0 +1,130 @@ +{ + + + + "general": { + "node_id":"offlineWithBackup", + "log_level": 7, + }, + + + "listen": { + "rpc_json": ":2012", + "rpc_gob": ":2013", + "http": ":2080" + }, + + // "cores": { + // "shutdown_timeout": "10s" + // }, + + + "data_db": { + "db_type": "*internal", + "items":{ + "*charger_profiles": {"limit": -1, "ttl": "1s", "static_ttl": false, "remote":false, "replicate":false} + }, + "opts":{ + "internalDBDumpInterval": "-1", + "internalDBRewriteInterval": "500ms", + } + }, + + + "stor_db": { + "db_type": "*internal", + "opts":{ + "internalDBDumpInterval": "-1", + "internalDBRewriteInterval": "500ms", + } + }, + + "rals": { + "enabled": true, + "thresholds_conns": ["*internal"], + "max_increments":3000000 + }, + + + "schedulers": { + "enabled": true, + "cdrs_conns": ["*internal"], + "stats_conns": ["*internal"] + }, + + + "cdrs": { + "enabled": true, + "chargers_conns":["*internal"] + }, + + + "attributes": { + "enabled": true, + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "apiers_conns": ["*internal"] + }, + + + "chargers": { + "enabled": true, + "attributes_conns": ["*internal"] + }, + + + "resources": { + "enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + + "stats": { + "enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + "thresholds": { + "enabled": true, + "store_interval": "-1" + }, + + + "routes": { + "enabled": true, + "prefix_indexed_fields":["*req.Destination"], + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "rals_conns": ["*internal"] + }, + + + "sessions": { + "enabled": true, + "routes_conns": ["*internal"], + "resources_conns": ["*internal"], + "attributes_conns": 
["*internal"], + "rals_conns": ["*internal"], + "cdrs_conns": ["*internal"], + "chargers_conns": ["*internal"], + "backup_interval": "-1" + }, + + + "apiers": { + "enabled": true, + "scheduler_conns": ["*internal"] + }, + + + "filters": { + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "apiers_conns": ["*internal"] + } + + + } + \ No newline at end of file diff --git a/data/conf/samples/offline_internal_rewrite_ms_limit/cgrates.json b/data/conf/samples/offline_internal_rewrite_ms_limit/cgrates.json new file mode 100644 index 000000000..1f0642c48 --- /dev/null +++ b/data/conf/samples/offline_internal_rewrite_ms_limit/cgrates.json @@ -0,0 +1,132 @@ +{ + + + + "general": { + "node_id":"offlineWithBackup", + "log_level": 7, + }, + + + "listen": { + "rpc_json": ":2012", + "rpc_gob": ":2013", + "http": ":2080" + }, + + // "cores": { + // "shutdown_timeout": "10s" + // }, + + + "data_db": { + "db_type": "*internal", + "items":{ + "*charger_profiles": {"limit": -1, "ttl": "1s", "static_ttl": false, "remote":false, "replicate":false} + }, + "opts":{ + "internalDBDumpInterval": "-1", + "internalDBRewriteInterval": "500ms", + "internalDBWriteLimit": 1 + } + }, + + + "stor_db": { + "db_type": "*internal", + "opts":{ + "internalDBDumpInterval": "-1", + "internalDBRewriteInterval": "500ms", + "internalDBWriteLimit": 1 + } + }, + + "rals": { + "enabled": true, + "thresholds_conns": ["*internal"], + "max_increments":3000000 + }, + + + "schedulers": { + "enabled": true, + "cdrs_conns": ["*internal"], + "stats_conns": ["*internal"] + }, + + + "cdrs": { + "enabled": true, + "chargers_conns":["*internal"] + }, + + + "attributes": { + "enabled": true, + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "apiers_conns": ["*internal"] + }, + + + "chargers": { + "enabled": true, + "attributes_conns": ["*internal"] + }, + + + "resources": { + "enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + + "stats": { + 
"enabled": true, + "store_interval": "-1", + "thresholds_conns": ["*internal"] + }, + + "thresholds": { + "enabled": true, + "store_interval": "-1" + }, + + + "routes": { + "enabled": true, + "prefix_indexed_fields":["*req.Destination"], + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "rals_conns": ["*internal"] + }, + + + "sessions": { + "enabled": true, + "routes_conns": ["*internal"], + "resources_conns": ["*internal"], + "attributes_conns": ["*internal"], + "rals_conns": ["*internal"], + "cdrs_conns": ["*internal"], + "chargers_conns": ["*internal"], + "backup_interval": "-1" + }, + + + "apiers": { + "enabled": true, + "scheduler_conns": ["*internal"] + }, + + + "filters": { + "stats_conns": ["*internal"], + "resources_conns": ["*internal"], + "apiers_conns": ["*internal"] + } + + + } + \ No newline at end of file diff --git a/data/conf/samples/reprocess_cdrs_stats_ees_internal/cgrates.json b/data/conf/samples/reprocess_cdrs_stats_ees_internal/cgrates.json new file mode 100644 index 000000000..dabef13d4 --- /dev/null +++ b/data/conf/samples/reprocess_cdrs_stats_ees_internal/cgrates.json @@ -0,0 +1,93 @@ +{ + "general": { + "log_level": 7, + "reply_timeout": "50s" + }, + + "listen": { + "rpc_json": ":2012", + "rpc_gob": ":2013", + "http": ":2080" + }, + + "data_db": { + "db_type": "*internal", + "opts":{ + "internalDBDumpPath": "/tmp/internal_db/datadb", + "internalDBDumpInterval": "-1", + } + }, + + "stor_db": { + "db_type": "*internal", + "opts":{ + "internalDBDumpPath": "/tmp/internal_db/stordb", + "internalDBDumpInterval": "-1", + } + }, + + "rals": { + "enabled": true + }, + + "cdrs": { + "enabled": true, + "chargers_conns": ["*internal"], + "ees_conns": ["*localhost"], + "stats_conns": ["*localhost"], + "rals_conns": ["*localhost"] + }, + + "chargers": { + "enabled": true + }, + + "stats": { + "enabled": true, + "store_interval": "-1", + "ees_conns": ["*localhost"], + "ees_exporter_ids": ["exporter1"] + }, + + "sessions": { + 
"enabled": true, + "rals_conns": ["*internal"], + "cdrs_conns": ["*localhost"], + "chargers_conns": ["*internal"] + }, + + "ees": { + "enabled": true, + "exporters": [{ + "id": "exporter1", + "type": "*file_csv", + "export_path": "/tmp/testCSV", + "filters": ["*gt:~*eventTimestamp:2024-09-19T14:00:58+02:00"], + "attempts": 1, + "synchronous": true, + "field_separator": ",", + "fields": [{ + "tag": "StatID","path": "*exp.StatID", + "type": "*variable","value": "~*req.StatID" + }, + { + "tag": "TotalCallDuration","path": "*exp.TotalCallDuration", + "type": "*variable","value": "~*req.Metrics.*tcd" + }, + { + "tag": "TotalCallCost","path": "*exp.TotalCallCost", + "type": "*variable","value": "~*req.Metrics.*tcc" + }, + { + "tag": "NumberOfCDRs","path": "*exp.NumberOfCDRs", + "type": "*variable","value": "~*req.Metrics.*sum#1" + } + ] + }] + }, + + "apiers": { + "enabled": true + } + +} \ No newline at end of file diff --git a/data/conf/samples/rerate_cdrs_internal/cgrates.json b/data/conf/samples/rerate_cdrs_internal/cgrates.json index 24de8ccca..1c866737c 100644 --- a/data/conf/samples/rerate_cdrs_internal/cgrates.json +++ b/data/conf/samples/rerate_cdrs_internal/cgrates.json @@ -1,24 +1,36 @@ { -"data_db": { - "db_type": "*internal" -}, + "general": { + "log_level": 7, + }, -"stor_db": { - "db_type": "*internal" -}, + "data_db": { + "db_type": "*internal", + "opts":{ + "internalDBDumpPath": "/tmp/internal_db/datadb", + "internalDBDumpInterval": "-1", + } + }, -"rals": { - "enabled": true -}, + "stor_db": { + "db_type": "*internal", + "opts":{ + "internalDBDumpPath": "/tmp/internal_db/stordb", + "internalDBDumpInterval": "-1", + } + }, -"cdrs": { - "enabled": true, - "rals_conns": ["*internal"] -}, + "rals": { + "enabled": true + }, -"apiers": { - "enabled": true -} + "cdrs": { + "enabled": true, + "rals_conns": ["*internal"] + }, + + "apiers": { + "enabled": true + } } \ No newline at end of file diff --git a/data/conf/samples/rerate_cdrs_mysql/cgrates.json 
b/data/conf/samples/rerate_cdrs_mysql/cgrates.json index a81981c30..032732ca3 100644 --- a/data/conf/samples/rerate_cdrs_mysql/cgrates.json +++ b/data/conf/samples/rerate_cdrs_mysql/cgrates.json @@ -1,19 +1,20 @@ { + "general": { "log_level": 7 }, -"rals": { - "enabled": true -}, - -"cdrs": { - "enabled": true, - "rals_conns": ["*internal"] -}, - -"apiers": { - "enabled": true -} -} - \ No newline at end of file + "rals": { + "enabled": true + }, + + "cdrs": { + "enabled": true, + "rals_conns": ["*internal"] + }, + + "apiers": { + "enabled": true + } + +} \ No newline at end of file diff --git a/data/conf/samples/sessions_backup_interval_internal/cgrates.json b/data/conf/samples/sessions_backup_interval_internal/cgrates.json index 163321e36..5be1bd9e5 100644 --- a/data/conf/samples/sessions_backup_interval_internal/cgrates.json +++ b/data/conf/samples/sessions_backup_interval_internal/cgrates.json @@ -12,12 +12,23 @@ }, "data_db": { - "db_type": "internal", + "db_type": "*internal", + "opts":{ + "internalDBDumpPath": "/tmp/internal_db/datadb", + "internalDBDumpInterval": "500ms", + "internalDBWriteLimit": 100, // 100 Mb per file + "internalDBRewriteInterval": "1s", // rewriting needed so the files size doesnt multiply since we remove and store again the 500 sessions 1000 times in the test + } }, - "stor_db": { - "db_type": "internal", + "db_type": "*internal", + "opts":{ + "internalDBDumpPath": "/tmp/internal_db/stordb", + "internalDBDumpInterval": "-1", + "internalDBWriteLimit": 1024, + "internalDBRewriteInterval": "-2", //rewrite on shutdown + } }, "rals": { diff --git a/data/docker/integration/Dockerfile b/data/docker/integration/Dockerfile index 9c6d39182..7dd66f684 100644 --- a/data/docker/integration/Dockerfile +++ b/data/docker/integration/Dockerfile @@ -74,7 +74,7 @@ RUN apt-get clean && \ RUN touch /logs/mariadb.log /logs/mariadb_script.log /logs/rabbitmq.log RUN chmod 777 /logs/mariadb.log /logs/mariadb_script.log /logs/rabbitmq.log -RUN wget -O go.tgz 
"https://storage.googleapis.com/golang/go1.24.0.linux-amd64.tar.gz" --progress=dot:giga +RUN wget -O go.tgz "https://storage.googleapis.com/golang/go1.24.1.linux-amd64.tar.gz" --progress=dot:giga RUN tar -C /usr/local -xzf go.tgz RUN rm go.tgz diff --git a/docs/installation.rst b/docs/installation.rst index e8a356e51..369a939de 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -146,8 +146,8 @@ To install the latest Go version at the time of writing this documentation, run: # sudo dnf install -y wget tar for .rpm distros sudo rm -rf /usr/local/go cd /tmp - wget https://go.dev/dl/go1.24.0.linux-amd64.tar.gz - sudo tar -C /usr/local -xzf go1.24.0.linux-amd64.tar.gz + wget https://go.dev/dl/go1.24.1.linux-amd64.tar.gz + sudo tar -C /usr/local -xzf go1.24.1.linux-amd64.tar.gz export PATH=$PATH:/usr/local/go/bin Installation: diff --git a/ees/ees.go b/ees/ees.go index f2a48ef03..d2f177eb1 100644 --- a/ees/ees.go +++ b/ees/ees.go @@ -84,7 +84,7 @@ func (eeS *EventExporterS) SetupExporterCache() error { continue // skip if caching is disabled } - expCache[chID] = ltcache.NewCache(chCfg.Limit, chCfg.TTL, chCfg.StaticTTL, onCacheEvicted) + expCache[chID] = ltcache.NewCache(chCfg.Limit, chCfg.TTL, chCfg.StaticTTL, false, []func(itmID string, value any){onCacheEvicted}) // Precache exporters if required. 
if chCfg.Precache { diff --git a/ees/libcdre.go b/ees/libcdre.go index 593dcf38a..e0c9b43e7 100644 --- a/ees/libcdre.go +++ b/ees/libcdre.go @@ -36,12 +36,12 @@ import ( var failedPostCache *ltcache.Cache func init() { - failedPostCache = ltcache.NewCache(-1, 5*time.Second, false, writeFailedPosts) // configurable general + failedPostCache = ltcache.NewCache(-1, 5*time.Second, false, false, []func(itmID string, value any){writeFailedPosts}) // configurable general } // SetFailedPostCacheTTL recreates the failed cache func SetFailedPostCacheTTL(ttl time.Duration) { - failedPostCache = ltcache.NewCache(-1, ttl, false, writeFailedPosts) + failedPostCache = ltcache.NewCache(-1, ttl, false, false, []func(itmID string, value any){writeFailedPosts}) } func writeFailedPosts(_ string, value any) { diff --git a/engine/action.go b/engine/action.go index f0b6a76fb..784b84373 100644 --- a/engine/action.go +++ b/engine/action.go @@ -747,7 +747,7 @@ func setddestinations(ub *Account, a *Action, _ Actions, _ *FilterS, _ any, _ Sh return err } - if err == nil && oldDest != nil { + if oldDest != nil { if err = dm.UpdateReverseDestination(oldDest, newDest, utils.NonTransactional); err != nil { return err } @@ -1132,15 +1132,20 @@ func (a Actions) Sort() { } // Clone returns a clone from object -func (a Actions) Clone() (any, error) { +func (a Actions) Clone() Actions { if a == nil { - return nil, nil + return nil } cln := make(Actions, len(a)) for i, action := range a { cln[i] = action.Clone() } - return cln, nil + return cln +} + +// CacheClone returns a clone of Actions used by ltcache CacheCloner +func (a Actions) CacheClone() any { + return a.Clone() } // HasAction checks if the action list contains an action of the given type. 
diff --git a/engine/action_plan.go b/engine/action_plan.go index 407024584..d08dd62ad 100644 --- a/engine/action_plan.go +++ b/engine/action_plan.go @@ -72,6 +72,11 @@ type ActionPlan struct { ActionTimings []*ActionTiming } +// CacheClone returns a clone of ActionPlan used by ltcache CacheCloner +func (apl *ActionPlan) CacheClone() any { + return apl.Clone() +} + func (apl *ActionPlan) RemoveAccountID(accID string) (found bool) { if _, found = apl.AccountIDs[accID]; found { delete(apl.AccountIDs, accID) @@ -80,7 +85,7 @@ func (apl *ActionPlan) RemoveAccountID(accID string) (found bool) { } // Clone clones *ActionPlan -func (apl *ActionPlan) Clone() (any, error) { +func (apl *ActionPlan) Clone() *ActionPlan { cln := &ActionPlan{ Id: apl.Id, AccountIDs: apl.AccountIDs.Clone(), @@ -91,7 +96,7 @@ func (apl *ActionPlan) Clone() (any, error) { cln.ActionTimings[i] = act.Clone() } } - return cln, nil + return cln } // Clone clones ActionTiming diff --git a/engine/caches.go b/engine/caches.go index 62f319750..2e29d4d6b 100644 --- a/engine/caches.go +++ b/engine/caches.go @@ -41,37 +41,48 @@ func init() { //AttributeS gob.Register(new(AttributeProfile)) gob.Register(new(AttributeProfileWithAPIOpts)) + gob.Register(new(utils.TPAttributeProfile)) // ThresholdS gob.Register(new(Threshold)) gob.Register(new(ThresholdProfile)) gob.Register(new(ThresholdProfileWithAPIOpts)) gob.Register(new(ThresholdWithAPIOpts)) + gob.Register(new(utils.TPThresholdProfile)) // ResourceS gob.Register(new(Resource)) gob.Register(new(ResourceProfile)) gob.Register(new(ResourceProfileWithAPIOpts)) gob.Register(new(ResourceWithAPIOpts)) + gob.Register(new(utils.TPResourceProfile)) // StatS gob.Register(new(StatQueue)) gob.Register(new(StatQueueProfile)) gob.Register(new(StatQueueProfileWithAPIOpts)) gob.Register(new(StoredStatQueue)) gob.Register(new(StatQueueProfileWithAPIOpts)) + gob.Register(new(utils.TPStatProfile)) // RankingS gob.Register(new(RankingProfile)) + 
gob.Register(new(utils.TPRankingProfile)) // RouteS gob.Register(new(RouteProfile)) gob.Register(new(RouteProfileWithAPIOpts)) + gob.Register(new(utils.TPRouteProfile)) // FilterS gob.Register(new(Filter)) gob.Register(new(FilterWithAPIOpts)) + gob.Register(new(utils.TPFilterProfile)) // DispatcherS gob.Register(new(DispatcherHost)) gob.Register(new(DispatcherHostProfile)) gob.Register(new(DispatcherHostWithAPIOpts)) + gob.Register(new(DispatcherProfile)) + gob.Register(new(utils.TPDispatcherHost)) + gob.Register(new(utils.TPDispatcherProfile)) // CDRs gob.Register(new(EventCost)) + gob.Register(new(CDR)) // StatMetrics gob.Register(new(StatASR)) @@ -91,12 +102,50 @@ func init() { gob.Register(map[string]any{}) gob.Register(map[string][]map[string]any{}) gob.Register(map[string]string{}) + gob.Register(map[string]int64{}) gob.Register(time.Duration(0)) gob.Register(time.Time{}) gob.Register(url.Values{}) gob.Register(json.RawMessage{}) gob.Register(BalanceSummaries{}) + gob.Register(Actions{}) + gob.Register(new(ActionPlan)) + gob.Register(ActionTriggers{}) + gob.Register(new(utils.TPActions)) + gob.Register(new(utils.TPActionPlan)) + gob.Register(new(utils.TPActionTriggers)) + + gob.Register(new(RatingPlan)) + gob.Register(new(RatingProfile)) + gob.Register(new(utils.TPRatingPlan)) + gob.Register(new(utils.TPRatingProfile)) + gob.Register(new(utils.TPRateRALs)) + + gob.Register(new(Account)) + gob.Register(new(utils.TPAccountActions)) + + gob.Register(new(utils.TPTiming)) + gob.Register(new(utils.ApierTPTiming)) + + gob.Register(new(Destination)) + gob.Register(new(utils.TPDestination)) + gob.Register(new(utils.TPDestinationRate)) + + gob.Register(new(Trend)) + gob.Register(new(TrendProfile)) + gob.Register(new(utils.TPTrendsProfile)) + + gob.Register(new(SharedGroup)) + gob.Register(new(utils.TPSharedGroups)) + + gob.Register(new(ChargerProfile)) + gob.Register(new(utils.TPChargerProfile)) + + gob.Register(Versions{}) + gob.Register(new(StoredSession)) + 
gob.Register(new(SMCost)) + gob.Register(new(utils.ArgCacheReplicateSet)) gob.Register(new(utils.ArgCacheReplicateRemove)) @@ -114,20 +163,24 @@ func NewCacheS(cfg *config.CGRConfig, dm *DataManager, cpS *CapsStats) (c *Cache k == utils.CacheCapsEvents { continue } - val.OnEvicted = func(itmID string, value any) { - if err := connMgr.Call(context.TODO(), cfg.CacheCfg().ReplicationConns, utils.CacheSv1ReplicateRemove, - &utils.ArgCacheReplicateRemove{ - CacheID: k, - ItemID: itmID, - }, &reply); err != nil { - utils.Logger.Warning(fmt.Sprintf("error: %+v when autoexpired item: %+v from: %+v", err, itmID, k)) - } + val.OnEvicted = []func(itmID string, value any){ + func(itmID string, value any) { + if err := connMgr.Call(context.TODO(), cfg.CacheCfg().ReplicationConns, utils.CacheSv1ReplicateRemove, + &utils.ArgCacheReplicateRemove{ + CacheID: k, + ItemID: itmID, + }, &reply); err != nil { + utils.Logger.Warning(fmt.Sprintf("error: %+v when autoexpired item: %+v from: %+v", err, itmID, k)) + } + }, } } } if _, has := tCache[utils.CacheCapsEvents]; has && cpS != nil { - tCache[utils.CacheCapsEvents].OnEvicted = cpS.OnEvict + tCache[utils.CacheCapsEvents].OnEvicted = []func(itmID string, value interface{}){ + cpS.OnEvict, + } } c = &CacheS{ cfg: cfg, @@ -262,11 +315,6 @@ func (chS *CacheS) CommitTransaction(transID string) { chS.tCache.CommitTransaction(transID) } -// GetCloned is an exported method from TransCache -func (chS *CacheS) GetCloned(chID, itmID string) (cln any, err error) { - return chS.tCache.GetCloned(chID, itmID) -} - // GetPrecacheChannel returns the channel used to signal precaching func (chS *CacheS) GetPrecacheChannel(chID string) chan struct{} { return chS.pcItems[chID] diff --git a/engine/cdr.go b/engine/cdr.go index ab80d71cb..2e5151b9c 100644 --- a/engine/cdr.go +++ b/engine/cdr.go @@ -313,6 +313,11 @@ func (cdr *CDR) Clone() *CDR { return cln } +// CacheClone returns a clone of CDR used by ltcache CacheCloner +func (cdr *CDR) CacheClone() any { 
+ return cdr.Clone() +} + func (cdr *CDR) AsMapStorage() (mp utils.MapStorage) { mp = utils.MapStorage{ utils.MetaReq: cdr.AsMapStringIface(), diff --git a/engine/connmanager.go b/engine/connmanager.go index f28ff6562..44bc20342 100644 --- a/engine/connmanager.go +++ b/engine/connmanager.go @@ -36,7 +36,7 @@ func NewConnManager(cfg *config.CGRConfig, rpcInternal map[string]chan birpc.Cli cM = &ConnManager{ cfg: cfg, rpcInternal: rpcInternal, - connCache: ltcache.NewCache(-1, 0, true, nil), + connCache: ltcache.NewCache(-1, 0, true, false, nil), connLks: make(map[string]*sync.Mutex), } for connID := range cfg.RPCConns() { diff --git a/engine/datadbmock.go b/engine/datadbmock.go index ec0caf67c..faf84b854 100644 --- a/engine/datadbmock.go +++ b/engine/datadbmock.go @@ -565,3 +565,19 @@ func (dbM *DataDBMock) GetSessionsBackupDrv(nodeID string, tnt string) ([]*Store func (dbM *DataDBMock) RemoveSessionsBackupDrv(nodeID, tnt, cgrid string) error { return utils.ErrNotImplemented } + +func (dbM *DataDBMock) DumpDataDB() error { + return utils.ErrNotImplemented +} + +func (dbM *DataDBMock) RewriteDataDB() (err error) { + return utils.ErrNotImplemented +} + +func (dbM *DataDBMock) BackupDataDB(backupFolderPath string, zip bool) (err error) { + return utils.ErrNotImplemented +} + +func (dbM *DataDBMock) BackupStorDBDump(backupFolderPath string, zip bool) (err error) { + return utils.ErrNotImplemented +} diff --git a/engine/datamanager.go b/engine/datamanager.go index 53117e0bc..9d5c1cab8 100644 --- a/engine/datamanager.go +++ b/engine/datamanager.go @@ -28,7 +28,6 @@ import ( "github.com/cgrates/cgrates/config" "github.com/cgrates/cgrates/guardian" "github.com/cgrates/cgrates/utils" - "github.com/cgrates/ltcache" ) var ( @@ -2314,14 +2313,12 @@ func (dm *DataManager) RemoveSharedGroup(id, transactionID string) (err error) { func (dm *DataManager) GetActions(key string, skipCache bool, transactionID string) (as Actions, err error) { if !skipCache { - if x, err := 
Cache.GetCloned(utils.CacheActions, key); err != nil { - if err != ltcache.ErrNotFound { - return nil, err - } - } else if x == nil { + if x, ok := Cache.Get(utils.CacheActions, key); !ok { + // continue past if not found + } else if x == nil { // error if found but is nil return nil, utils.ErrNotFound } else { - return x.(Actions), nil + return x.(Actions), nil // Action is cloned on ltcache side } } if dm == nil { @@ -2413,14 +2410,12 @@ func (dm *DataManager) RemoveActions(key string) (err error) { func (dm *DataManager) GetActionPlan(key string, cacheRead, cacheWrite bool, transactionID string) (ats *ActionPlan, err error) { if cacheRead { - if x, err := Cache.GetCloned(utils.CacheActionPlans, key); err != nil { - if err != ltcache.ErrNotFound { // Only consider cache if item was found - return nil, err - } + if x, ok := Cache.Get(utils.CacheActionPlans, key); !ok { + // Only consider cache if item was found } else if x == nil { // item was placed nil in cache return nil, utils.ErrNotFound } else { - return x.(*ActionPlan), nil + return x.(*ActionPlan), nil // ActionPlan is cloned on ltcache side } } if dm == nil { diff --git a/engine/destinations.go b/engine/destinations.go index 616d84e0a..1b4ada9c2 100644 --- a/engine/destinations.go +++ b/engine/destinations.go @@ -38,6 +38,26 @@ type Destination struct { Prefixes []string } +// Clone returns a clone of Destination +func (d *Destination) Clone() *Destination { + if d == nil { + return nil + } + result := &Destination{ + Id: d.Id, + } + if d.Prefixes != nil { + result.Prefixes = make([]string, len(d.Prefixes)) + copy(result.Prefixes, d.Prefixes) + } + return result +} + +// CacheClone returns a clone of Destination used by ltcache CacheCloner +func (d *Destination) CacheClone() any { + return d.Clone() +} + type DestinationWithAPIOpts struct { *Destination Tenant string diff --git a/engine/dispatcherprfl.go b/engine/dispatcherprfl.go index 4b2bd6ba0..b12ac6cb9 100644 --- a/engine/dispatcherprfl.go +++ 
b/engine/dispatcherprfl.go @@ -22,6 +22,8 @@ import ( "math/rand" "sort" + "maps" + "github.com/cgrates/birpc" "github.com/cgrates/birpc/context" "github.com/cgrates/cgrates/config" @@ -110,6 +112,43 @@ type DispatcherProfile struct { Hosts DispatcherHostProfiles // dispatch to these connections } +// Clone method for DispatcherProfile +func (dp *DispatcherProfile) Clone() *DispatcherProfile { + clone := &DispatcherProfile{ + Tenant: dp.Tenant, + ID: dp.ID, + Strategy: dp.Strategy, + Weight: dp.Weight, + } + if dp.Subsystems != nil { + clone.Subsystems = make([]string, len(dp.Subsystems)) + copy(clone.Subsystems, dp.Subsystems) + } + if dp.FilterIDs != nil { + clone.FilterIDs = make([]string, len(dp.FilterIDs)) + copy(clone.FilterIDs, dp.FilterIDs) + } + if dp.StrategyParams != nil { + clone.StrategyParams = make(map[string]any) + maps.Copy(clone.StrategyParams, dp.StrategyParams) + } + if dp.Hosts != nil { + clone.Hosts = make(DispatcherHostProfiles, len(dp.Hosts)) + for i, hostProfile := range dp.Hosts { + clone.Hosts[i] = hostProfile.Clone() + } + } + if dp.ActivationInterval != nil { + clone.ActivationInterval = dp.ActivationInterval.Clone() + } + return clone +} + +// CacheClone returns a clone of DispatcherProfile used by ltcache CacheCloner +func (dp *DispatcherProfile) CacheClone() any { + return dp.Clone() +} + // DispatcherProfileWithAPIOpts is used in replicatorV1 for dispatcher type DispatcherProfileWithAPIOpts struct { *DispatcherProfile @@ -195,3 +234,8 @@ func (dHPrflIDs DispatcherHostIDs) Clone() (cln DispatcherHostIDs) { copy(cln, dHPrflIDs) return } + +// CacheClone returns a clone of DispatcherHostIDs used by ltcache CacheCloner +func (dHPrflIDs *DispatcherHostIDs) CacheClone() any { + return dHPrflIDs.Clone() +} diff --git a/engine/filters.go b/engine/filters.go index 3fff119c5..cbd5a1e82 100644 --- a/engine/filters.go +++ b/engine/filters.go @@ -203,6 +203,29 @@ type Filter struct { ActivationInterval *utils.ActivationInterval } +// Clone 
method for Filter +func (fltr *Filter) Clone() *Filter { + clone := &Filter{ + Tenant: fltr.Tenant, + ID: fltr.ID, + } + if fltr.Rules != nil { + clone.Rules = make([]*FilterRule, len(fltr.Rules)) + for i, rule := range fltr.Rules { + clone.Rules[i] = rule.Clone() + } + } + if fltr.ActivationInterval != nil { + clone.ActivationInterval = fltr.ActivationInterval.Clone() + } + return clone +} + +// CacheClone returns a clone of Filter used by ltcache CacheCloner +func (fltr *Filter) CacheClone() any { + return fltr.Clone() +} + // FilterWithOpts the arguments for the replication type FilterWithAPIOpts struct { *Filter @@ -289,6 +312,39 @@ type FilterRule struct { negative *bool } +// Clone method for FilterRule +func (fltr *FilterRule) Clone() *FilterRule { + clone := &FilterRule{ + Type: fltr.Type, + Element: fltr.Element, + } + if fltr.Values != nil { + clone.Values = make([]string, len(fltr.Values)) + copy(clone.Values, fltr.Values) + } + if fltr.rsrValues != nil { + clone.rsrValues = make(config.RSRParsers, len(fltr.rsrValues)) + copy(clone.rsrValues, fltr.rsrValues) + } + if fltr.negative != nil { + clone.negative = new(bool) + *clone.negative = *fltr.negative + } + if fltr.rsrFilters != nil { + clone.rsrFilters = make(utils.RSRFilters, 0, len(fltr.rsrFilters)) + for _, filter := range fltr.rsrFilters { + clone.rsrFilters = append(clone.rsrFilters, filter.Clone()) + } + } + if fltr.regexValues != nil { + clone.regexValues = make([]*regexp.Regexp, len(fltr.regexValues)) + for i, regex := range fltr.regexValues { + clone.regexValues[i] = regex.Copy() + } + } + return clone +} + // CompileValues compiles RSR fields func (fltr *FilterRule) CompileValues() (err error) { switch fltr.Type { diff --git a/engine/globalvars.go b/engine/globalvars.go index df2bca821..56dc36960 100644 --- a/engine/globalvars.go +++ b/engine/globalvars.go @@ -34,7 +34,7 @@ var ( ) func init() { - dm = NewDataManager(NewInternalDB(nil, nil, true, config.CgrConfig().DataDbCfg().Items), 
config.CgrConfig().CacheCfg(), connMgr) + dm = NewDataManager(NewInternalDB(nil, nil, true, false, config.CgrConfig().DataDbCfg().Items), config.CgrConfig().CacheCfg(), connMgr) httpPstrTransport = config.CgrConfig().HTTPCfg().ClientOpts } diff --git a/engine/libattributes.go b/engine/libattributes.go index 857ff9f59..8e014970d 100644 --- a/engine/libattributes.go +++ b/engine/libattributes.go @@ -36,6 +36,23 @@ type Attribute struct { Value config.RSRParsers } +// Clone method for Attribute +func (a *Attribute) Clone() *Attribute { + clone := &Attribute{ + Path: a.Path, + Type: a.Type, + } + if a.FilterIDs != nil { + clone.FilterIDs = make([]string, len(a.FilterIDs)) + copy(clone.FilterIDs, a.FilterIDs) + } + if a.Value != nil { + clone.Value = make(config.RSRParsers, len(a.Value)) + copy(clone.Value, a.Value.Clone()) + } + return clone +} + // AttributeProfile the profile definition for the attributes type AttributeProfile struct { Tenant string @@ -48,6 +65,39 @@ type AttributeProfile struct { Weight float64 } +// Clone method for AttributeProfile struct +func (ap *AttributeProfile) Clone() *AttributeProfile { + clone := &AttributeProfile{ + Tenant: ap.Tenant, + ID: ap.ID, + Blocker: ap.Blocker, + Weight: ap.Weight, + } + if ap.Contexts != nil { + clone.Contexts = make([]string, len(ap.Contexts)) + copy(clone.Contexts, ap.Contexts) + } + if ap.FilterIDs != nil { + clone.FilterIDs = make([]string, len(ap.FilterIDs)) + copy(clone.FilterIDs, ap.FilterIDs) + } + if ap.Attributes != nil { + clone.Attributes = make([]*Attribute, len(ap.Attributes)) + for i, attr := range ap.Attributes { + clone.Attributes[i] = attr.Clone() + } + } + if ap.ActivationInterval != nil { + clone.ActivationInterval = ap.ActivationInterval.Clone() + } + return clone +} + +// CacheClone returns a clone of AttributeProfile used by ltcache CacheCloner +func (ap *AttributeProfile) CacheClone() any { + return ap.Clone() +} + // AttributeProfileWithAPIOpts is used in replicatorV1 for dispatcher 
type AttributeProfileWithAPIOpts struct { *AttributeProfile diff --git a/engine/libchargers.go b/engine/libchargers.go index 1ead1eec9..7f78ff111 100644 --- a/engine/libchargers.go +++ b/engine/libchargers.go @@ -35,6 +35,33 @@ type ChargerProfile struct { Weight float64 } +// Clone method for ChargerProfile +func (cp *ChargerProfile) Clone() *ChargerProfile { + clone := &ChargerProfile{ + Tenant: cp.Tenant, + ID: cp.ID, + RunID: cp.RunID, + Weight: cp.Weight, + } + if cp.FilterIDs != nil { + clone.FilterIDs = make([]string, len(cp.FilterIDs)) + copy(clone.FilterIDs, cp.FilterIDs) + } + if cp.AttributeIDs != nil { + clone.AttributeIDs = make([]string, len(cp.AttributeIDs)) + copy(clone.AttributeIDs, cp.AttributeIDs) + } + if cp.ActivationInterval != nil { + clone.ActivationInterval = cp.ActivationInterval.Clone() + } + return clone +} + +// CacheClone returns a clone of ChargerProfile used by ltcache CacheCloner +func (cp *ChargerProfile) CacheClone() any { + return cp.Clone() +} + // ChargerProfileWithAPIOpts is used in replicatorV1 for dispatcher type ChargerProfileWithAPIOpts struct { *ChargerProfile diff --git a/engine/libindex_health.go b/engine/libindex_health.go index 16bfd290b..2d0ab3a7f 100644 --- a/engine/libindex_health.go +++ b/engine/libindex_health.go @@ -64,8 +64,8 @@ func GetAccountActionPlansIndexHealth(dm *DataManager, objLimit, indexLimit int, missingIndex := map[string][]string{} // the indexes are not present but the action plans points to that account // misingAccounts // local cache - indexesCache := ltcache.NewCache(objLimit, objTTL, objStaticTTL, nil) - objectsCache := ltcache.NewCache(indexLimit, indexTTL, indexStaticTTL, nil) + indexesCache := ltcache.NewCache(objLimit, objTTL, objStaticTTL, false, nil) + objectsCache := ltcache.NewCache(indexLimit, indexTTL, indexStaticTTL, false, nil) getCachedIndex := func(acntID string) (apIDs []string, err error) { if x, ok := indexesCache.Get(acntID); ok { @@ -181,8 +181,8 @@ func 
GetReverseDestinationsIndexHealth(dm *DataManager, objLimit, indexLimit int missingIndex := map[string][]string{} // the indexes are not present but the action plans points to that account // misingAccounts // local cache - indexesCache := ltcache.NewCache(objLimit, objTTL, objStaticTTL, nil) - objectsCache := ltcache.NewCache(indexLimit, indexTTL, indexStaticTTL, nil) + indexesCache := ltcache.NewCache(objLimit, objTTL, objStaticTTL, false, nil) + objectsCache := ltcache.NewCache(indexLimit, indexTTL, indexStaticTTL, false, nil) getCachedIndex := func(prefix string) (dstIDs []string, err error) { if x, ok := indexesCache.Get(prefix); ok { diff --git a/engine/librankings.go b/engine/librankings.go index f78e4a92b..49139f61a 100644 --- a/engine/librankings.go +++ b/engine/librankings.go @@ -24,6 +24,8 @@ import ( "sync" "time" + "maps" + "github.com/cgrates/cgrates/utils" ) @@ -77,6 +79,11 @@ func (rkP *RankingProfile) Clone() (cln *RankingProfile) { return } +// CacheClone returns a clone of RankingProfile used by ltcache CacheCloner +func (rkP *RankingProfile) CacheClone() any { + return rkP.Clone() +} + // NewRankingFromProfile is a constructor for an empty ranking out of it's profile func NewRankingFromProfile(rkP *RankingProfile) (rk *Ranking) { rk = &Ranking{ @@ -118,6 +125,37 @@ type Ranking struct { } +// Clone clones *Ranking +func (r *Ranking) Clone() *Ranking { + r.rMux.RLock() + defer r.rMux.RUnlock() + cln := &Ranking{ + Tenant: r.Tenant, + ID: r.ID, + LastUpdate: r.LastUpdate, + Sorting: r.Sorting, + SortingParameters: make([]string, len(r.SortingParameters)), + SortedStatIDs: make([]string, len(r.SortedStatIDs)), + } + copy(cln.SortingParameters, r.SortingParameters) + copy(cln.SortedStatIDs, r.SortedStatIDs) + cln.Metrics = make(map[string]map[string]float64) + for statID, metricMap := range r.Metrics { + cln.Metrics[statID] = make(map[string]float64) + maps.Copy(cln.Metrics[statID], metricMap) + } + if r.rkPrfl != nil { + cln.rkPrfl = 
r.rkPrfl.Clone() + } + cln.metricIDs = r.metricIDs.Clone() + return cln +} + +// CacheClone returns a clone of Ranking used by ltcache CacheCloner +func (r *Ranking) CacheClone() any { + return r.Clone() +} + func (r *Ranking) TenantID() string { return utils.ConcatenatedKey(r.Tenant, r.ID) } diff --git a/engine/libstats.go b/engine/libstats.go index fb3257a33..f127daf70 100644 --- a/engine/libstats.go +++ b/engine/libstats.go @@ -48,6 +48,47 @@ type StatQueueProfile struct { lkID string // holds the reference towards guardian lock key } +// Clone clones *StatQueueProfile +func (sqp *StatQueueProfile) Clone() *StatQueueProfile { + if sqp == nil { + return nil + } + result := &StatQueueProfile{ + Tenant: sqp.Tenant, + ID: sqp.ID, + QueueLength: sqp.QueueLength, + TTL: sqp.TTL, + MinItems: sqp.MinItems, + Stored: sqp.Stored, + Blocker: sqp.Blocker, + Weight: sqp.Weight, + lkID: sqp.lkID, + } + if sqp.FilterIDs != nil { + result.FilterIDs = make([]string, len(sqp.FilterIDs)) + copy(result.FilterIDs, sqp.FilterIDs) + } + if sqp.ThresholdIDs != nil { + result.ThresholdIDs = make([]string, len(sqp.ThresholdIDs)) + copy(result.ThresholdIDs, sqp.ThresholdIDs) + } + if sqp.ActivationInterval != nil { + result.ActivationInterval = sqp.ActivationInterval.Clone() + } + if sqp.Metrics != nil { + result.Metrics = make([]*MetricWithFilters, len(sqp.Metrics)) + for i, metric := range sqp.Metrics { + result.Metrics[i] = metric.Clone() + } + } + return result +} + +// CacheClone returns a clone of StatQueueProfile used by ltcache CacheCloner +func (sqp *StatQueueProfile) CacheClone() any { + return sqp.Clone() +} + // StatQueueProfileWithAPIOpts is used in replicatorV1 for dispatcher type StatQueueProfileWithAPIOpts struct { *StatQueueProfile @@ -93,6 +134,21 @@ type MetricWithFilters struct { MetricID string } +// Clone clones *MetricWithFilters +func (mwf *MetricWithFilters) Clone() *MetricWithFilters { + if mwf == nil { + return nil + } + result := &MetricWithFilters{ + MetricID: 
mwf.MetricID, + } + if mwf.FilterIDs != nil { + result.FilterIDs = make([]string, len(mwf.FilterIDs)) + copy(result.FilterIDs, mwf.FilterIDs) + } + return result +} + // NewStoredStatQueue initiates a StoredStatQueue out of StatQueue func NewStoredStatQueue(sq *StatQueue, ms Marshaler) (sSQ *StoredStatQueue, err error) { sSQ = &StoredStatQueue{ @@ -197,6 +253,35 @@ type StatQueue struct { ttl *time.Duration // timeToLeave, picked on each init } +// Clone clones *StatQueue +func (sq *StatQueue) Clone() *StatQueue { + result := &StatQueue{ + Tenant: sq.Tenant, + ID: sq.ID, + SQItems: make([]SQItem, len(sq.SQItems)), + SQMetrics: sq.SQMetrics, + lkID: sq.lkID, + } + copy(result.SQItems, sq.SQItems) + if sq.sqPrfl != nil { + result.sqPrfl = sq.sqPrfl.Clone() + } + if sq.dirty != nil { + result.dirty = new(bool) + *result.dirty = *sq.dirty + } + if sq.ttl != nil { + result.ttl = new(time.Duration) + *result.ttl = *sq.ttl + } + return result +} + +// CacheClone returns a clone of StatQueue used by ltcache CacheCloner +func (sq *StatQueue) CacheClone() any { + return sq.Clone() +} + // statQueueLockKey returns the ID used to lock a StatQueue with guardian func statQueueLockKey(tnt, id string) string { return utils.ConcatenatedKey(utils.CacheStatQueues, tnt, id) diff --git a/engine/libtrends.go b/engine/libtrends.go index de7540009..22ce6d519 100644 --- a/engine/libtrends.go +++ b/engine/libtrends.go @@ -73,6 +73,11 @@ func (tP *TrendProfile) Clone() (clnTp *TrendProfile) { return } +// CacheClone returns a clone of TrendProfile used by ltcache CacheCloner +func (tP *TrendProfile) CacheClone() any { + return tP.Clone() +} + type TrendProfileWithAPIOpts struct { *TrendProfile APIOpts map[string]any @@ -141,6 +146,11 @@ func (t *Trend) Clone() (tC *Trend) { return } +// CacheClone returns a clone of Trend used by ltcache CacheCloner +func (t *Trend) CacheClone() any { + return t.Clone() +} + // AsTrendSummary transforms the trend into TrendSummary func (t *Trend) 
asTrendSummary() (ts *TrendSummary) { ts = &TrendSummary{ diff --git a/engine/rateinterval.go b/engine/rateinterval.go index a1ed6b29a..50c70ea58 100644 --- a/engine/rateinterval.go +++ b/engine/rateinterval.go @@ -469,24 +469,26 @@ func (rit *RITiming) Clone() (cln *RITiming) { return } cln = &RITiming{ - ID: rit.ID, - StartTime: rit.StartTime, - EndTime: rit.EndTime, + ID: rit.ID, + StartTime: rit.StartTime, + EndTime: rit.EndTime, + cronString: rit.cronString, + tag: rit.tag, } - if len(rit.Years) != 0 { + if rit.Years != nil { cln.Years = make(utils.Years, len(rit.Years)) copy(cln.Years, rit.Years) } - if len(rit.Months) != 0 { + if rit.Months != nil { cln.Months = make(utils.Months, len(rit.Months)) copy(cln.Months, rit.Months) } - if len(rit.MonthDays) != 0 { + if rit.MonthDays != nil { cln.MonthDays = make(utils.MonthDays, len(rit.MonthDays)) copy(cln.MonthDays, rit.MonthDays) } - if len(rit.WeekDays) != 0 { + if rit.WeekDays != nil { cln.WeekDays = make(utils.WeekDays, len(rit.WeekDays)) copy(cln.WeekDays, rit.WeekDays) } diff --git a/engine/ratingplan.go b/engine/ratingplan.go index b64f3114a..a0086b944 100644 --- a/engine/ratingplan.go +++ b/engine/ratingplan.go @@ -32,6 +32,44 @@ type RatingPlan struct { DestinationRates map[string]RPRateList } +// Clone returns a clone of RatingPlan +func (rp *RatingPlan) Clone() *RatingPlan { + if rp == nil { + return nil + } + result := &RatingPlan{ + Id: rp.Id, + } + if rp.Timings != nil { + result.Timings = make(map[string]*RITiming, len(rp.Timings)) + for k, v := range rp.Timings { + if v != nil { + result.Timings[k] = v.Clone() + } + } + } + if rp.Ratings != nil { + result.Ratings = make(map[string]*RIRate, len(rp.Ratings)) + for k, v := range rp.Ratings { + if v != nil { + result.Ratings[k] = v.Clone() + } + } + } + if rp.DestinationRates != nil { + result.DestinationRates = make(map[string]RPRateList, len(rp.DestinationRates)) + for k, v := range rp.DestinationRates { + result.DestinationRates[k] = v.Clone() + } 
+ } + return result +} + +// CacheClone returns a clone of RatingPlan used by ltcache CacheCloner +func (rp *RatingPlan) CacheClone() any { + return rp.Clone() +} + // RatingPlanWithOpts is used in replicatorV1 for dispatcher type RatingPlanWithAPIOpts struct { *RatingPlan @@ -45,12 +83,36 @@ type RPRate struct { Weight float64 } +// Clone returns a clone of RPRate +func (rpr *RPRate) Clone() *RPRate { + if rpr == nil { + return nil + } + return &RPRate{ + Timing: rpr.Timing, + Rating: rpr.Rating, + Weight: rpr.Weight, + } +} + func (rpr *RPRate) Equal(orpr *RPRate) bool { return rpr.Timing == orpr.Timing && rpr.Rating == orpr.Rating && rpr.Weight == orpr.Weight } type RPRateList []*RPRate +// Clone returns a clone of RPRateList +func (rprl RPRateList) Clone() RPRateList { + if rprl == nil { + return nil + } + result := make(RPRateList, len(rprl)) + for i, rate := range rprl { + result[i] = rate.Clone() + } + return result +} + func (rp *RatingPlan) RateIntervalList(dId string) RateIntervalList { ril := make(RateIntervalList, len(rp.DestinationRates[dId])) for i, rpr := range rp.DestinationRates[dId] { diff --git a/engine/ratingprofile.go b/engine/ratingprofile.go index 3a147ab10..dae7575f3 100644 --- a/engine/ratingprofile.go +++ b/engine/ratingprofile.go @@ -33,6 +33,25 @@ type RatingProfile struct { RatingPlanActivations RatingPlanActivations } +// Clone returns a clone of RatingProfile +func (rp *RatingProfile) Clone() *RatingProfile { + if rp == nil { + return nil + } + + result := &RatingProfile{ + Id: rp.Id, + RatingPlanActivations: rp.RatingPlanActivations.Clone(), + } + + return result +} + +// CacheClone returns a clone of RatingProfile used by ltcache CacheCloner +func (rp *RatingProfile) CacheClone() any { + return rp.Clone() +} + // RatingProfileWithAPIOpts is used in replicatorV1 for dispatcher type RatingProfileWithAPIOpts struct { *RatingProfile @@ -46,6 +65,25 @@ type RatingPlanActivation struct { FallbackKeys []string } +// Clone returns a clone 
of RatingPlanActivation +func (rpa *RatingPlanActivation) Clone() *RatingPlanActivation { + if rpa == nil { + return nil + } + + result := &RatingPlanActivation{ + ActivationTime: rpa.ActivationTime, + RatingPlanId: rpa.RatingPlanId, + } + + if rpa.FallbackKeys != nil { + result.FallbackKeys = make([]string, len(rpa.FallbackKeys)) + copy(result.FallbackKeys, rpa.FallbackKeys) + } + + return result +} + func (rpa *RatingPlanActivation) Equal(orpa *RatingPlanActivation) bool { return rpa.ActivationTime == orpa.ActivationTime && rpa.RatingPlanId == orpa.RatingPlanId @@ -53,6 +91,20 @@ func (rpa *RatingPlanActivation) Equal(orpa *RatingPlanActivation) bool { type RatingPlanActivations []*RatingPlanActivation +// Clone returns a clone of RatingPlanActivations +func (rpas RatingPlanActivations) Clone() RatingPlanActivations { + if rpas == nil { + return nil + } + + result := make(RatingPlanActivations, len(rpas)) + for i, rpa := range rpas { + result[i] = rpa.Clone() + } + + return result +} + func (rpas RatingPlanActivations) Len() int { return len(rpas) } diff --git a/engine/resources.go b/engine/resources.go index 1d45195fd..cc3f30a44 100644 --- a/engine/resources.go +++ b/engine/resources.go @@ -48,6 +48,44 @@ type ResourceProfile struct { lkID string // holds the reference towards guardian lock key } +// Clone clones *ResourceProfile +func (rp *ResourceProfile) Clone() *ResourceProfile { + if rp == nil { + return nil + } + clone := &ResourceProfile{ + Tenant: rp.Tenant, + ID: rp.ID, + UsageTTL: rp.UsageTTL, + Limit: rp.Limit, + AllocationMessage: rp.AllocationMessage, + Blocker: rp.Blocker, + Stored: rp.Stored, + Weight: rp.Weight, + lkID: rp.lkID, + } + if rp.FilterIDs != nil { + clone.FilterIDs = make([]string, len(rp.FilterIDs)) + copy(clone.FilterIDs, rp.FilterIDs) + } + if rp.ThresholdIDs != nil { + clone.ThresholdIDs = make([]string, len(rp.ThresholdIDs)) + copy(clone.ThresholdIDs, rp.ThresholdIDs) + } + if rp.ActivationInterval != nil { + 
clone.ActivationInterval = &utils.ActivationInterval{ + ActivationTime: rp.ActivationInterval.ActivationTime, + ExpiryTime: rp.ActivationInterval.ExpiryTime, + } + } + return clone +} + +// CacheClone returns a clone of ResourceProfile used by ltcache CacheCloner +func (rp *ResourceProfile) CacheClone() any { + return rp.Clone() +} + // ResourceProfileWithAPIOpts is used in replicatorV1 for dispatcher type ResourceProfileWithAPIOpts struct { *ResourceProfile @@ -109,6 +147,9 @@ func (ru *ResourceUsage) isActive(atTime time.Time) bool { // Clone duplicates ru func (ru *ResourceUsage) Clone() (cln *ResourceUsage) { + if ru == nil { + return nil + } cln = new(ResourceUsage) *cln = *ru return @@ -128,6 +169,44 @@ type Resource struct { rPrf *ResourceProfile // for ordering purposes } +// Clone clones *Resource +func (r *Resource) Clone() *Resource { + if r == nil { + return nil + } + clone := &Resource{ + Tenant: r.Tenant, + ID: r.ID, + lkID: r.lkID, + } + if r.Usages != nil { + clone.Usages = make(map[string]*ResourceUsage, len(r.Usages)) + for key, usage := range r.Usages { + clone.Usages[key] = usage.Clone() + } + } + if r.TTLIdx != nil { + clone.TTLIdx = make([]string, len(r.TTLIdx)) + copy(clone.TTLIdx, r.TTLIdx) + } + if r.ttl != nil { + ttlCopy := *r.ttl + clone.ttl = &ttlCopy + } + if r.tUsage != nil { + tUsageCopy := *r.tUsage + clone.tUsage = &tUsageCopy + } + if r.dirty != nil { + dirtyCopy := *r.dirty + clone.dirty = &dirtyCopy + } + if r.rPrf != nil { + clone.rPrf = r.rPrf.Clone() + } + return clone +} + // resourceLockKey returns the ID used to lock a resource with guardian func resourceLockKey(tnt, id string) string { return utils.ConcatenatedKey(utils.CacheResources, tnt, id) diff --git a/engine/routes.go b/engine/routes.go index 2183bbc10..7c70d7d1d 100644 --- a/engine/routes.go +++ b/engine/routes.go @@ -26,6 +26,8 @@ import ( "strings" "time" + "maps" + "github.com/cgrates/birpc/context" "github.com/cgrates/cgrates/config" 
"github.com/cgrates/cgrates/utils" @@ -47,6 +49,47 @@ type Route struct { lazyCheckRules []*FilterRule } +// Clone method for Route +func (r *Route) Clone() *Route { + clone := &Route{ + ID: r.ID, + Weight: r.Weight, + Blocker: r.Blocker, + RouteParameters: r.RouteParameters, + } + if r.FilterIDs != nil { + clone.FilterIDs = make([]string, len(r.FilterIDs)) + copy(clone.FilterIDs, r.FilterIDs) + } + if r.AccountIDs != nil { + clone.AccountIDs = make([]string, len(r.AccountIDs)) + copy(clone.AccountIDs, r.AccountIDs) + } + if r.RatingPlanIDs != nil { + clone.RatingPlanIDs = make([]string, len(r.RatingPlanIDs)) + copy(clone.RatingPlanIDs, r.RatingPlanIDs) + } + if r.ResourceIDs != nil { + clone.ResourceIDs = make([]string, len(r.ResourceIDs)) + copy(clone.ResourceIDs, r.ResourceIDs) + } + if r.StatIDs != nil { + clone.StatIDs = make([]string, len(r.StatIDs)) + copy(clone.StatIDs, r.StatIDs) + } + if r.cacheRoute != nil { + clone.cacheRoute = make(map[string]any) + maps.Copy(clone.cacheRoute, r.cacheRoute) + } + if r.lazyCheckRules != nil { + clone.lazyCheckRules = make([]*FilterRule, len(r.lazyCheckRules)) + for i, rule := range r.lazyCheckRules { + clone.lazyCheckRules[i] = rule.Clone() + } + } + return clone +} + // RouteProfile represents the configuration of a Route profile type RouteProfile struct { Tenant string @@ -59,6 +102,39 @@ type RouteProfile struct { Weight float64 } +// Clone method for RouteProfile +func (rp *RouteProfile) Clone() *RouteProfile { + clone := &RouteProfile{ + Tenant: rp.Tenant, + ID: rp.ID, + Sorting: rp.Sorting, + Weight: rp.Weight, + } + if rp.FilterIDs != nil { + clone.FilterIDs = make([]string, len(rp.FilterIDs)) + copy(clone.FilterIDs, rp.FilterIDs) + } + if rp.SortingParameters != nil { + clone.SortingParameters = make([]string, len(rp.SortingParameters)) + copy(clone.SortingParameters, rp.SortingParameters) + } + if rp.Routes != nil { + clone.Routes = make([]*Route, len(rp.Routes)) + for i, route := range rp.Routes { + 
clone.Routes[i] = route.Clone() + } + } + if rp.ActivationInterval != nil { + clone.ActivationInterval = rp.ActivationInterval.Clone() + } + return clone +} + +// CacheClone returns a clone of RouteProfile used by ltcache CacheCloner +func (rp *RouteProfile) CacheClone() any { + return rp.Clone() +} + // RouteProfileWithAPIOpts is used in replicatorV1 for dispatcher type RouteProfileWithAPIOpts struct { *RouteProfile diff --git a/engine/storage_interface.go b/engine/storage_interface.go index a470dd929..197f68214 100644 --- a/engine/storage_interface.go +++ b/engine/storage_interface.go @@ -143,6 +143,9 @@ type DataDB interface { SetBackupSessionsDrv(nodeID string, tenant string, sessions []*StoredSession) error GetSessionsBackupDrv(nodeID string, tenant string) ([]*StoredSession, error) RemoveSessionsBackupDrv(nodeID, tenant, cgrid string) error + DumpDataDB() error + RewriteDataDB() error + BackupDataDB(string, bool) error } type StorDB interface { @@ -220,6 +223,9 @@ type LoadWriter interface { SetTPChargers([]*utils.TPChargerProfile) error SetTPDispatcherProfiles([]*utils.TPDispatcherProfile) error SetTPDispatcherHosts([]*utils.TPDispatcherHost) error + DumpStorDB() error + RewriteStorDB() error + BackupStorDB(string, bool) error } // NewMarshaler returns the marshaler type selected by mrshlerStr diff --git a/engine/storage_internal_datadb.go b/engine/storage_internal_datadb.go index 3777a7995..e771d6e7c 100644 --- a/engine/storage_internal_datadb.go +++ b/engine/storage_internal_datadb.go @@ -45,7 +45,7 @@ type InternalDB struct { } // NewInternalDB constructs an InternalDB -func NewInternalDB(stringIndexedFields, prefixIndexedFields []string, isDataDB bool, +func NewInternalDB(stringIndexedFields, prefixIndexedFields []string, isDataDB, clone bool, itmsCfg map[string]*config.ItemOpt) *InternalDB { tcCfg := make(map[string]*ltcache.CacheConfig, len(itmsCfg)) for k, cPcfg := range itmsCfg { @@ -53,19 +53,46 @@ func NewInternalDB(stringIndexedFields, 
prefixIndexedFields []string, isDataDB b MaxItems: cPcfg.Limit, TTL: cPcfg.TTL, StaticTTL: cPcfg.StaticTTL, + Clone: clone, } } ms, _ := NewMarshaler(config.CgrConfig().GeneralCfg().DBDataEncoding) + return newInternalDB(stringIndexedFields, prefixIndexedFields, isDataDB, ms, + ltcache.NewTransCache(tcCfg)) +} + +// newInternalDB constructs an InternalDB struct with a recovered or new TransCache +func newInternalDB(stringIndexedFields, prefixIndexedFields []string, isDataDB bool, ms Marshaler, db *ltcache.TransCache) *InternalDB { return &InternalDB{ stringIndexedFields: stringIndexedFields, prefixIndexedFields: prefixIndexedFields, cnter: utils.NewCounter(time.Now().UnixNano(), 0), ms: ms, - db: ltcache.NewTransCache(tcCfg), + db: db, isDataDB: isDataDB, } } +// Will recover a database from a dump file to memory +func RecoverDB(stringIndexedFields, prefixIndexedFields []string, isDataDB bool, + itmsCfg map[string]*config.ItemOpt, fldrPath, backupPath string, timeout time.Duration, dumpInterval, rewriteInterval time.Duration, writeLimit int) (*InternalDB, error) { + tcCfg := make(map[string]*ltcache.CacheConfig, len(itmsCfg)) + for k, cPcfg := range itmsCfg { + tcCfg[k] = &ltcache.CacheConfig{ + MaxItems: cPcfg.Limit, + TTL: cPcfg.TTL, + StaticTTL: cPcfg.StaticTTL, + Clone: true, + } + } + ms, _ := NewMarshaler(config.CgrConfig().GeneralCfg().DBDataEncoding) + tc, err := ltcache.NewTransCacheWithOfflineCollector(fldrPath, backupPath, timeout, dumpInterval, rewriteInterval, writeLimit, tcCfg, utils.Logger) + if err != nil { + return nil, err + } + return newInternalDB(stringIndexedFields, prefixIndexedFields, isDataDB, ms, tc), nil +} + // SetStringIndexedFields set the stringIndexedFields, used at StorDB reload (is thread safe) func (iDB *InternalDB) SetStringIndexedFields(stringIndexedFields []string) { iDB.indexedFieldsMutex.Lock() @@ -81,7 +108,9 @@ func (iDB *InternalDB) SetPrefixIndexedFields(prefixIndexedFields []string) { } // Close only to implement Storage
interface -func (iDB *InternalDB) Close() {} +func (iDB *InternalDB) Close() { + iDB.db.Shutdown() +} // Flush clears the cache func (iDB *InternalDB) Flush(string) error { @@ -964,3 +993,18 @@ func (iDB *InternalDB) RemoveSessionsBackupDrv(nodeID, tnt, cgrid string) error iDB.db.Remove(utils.CacheSessionsBackup, cgrid, true, utils.NonTransactional) return nil } + +// Will dump everything inside datadb to files +func (iDB *InternalDB) DumpDataDB() (err error) { + return iDB.db.DumpAll() +} + +// Will rewrite every dump file of DataDB +func (iDB *InternalDB) RewriteDataDB() (err error) { + return iDB.db.RewriteAll() +} + +// BackupDataDB will momentarely stop any dumping and rewriting until all dump folder is backed up in folder path backupFolderPath, making zip true will create a zip file in the path instead +func (iDB *InternalDB) BackupDataDB(backupFolderPath string, zip bool) (err error) { + return iDB.db.BackupDumpFolder(backupFolderPath, zip) +} diff --git a/engine/storage_internal_stordb.go b/engine/storage_internal_stordb.go index 5c771e6de..db8cb0e1b 100644 --- a/engine/storage_internal_stordb.go +++ b/engine/storage_internal_stordb.go @@ -1550,3 +1550,18 @@ func (iDB *InternalDB) SetSMCost(smCost *SMCost) (err error) { cacheCommit(utils.NonTransactional), utils.NonTransactional) return err } + +// Will dump everything inside stordb to files +func (iDB *InternalDB) DumpStorDB() (err error) { + return iDB.db.DumpAll() +} + +// Will rewrite every dump file of StorDB +func (iDB *InternalDB) RewriteStorDB() (err error) { + return iDB.db.RewriteAll() +} + +// BackupStorDB will momentarely stop any dumping and rewriting until all dump folder is backed up in folder path backupFolderPath, making zip true will create a zip file in the path instead +func (iDB *InternalDB) BackupStorDB(backupFolderPath string, zip bool) (err error) { + return iDB.db.BackupDumpFolder(backupFolderPath, zip) +} diff --git a/engine/storage_mongo_datadb.go b/engine/storage_mongo_datadb.go 
index 8b0ed6d38..baa0a5c07 100644 --- a/engine/storage_mongo_datadb.go +++ b/engine/storage_mongo_datadb.go @@ -2229,3 +2229,18 @@ func (ms *MongoStorage) RemoveSessionsBackupDrv(nodeID, tnt, cgrid string) error return err }) } + +// DumpDataDB will dump all of datadb from memory to a file, only for InternalDB +func (ms *MongoStorage) DumpDataDB() error { + return utils.ErrNotImplemented +} + +// Will rewrite every dump file of DataDB, only for InternalDB +func (ms *MongoStorage) RewriteDataDB() (err error) { + return utils.ErrNotImplemented +} + +// BackupDataDB only for InternalDB +func (ms *MongoStorage) BackupDataDB(backupFolderPath string, zip bool) (err error) { + return utils.ErrNotImplemented +} diff --git a/engine/storage_mongo_stordb.go b/engine/storage_mongo_stordb.go index 957f9c8ae..ec6e13545 100644 --- a/engine/storage_mongo_stordb.go +++ b/engine/storage_mongo_stordb.go @@ -1745,3 +1745,18 @@ func (ms *MongoStorage) RemoveVersions(vrs Versions) error { func (ms *MongoStorage) GetStorageType() string { return utils.MetaMongo } + +// DumpStorDB used only for InternalDB +func (ms *MongoStorage) DumpStorDB() (err error) { + return utils.ErrNotImplemented +} + +// RewriteStorDB used only for InternalDB +func (ms *MongoStorage) RewriteStorDB() (err error) { + return utils.ErrNotImplemented +} + +// BackupStorDB used only for InternalDB +func (ms *MongoStorage) BackupStorDB(backupFolderPath string, zip bool) (err error) { + return utils.ErrNotImplemented +} diff --git a/engine/storage_redis.go b/engine/storage_redis.go index 679a7f93e..2a2466944 100644 --- a/engine/storage_redis.go +++ b/engine/storage_redis.go @@ -143,7 +143,7 @@ func redisDial(network, addr string, attempts int, opts ...radix.DialOpt) (conn fib := utils.FibDuration(time.Millisecond, 0) for i := 0; i < attempts; i++ { if conn, err = radix.Dial(network, addr, opts...); err == nil || - (err != nil && !strings.Contains(err.Error(), redisLoadError)) { + !strings.Contains(err.Error(),
redisLoadError) { break } time.Sleep(fib()) @@ -1405,3 +1405,18 @@ func (rs *RedisStorage) RemoveSessionsBackupDrv(nodeID, tnt, cgrid string) error } return rs.Cmd(nil, redis_HDEL, utils.SessionsBackupPrefix+utils.ConcatenatedKey(tnt, nodeID), cgrid) } + +// DumpDataDB will dump all of datadb from memory to a file, only for InternalDB +func (rs *RedisStorage) DumpDataDB() error { + return utils.ErrNotImplemented +} + +// Will rewrite every dump file of DataDB, only for InternalDB +func (rs *RedisStorage) RewriteDataDB() (err error) { + return utils.ErrNotImplemented +} + +// BackupDataDB will momentarely stop any dumping and rewriting until all dump folder is backed up in folder path backupFolderPath, making zip true will create a zip file in the path instead, only for InternalDB +func (rs *RedisStorage) BackupDataDB(backupFolderPath string, zip bool) (err error) { + return utils.ErrNotImplemented +} diff --git a/engine/storage_sql.go b/engine/storage_sql.go index b88798461..790931998 100644 --- a/engine/storage_sql.go +++ b/engine/storage_sql.go @@ -1675,3 +1675,18 @@ func (sqls *SQLStorage) RemoveVersions(vrs Versions) (err error) { tx.Commit() return } + +// Will dump everything inside stordb to a file, only for InternalDB +func (sqls *SQLStorage) DumpStorDB() (err error) { + return utils.ErrNotImplemented +} + +// Will rewrite every dump file of StorDB, only for InternalDB +func (sqls *SQLStorage) RewriteStorDB() (err error) { + return utils.ErrNotImplemented +} + +// BackupStorDB used only for InternalDB +func (sqls *SQLStorage) BackupStorDB(backupFolderPath string, zip bool) (err error) { + return utils.ErrNotImplemented +} diff --git a/engine/storage_utils.go b/engine/storage_utils.go index a73c11d22..a59c8fcfd 100644 --- a/engine/storage_utils.go +++ b/engine/storage_utils.go @@ -55,7 +55,17 @@ func NewDataDBConn(dbType, host, port, name, user, case utils.MetaMongo: d, err = NewMongoStorage(opts.MongoConnScheme, host, port, name, user, pass, marshaler, 
utils.DataDB, nil, opts.MongoQueryTimeout) case utils.MetaInternal: - d = NewInternalDB(nil, nil, true, itmsCfg) + if config.CgrConfig().DataDbCfg().Opts.InternalDBDumpInterval != 0 { + d, err = RecoverDB(nil, nil, true, itmsCfg, + config.CgrConfig().DataDbCfg().Opts.InternalDBDumpPath, + config.CgrConfig().DataDbCfg().Opts.InternalDBBackupPath, + config.CgrConfig().DataDbCfg().Opts.InternalDBStartTimeout, + config.CgrConfig().DataDbCfg().Opts.InternalDBDumpInterval, + config.CgrConfig().DataDbCfg().Opts.InternalDBRewriteInterval, + config.CgrConfig().DataDbCfg().Opts.InternalDBWriteLimit) + return + } + d = NewInternalDB(nil, nil, true, true, itmsCfg) default: err = fmt.Errorf("unsupported db_type <%s>", dbType) } @@ -77,7 +87,17 @@ func NewStorDBConn(dbType, host, port, name, user, pass, marshaler string, db, err = NewMySQLStorage(host, port, name, user, pass, marshaler, opts.SQLMaxOpenConns, opts.SQLMaxIdleConns, opts.SQLLogLevel, opts.SQLConnMaxLifetime, opts.MySQLLocation, opts.MySQLDSNParams) case utils.MetaInternal: - db = NewInternalDB(stringIndexedFields, prefixIndexedFields, false, itmsCfg) + if config.CgrConfig().StorDbCfg().Opts.InternalDBDumpInterval != 0 { + db, err = RecoverDB(stringIndexedFields, prefixIndexedFields, false, itmsCfg, + config.CgrConfig().StorDbCfg().Opts.InternalDBDumpPath, + config.CgrConfig().DataDbCfg().Opts.InternalDBBackupPath, + config.CgrConfig().StorDbCfg().Opts.InternalDBStartTimeout, + config.CgrConfig().StorDbCfg().Opts.InternalDBDumpInterval, + config.CgrConfig().StorDbCfg().Opts.InternalDBRewriteInterval, + config.CgrConfig().StorDbCfg().Opts.InternalDBWriteLimit) + return + } + db = NewInternalDB(stringIndexedFields, prefixIndexedFields, false, true, itmsCfg) default: err = fmt.Errorf("unknown db '%s' valid options are [%s, %s, %s, %s]", dbType, utils.MetaMySQL, utils.MetaMongo, utils.MetaPostgres, utils.MetaInternal) @@ -118,6 +138,27 @@ type SMCost struct { CostDetails *EventCost } +// Clone clones SMCost +func (s 
*SMCost) Clone() *SMCost { + clone := &SMCost{ + CGRID: s.CGRID, + RunID: s.RunID, + OriginHost: s.OriginHost, + OriginID: s.OriginID, + CostSource: s.CostSource, + Usage: s.Usage, + } + if s.CostDetails != nil { + clone.CostDetails = s.CostDetails.Clone() + } + return clone +} + +// CacheClone returns a clone of SMCost used by ltcache CacheCloner +func (s *SMCost) CacheClone() any { + return s.Clone() +} + type AttrCDRSStoreSMCost struct { Cost *SMCost CheckDuplicate bool diff --git a/engine/thresholds.go b/engine/thresholds.go index 638e73304..3a538a831 100644 --- a/engine/thresholds.go +++ b/engine/thresholds.go @@ -54,6 +54,38 @@ type ThresholdProfile struct { lkID string // holds the reference towards guardian lock key } +// Clone clones *ThresholdProfile +func (tp *ThresholdProfile) Clone() *ThresholdProfile { + clone := &ThresholdProfile{ + Tenant: tp.Tenant, + ID: tp.ID, + MaxHits: tp.MaxHits, + MinHits: tp.MinHits, + MinSleep: tp.MinSleep, + Blocker: tp.Blocker, + Weight: tp.Weight, + Async: tp.Async, + lkID: tp.lkID, + } + if tp.FilterIDs != nil { + clone.FilterIDs = make([]string, len(tp.FilterIDs)) + copy(clone.FilterIDs, tp.FilterIDs) + } + if tp.ActionIDs != nil { + clone.ActionIDs = make([]string, len(tp.ActionIDs)) + copy(clone.ActionIDs, tp.ActionIDs) + } + if tp.ActivationInterval != nil { + clone.ActivationInterval = tp.ActivationInterval.Clone() + } + return clone +} + +// CacheClone returns a clone of ThresholdProfile used by ltcache CacheCloner +func (tp *ThresholdProfile) CacheClone() any { + return tp.Clone() +} + // TenantID returns the concatenated key beteen tenant and ID func (tp *ThresholdProfile) TenantID() string { return utils.ConcatenatedKey(tp.Tenant, tp.ID) @@ -107,6 +139,30 @@ type Threshold struct { dirty *bool // needs save } +// Clone clones *Threshold +func (t *Threshold) Clone() *Threshold { + clone := &Threshold{ + Tenant: t.Tenant, + ID: t.ID, + Hits: t.Hits, + Snooze: t.Snooze, + lkID: t.lkID, + } + if t.tPrfl != nil { + 
clone.tPrfl = t.tPrfl.Clone() + } + if t.dirty != nil { + clone.dirty = new(bool) + *clone.dirty = *t.dirty + } + return clone +} + +// CacheClone returns a clone of Threshold used by ltcache CacheCloner +func (t *Threshold) CacheClone() any { + return t.Clone() +} + // TenantID returns the concatenated key beteen tenant and ID func (t *Threshold) TenantID() string { return utils.ConcatenatedKey(t.Tenant, t.ID) diff --git a/engine/tpreader_test.go b/engine/tpreader_test.go index 746afe288..ee3545990 100644 --- a/engine/tpreader_test.go +++ b/engine/tpreader_test.go @@ -1047,7 +1047,9 @@ func TestTPReaderLoadDestinationsFiltered(t *testing.T) { MaxItems: 3, TTL: time.Minute * 30, StaticTTL: false, - OnEvicted: func(itmID string, value any) { + OnEvicted: []func(itmID string, value interface{}){ + func(itmID string, value any) { + }, }, }}, ) @@ -1192,7 +1194,9 @@ func TestTpReaderLoadAccountActions(t *testing.T) { MaxItems: 3, TTL: time.Minute * 30, StaticTTL: false, - OnEvicted: func(itmID string, value any) { + OnEvicted: []func(itmID string, value any){ + func(itmID string, value any) { + }, }, }}, ) @@ -1393,7 +1397,9 @@ func TestTpReaderLoadTimingsErr(t *testing.T) { MaxItems: 3, TTL: time.Minute * 30, StaticTTL: false, - OnEvicted: func(itmID string, value any) { + OnEvicted: []func(itmID string, value any){ + func(itmID string, value any) { + }, }, }}, ) @@ -1425,7 +1431,9 @@ func TestLoadDestinationRatesErr(t *testing.T) { MaxItems: 3, TTL: time.Minute * 30, StaticTTL: false, - OnEvicted: func(itmID string, value any) { + OnEvicted: []func(itmID string, value any){ + func(itmID string, value any) { + }, }, }, }, @@ -1517,7 +1525,9 @@ func TestLoadRatingProfilesFiltered(t *testing.T) { MaxItems: 3, TTL: time.Minute * 30, StaticTTL: false, - OnEvicted: func(itmID string, value any) { + OnEvicted: []func(itmID string, value any){ + func(itmID string, value any) { + }, }, }, }, diff --git a/ers/ers.go b/ers/ers.go index 9827ae16e..68a3f9e08 100644 --- 
a/ers/ers.go +++ b/ers/ers.go @@ -61,7 +61,7 @@ func NewERService(cfg *config.CGRConfig, datadb *engine.DataManager, filterS *en filterS: filterS, connMgr: connMgr, } - ers.partialCache = ltcache.NewCache(ltcache.UnlimitedCaching, cfg.ERsCfg().PartialCacheTTL, false, ers.onEvicted) + ers.partialCache = ltcache.NewCache(ltcache.UnlimitedCaching, cfg.ERsCfg().PartialCacheTTL, false, false, []func(itmID string, value any){ers.onEvicted}) return } diff --git a/go.mod b/go.mod index 7693a9458..ee47670ba 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/cgrates/cgrates -go 1.24.0 +go 1.24.1 // replace github.com/cgrates/radigo => ../radigo @@ -25,7 +25,7 @@ require ( github.com/cgrates/go-diameter v0.0.0-20250228104837-c21fdf924ab5 github.com/cgrates/janusgo v0.0.0-20240503152118-188a408d7e73 github.com/cgrates/kamevapi v0.0.0-20240307160311-26273f03eedf - github.com/cgrates/ltcache v0.0.0-20240411152156-e673692056db + github.com/cgrates/ltcache v0.0.0-20250404091005-a6ffec15918c github.com/cgrates/radigo v0.0.0-20240123163129-491c899df727 github.com/cgrates/rpcclient v0.0.0-20240816141816-52dd1074499e github.com/cgrates/sipingo v1.0.1-0.20200514112313-699ebc1cdb8e @@ -49,8 +49,8 @@ require ( github.com/segmentio/kafka-go v0.4.47 github.com/ugorji/go/codec v1.2.12 go.mongodb.org/mongo-driver v1.16.1 - golang.org/x/crypto v0.26.0 - golang.org/x/net v0.28.0 + golang.org/x/crypto v0.28.0 + golang.org/x/net v0.30.0 golang.org/x/oauth2 v0.22.0 google.golang.org/api v0.192.0 gorm.io/driver/mysql v1.5.7 @@ -71,6 +71,7 @@ require ( github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common v0.48.0 // indirect go.opentelemetry.io/otel/sdk v1.24.0 // indirect + golang.org/x/exp v0.0.0-20241004190924-225e2abe05e6 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240730163845-b1a4ccb954bf // indirect ) @@ -139,11 +140,11 @@ require ( go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect 
go.opentelemetry.io/otel/trace v1.24.0 // indirect - golang.org/x/mod v0.18.0 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.22.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/text v0.19.0 // indirect + golang.org/x/tools v0.26.0 // indirect golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect google.golang.org/grpc v1.64.1 // indirect diff --git a/go.sum b/go.sum index 0a29c7210..2c1932fbc 100644 --- a/go.sum +++ b/go.sum @@ -85,8 +85,8 @@ github.com/cgrates/janusgo v0.0.0-20240503152118-188a408d7e73 h1:7AYhvpegrSkY9tL github.com/cgrates/janusgo v0.0.0-20240503152118-188a408d7e73/go.mod h1:XBQDDjrIn+RCS4PDApYjTWwdp51NbqYfUGAYtzSB5ag= github.com/cgrates/kamevapi v0.0.0-20240307160311-26273f03eedf h1:GbMJzvtwdX1OCEmsqSts/cRCIcIMvo8AYtC2dQExWlg= github.com/cgrates/kamevapi v0.0.0-20240307160311-26273f03eedf/go.mod h1:oEq/JbubkOD2pXHvDy4r7519NkxriONisrnVpkCaNJw= -github.com/cgrates/ltcache v0.0.0-20240411152156-e673692056db h1:JRgzMS5kJ1WxaveoZ1YG/FowUDxFQXD3GjCHR7rH0Gk= -github.com/cgrates/ltcache v0.0.0-20240411152156-e673692056db/go.mod h1:jVYq943GCWxhXXxi/1NthN9ATeyFUSbwJRbN/bb+ADQ= +github.com/cgrates/ltcache v0.0.0-20250404091005-a6ffec15918c h1:IvC0/acpyQBcRl8JshUnCmaJjIY3/lkOw/AKDdUXc3w= +github.com/cgrates/ltcache v0.0.0-20250404091005-a6ffec15918c/go.mod h1:E6jHedwqFkZsXZG7MVvnCagpFQgKqDF6tg/mUqRRu34= github.com/cgrates/radigo v0.0.0-20240123163129-491c899df727 h1:rhYHlbfEPDNreekd1ZtUYi/NbFm5cEl8twQZ3c/0nYU= github.com/cgrates/radigo v0.0.0-20240123163129-491c899df727/go.mod h1:W/5LcOm9jaz0NfIFT09bxjddEai8DTSfw9poqDqtAX4= github.com/cgrates/rpcclient v0.0.0-20240816141816-52dd1074499e h1:rwvvB0F9WZNmBriZSNo6dbYy1H26yueeLBkuxQMUW0E= @@ -307,17 +307,19 @@ golang.org/x/crypto 
v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20241004190924-225e2abe05e6 h1:1wqE9dj9NpSm04INVsJhhEUzhuDVjbcyKH91sVyPATw= +golang.org/x/exp v0.0.0-20241004190924-225e2abe05e6/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -335,8 +337,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= @@ -368,8 +370,8 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod 
h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -383,8 +385,8 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -394,8 +396,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/packages/copr.fedorainfracloud.org/cgrates.spec b/packages/copr.fedorainfracloud.org/cgrates.spec index 2b6030e40..dc3045613 100644 --- a/packages/copr.fedorainfracloud.org/cgrates.spec +++ b/packages/copr.fedorainfracloud.org/cgrates.spec @@ -1,6 +1,6 @@ # Define global variables %global version 0.11.0~dev -%global go_version 1.24.0 +%global go_version 1.24.1 # Define system paths %define debug_package %{nil} diff --git a/packages/redhat_fedora/cgrates.spec b/packages/redhat_fedora/cgrates.spec index 3f7d42a2d..e1716f6e8 100644 --- a/packages/redhat_fedora/cgrates.spec +++ b/packages/redhat_fedora/cgrates.spec @@ -1,6 +1,6 @@ # Define global variables %global version 0.11.0~dev -%global go_version 1.24.0 +%global go_version 1.24.1 %global git_commit %(echo $gitLastCommit) %global releaseTag %(echo $rpmTag) diff --git a/utils/apitpdata.go b/utils/apitpdata.go index 4d43d6a83..5a0d5f757 100644 --- a/utils/apitpdata.go +++ b/utils/apitpdata.go @@ -97,6 +97,22 @@ type TPDestination struct { Prefixes []string // Prefixes attached to this destination } +// Clone method for TPDestination +func (tpd *TPDestination) Clone() *TPDestination { + clone := &TPDestination{ + TPid: tpd.TPid, + ID: tpd.ID, + } + clone.Prefixes = make([]string, len(tpd.Prefixes)) + copy(clone.Prefixes, tpd.Prefixes) + return clone +} + +// CacheClone returns a clone of TPDestination used by ltcache CacheCloner +func (tpd *TPDestination) CacheClone() any { + return tpd.Clone() +} + // This file deals with tp_* data definition // TPRateRALs -> TPRateRALs type TPRateRALs struct { @@ -105,6 +121,24 @@ type TPRateRALs struct { RateSlots []*RateSlot // One or more RateSlots } +// Clone method for TPRateRALs +func (tpr *TPRateRALs) Clone() *TPRateRALs { + clone := &TPRateRALs{ + TPid: tpr.TPid, + ID: tpr.ID, + } + clone.RateSlots = 
make([]*RateSlot, len(tpr.RateSlots)) + for i, slot := range tpr.RateSlots { + clone.RateSlots[i] = slot.Clone() + } + return clone +} + +// CacheClone returns a clone of TPRateRALs used by ltcache CacheCloner +func (tpr *TPRateRALs) CacheClone() any { + return tpr.Clone() +} + // Needed so we make sure we always use SetDurations() on a newly created value func NewRateSlot(connectFee, rate float64, rateUnit, rateIncrement, grpInterval string) (*RateSlot, error) { rs := &RateSlot{ @@ -132,6 +166,21 @@ type RateSlot struct { tag string // load validation only } +// Clone method for RateSlot +func (rs *RateSlot) Clone() *RateSlot { + return &RateSlot{ + ConnectFee: rs.ConnectFee, + Rate: rs.Rate, + RateUnit: rs.RateUnit, + RateIncrement: rs.RateIncrement, + GroupIntervalStart: rs.GroupIntervalStart, + rateUnitDur: rs.rateUnitDur, + rateIncrementDur: rs.rateIncrementDur, + groupIntervalStartDur: rs.groupIntervalStartDur, + tag: rs.tag, + } +} + // Used to set the durations we need out of strings func (rs *RateSlot) SetDurations() error { var err error @@ -162,6 +211,24 @@ type TPDestinationRate struct { DestinationRates []*DestinationRate // Set of destinationid-rateid bindings } +// Clone method for TPDestinationRate +func (tpdr *TPDestinationRate) Clone() *TPDestinationRate { + clone := &TPDestinationRate{ + TPid: tpdr.TPid, + ID: tpdr.ID, + } + clone.DestinationRates = make([]*DestinationRate, len(tpdr.DestinationRates)) + for i, destRate := range tpdr.DestinationRates { + clone.DestinationRates[i] = destRate.Clone() + } + return clone +} + +// CacheClone returns a clone of TPDestinationRate used by ltcache CacheCloner +func (tpdr *TPDestinationRate) CacheClone() any { + return tpdr.Clone() +} + type DestinationRate struct { DestinationId string // The destination identity RateId string // The rate identity @@ -172,6 +239,22 @@ type DestinationRate struct { MaxCostStrategy string } +// Clone method for DestinationRate +func (dr *DestinationRate) Clone() 
*DestinationRate { + clone := &DestinationRate{ + DestinationId: dr.DestinationId, + RateId: dr.RateId, + RoundingMethod: dr.RoundingMethod, + RoundingDecimals: dr.RoundingDecimals, + MaxCost: dr.MaxCost, + MaxCostStrategy: dr.MaxCostStrategy, + } + if dr.Rate != nil { + clone.Rate = dr.Rate.Clone() + } + return clone +} + type ApierTPTiming struct { TPid string // Tariff plan id ID string // Timing id @@ -182,6 +265,24 @@ type ApierTPTiming struct { Time string // String representing the time this timing starts on } +// Clone method for ApierTPTiming +func (apt *ApierTPTiming) Clone() *ApierTPTiming { + return &ApierTPTiming{ + TPid: apt.TPid, + ID: apt.ID, + Years: apt.Years, + Months: apt.Months, + MonthDays: apt.MonthDays, + WeekDays: apt.WeekDays, + Time: apt.Time, + } +} + +// CacheClone returns a clone of ApierTPTiming used by ltcache CacheCloner +func (apt *ApierTPTiming) CacheClone() any { + return apt.Clone() +} + type TPTiming struct { ID string Years Years @@ -192,6 +293,27 @@ type TPTiming struct { EndTime string } +// Clone clones *TPTiming +func (tpt *TPTiming) Clone() *TPTiming { + if tpt == nil { + return nil + } + return &TPTiming{ + ID: tpt.ID, + StartTime: tpt.StartTime, + EndTime: tpt.EndTime, + Years: tpt.Years, + Months: tpt.Months, + MonthDays: tpt.MonthDays, + WeekDays: tpt.WeekDays, + } +} + +// CacheClone returns a clone of TPTiming used by ltcache CacheCloner +func (tpt *TPTiming) CacheClone() any { + return tpt.Clone() +} + // Returns wheter the Timing is active at the specified time func (t *TPTiming) IsActiveAt(tm time.Time) bool { // check for years @@ -288,6 +410,24 @@ type TPRatingPlan struct { RatingPlanBindings []*TPRatingPlanBinding // Set of destinationid-rateid bindings } +// Clone method for TPRatingPlan +func (tprp *TPRatingPlan) Clone() *TPRatingPlan { + clone := &TPRatingPlan{ + TPid: tprp.TPid, + ID: tprp.ID, + } + clone.RatingPlanBindings = make([]*TPRatingPlanBinding, len(tprp.RatingPlanBindings)) + for i, binding := 
range tprp.RatingPlanBindings { + clone.RatingPlanBindings[i] = binding.Clone() + } + return clone +} + +// CacheClone returns a clone of TPRatingPlan used by ltcache CacheCloner +func (tprp *TPRatingPlan) CacheClone() any { + return tprp.Clone() +} + type TPRatingPlanBinding struct { DestinationRatesId string // The DestinationRate identity TimingId string // The timing identity @@ -295,6 +435,19 @@ type TPRatingPlanBinding struct { timing *TPTiming // Not exporting it via JSON } +// Clone method for TPRatingPlanBinding +func (tpb *TPRatingPlanBinding) Clone() *TPRatingPlanBinding { + clone := &TPRatingPlanBinding{ + DestinationRatesId: tpb.DestinationRatesId, + TimingId: tpb.TimingId, + Weight: tpb.Weight, + } + if tpb.timing != nil { + clone.timing = tpb.timing.Clone() + } + return clone +} + func (self *TPRatingPlanBinding) SetTiming(tm *TPTiming) { self.timing = tm } @@ -312,6 +465,27 @@ type TPRatingProfile struct { RatingPlanActivations []*TPRatingActivation // Activate rate profiles at specific time } +// Clone method for TPRatingProfile +func (rpf *TPRatingProfile) Clone() *TPRatingProfile { + clone := &TPRatingProfile{ + TPid: rpf.TPid, + LoadId: rpf.LoadId, + Tenant: rpf.Tenant, + Category: rpf.Category, + Subject: rpf.Subject, + } + clone.RatingPlanActivations = make([]*TPRatingActivation, len(rpf.RatingPlanActivations)) + for i, activation := range rpf.RatingPlanActivations { + clone.RatingPlanActivations[i] = activation.Clone() + } + return clone +} + +// CacheClone returns a clone of TPRatingProfile used by ltcache CacheCloner +func (rpf *TPRatingProfile) CacheClone() any { + return rpf.Clone() +} + // Used as key in nosql db (eg: redis) func (rpf *TPRatingProfile) KeyId() string { return ConcatenatedKey(MetaOut, @@ -360,6 +534,15 @@ type TPRatingActivation struct { FallbackSubjects string // So we follow the api } +// Clone method for TPRatingActivation +func (tpa *TPRatingActivation) Clone() *TPRatingActivation { + return &TPRatingActivation{ + 
ActivationTime: tpa.ActivationTime, + RatingPlanId: tpa.RatingPlanId, + FallbackSubjects: tpa.FallbackSubjects, + } +} + // FallbackSubjKeys generates keys for dataDB lookup with the format "*out:tenant:tor:subject". func FallbackSubjKeys(tenant, tor, fallbackSubjects string) []string { if fallbackSubjects == "" { @@ -393,6 +576,24 @@ type TPActions struct { Actions []*TPAction // Set of actions this Actions profile will perform } +// Clone method for TPActions +func (tpa *TPActions) Clone() *TPActions { + clone := &TPActions{ + TPid: tpa.TPid, + ID: tpa.ID, + } + clone.Actions = make([]*TPAction, len(tpa.Actions)) + for i, action := range tpa.Actions { + clone.Actions[i] = action.Clone() + } + return clone +} + +// CacheClone returns a clone of TPActions used by ltcache CacheCloner +func (tpa *TPActions) CacheClone() any { + return tpa.Clone() +} + type TPAction struct { Identifier string // Identifier mapped in the code BalanceId string // Balance identification string (account scope) @@ -413,36 +614,131 @@ type TPAction struct { Weight float64 // Action's weight } +// Clone method for TPAction +func (tpa *TPAction) Clone() *TPAction { + return &TPAction{ + Identifier: tpa.Identifier, + BalanceId: tpa.BalanceId, + BalanceUuid: tpa.BalanceUuid, + BalanceType: tpa.BalanceType, + Units: tpa.Units, + ExpiryTime: tpa.ExpiryTime, + Filters: tpa.Filters, + TimingTags: tpa.TimingTags, + DestinationIds: tpa.DestinationIds, + RatingSubject: tpa.RatingSubject, + Categories: tpa.Categories, + SharedGroups: tpa.SharedGroups, + BalanceWeight: tpa.BalanceWeight, + ExtraParameters: tpa.ExtraParameters, + BalanceBlocker: tpa.BalanceBlocker, + BalanceDisabled: tpa.BalanceDisabled, + Weight: tpa.Weight, + } +} + type TPSharedGroups struct { TPid string ID string SharedGroups []*TPSharedGroup } +// Clone method for TPSharedGroups +func (tpsg *TPSharedGroups) Clone() *TPSharedGroups { + clone := &TPSharedGroups{ + TPid: tpsg.TPid, + ID: tpsg.ID, + } + clone.SharedGroups = 
make([]*TPSharedGroup, len(tpsg.SharedGroups)) + for i, sharedGroup := range tpsg.SharedGroups { + clone.SharedGroups[i] = sharedGroup.Clone() + } + return clone +} + +// CacheClone returns a clone of TPSharedGroups used by ltcache CacheCloner +func (tpsg *TPSharedGroups) CacheClone() any { + return tpsg.Clone() +} + type TPSharedGroup struct { Account string Strategy string RatingSubject string } +// Clone method for TPSharedGroup +func (tpsg *TPSharedGroup) Clone() *TPSharedGroup { + return &TPSharedGroup{ + Account: tpsg.Account, + Strategy: tpsg.Strategy, + RatingSubject: tpsg.RatingSubject, + } +} + type TPActionPlan struct { TPid string // Tariff plan id ID string // ActionPlan id ActionPlan []*TPActionTiming // Set of ActionTiming bindings this profile will group } +// Clone method for TPActionPlan +func (tap *TPActionPlan) Clone() *TPActionPlan { + clone := &TPActionPlan{ + TPid: tap.TPid, + ID: tap.ID, + } + clone.ActionPlan = make([]*TPActionTiming, len(tap.ActionPlan)) + for i, actionTiming := range tap.ActionPlan { + clone.ActionPlan[i] = actionTiming.Clone() + } + return clone +} + +// CacheClone returns a clone of TPActionPlan used by ltcache CacheCloner +func (tap *TPActionPlan) CacheClone() any { + return tap.Clone() +} + type TPActionTiming struct { ActionsId string // Actions id TimingId string // Timing profile id Weight float64 // Binding's weight } +// Clone method for TPActionTiming +func (tat *TPActionTiming) Clone() *TPActionTiming { + return &TPActionTiming{ + ActionsId: tat.ActionsId, + TimingId: tat.TimingId, + Weight: tat.Weight, + } +} + type TPActionTriggers struct { TPid string // Tariff plan id ID string // action trigger id ActionTriggers []*TPActionTrigger // Set of triggers grouped in this profile } +// Clone method for TPActionTriggers +func (tpat *TPActionTriggers) Clone() *TPActionTriggers { + clone := &TPActionTriggers{ + TPid: tpat.TPid, + ID: tpat.ID, + } + clone.ActionTriggers = make([]*TPActionTrigger, 
len(tpat.ActionTriggers)) + for i, actionTrigger := range tpat.ActionTriggers { + clone.ActionTriggers[i] = actionTrigger.Clone() + } + return clone +} + +// CacheClone returns a clone of TPActionTriggers used by ltcache CacheCloner +func (tpat *TPActionTriggers) CacheClone() any { + return tpat.Clone() +} + type TPActionTrigger struct { Id string // group id UniqueID string // individual id @@ -467,6 +763,33 @@ type TPActionTrigger struct { Weight float64 // weight } +// Clone method for TPActionTrigger +func (tpat *TPActionTrigger) Clone() *TPActionTrigger { + return &TPActionTrigger{ + Id: tpat.Id, + UniqueID: tpat.UniqueID, + ThresholdType: tpat.ThresholdType, + ThresholdValue: tpat.ThresholdValue, + Recurrent: tpat.Recurrent, + MinSleep: tpat.MinSleep, + ExpirationDate: tpat.ExpirationDate, + ActivationDate: tpat.ActivationDate, + BalanceId: tpat.BalanceId, + BalanceType: tpat.BalanceType, + BalanceDestinationIds: tpat.BalanceDestinationIds, + BalanceWeight: tpat.BalanceWeight, + BalanceExpirationDate: tpat.BalanceExpirationDate, + BalanceTimingTags: tpat.BalanceTimingTags, + BalanceRatingSubject: tpat.BalanceRatingSubject, + BalanceCategories: tpat.BalanceCategories, + BalanceSharedGroups: tpat.BalanceSharedGroups, + BalanceBlocker: tpat.BalanceBlocker, + BalanceDisabled: tpat.BalanceDisabled, + ActionsId: tpat.ActionsId, + Weight: tpat.Weight, + } +} + type TPAccountActions struct { TPid string // Tariff plan id LoadId string // LoadId, used to group actions on a load @@ -478,6 +801,25 @@ type TPAccountActions struct { Disabled bool } +// Clone method for TPAccountActions +func (aa *TPAccountActions) Clone() *TPAccountActions { + return &TPAccountActions{ + TPid: aa.TPid, + LoadId: aa.LoadId, + Tenant: aa.Tenant, + Account: aa.Account, + ActionPlanId: aa.ActionPlanId, + ActionTriggersId: aa.ActionTriggersId, + AllowNegative: aa.AllowNegative, + Disabled: aa.Disabled, + } +} + +// CacheClone returns a clone of TPAccountActions used by ltcache CacheCloner 
+func (aa *TPAccountActions) CacheClone() any { + return aa.Clone() +} + // Returns the id used in some nosql dbs (eg: redis) func (aa *TPAccountActions) KeyId() string { return ConcatenatedKey(aa.Tenant, aa.Account) @@ -964,12 +1306,48 @@ type TPResourceProfile struct { ThresholdIDs []string // Thresholds to check after changing Limit } +// Clone method for TPResourceProfile +func (trp *TPResourceProfile) Clone() *TPResourceProfile { + clone := &TPResourceProfile{ + TPid: trp.TPid, + Tenant: trp.Tenant, + ID: trp.ID, + UsageTTL: trp.UsageTTL, + Limit: trp.Limit, + AllocationMessage: trp.AllocationMessage, + Blocker: trp.Blocker, + Stored: trp.Stored, + Weight: trp.Weight, + } + clone.FilterIDs = make([]string, len(trp.FilterIDs)) + copy(clone.FilterIDs, trp.FilterIDs) + clone.ThresholdIDs = make([]string, len(trp.ThresholdIDs)) + copy(clone.ThresholdIDs, trp.ThresholdIDs) + if trp.ActivationInterval != nil { + clone.ActivationInterval = trp.ActivationInterval.Clone() + } + return clone +} + +// CacheClone returns a clone of TPResourceProfile used by ltcache CacheCloner +func (trp *TPResourceProfile) CacheClone() any { + return trp.Clone() +} + // TPActivationInterval represents an activation interval for an item type TPActivationInterval struct { ActivationTime string ExpiryTime string } +// Clone method for TPActivationInterval +func (tai *TPActivationInterval) Clone() *TPActivationInterval { + return &TPActivationInterval{ + ActivationTime: tai.ActivationTime, + ExpiryTime: tai.ExpiryTime, + } +} + type ArgsComputeFilterIndexIDs struct { Tenant string Context string @@ -1016,6 +1394,17 @@ type ActivationInterval struct { ExpiryTime time.Time } +// Clone clones ActivationInterval +func (ai *ActivationInterval) Clone() *ActivationInterval { + if ai == nil { + return nil + } + return &ActivationInterval{ + ActivationTime: ai.ActivationTime, + ExpiryTime: ai.ExpiryTime, + } +} + func (ai *ActivationInterval) IsActiveAtTime(atTime time.Time) bool { return 
(ai.ActivationTime.IsZero() || ai.ActivationTime.Before(atTime)) && (ai.ExpiryTime.IsZero() || ai.ExpiryTime.After(atTime)) @@ -1027,6 +1416,14 @@ type MetricWithFilters struct { MetricID string } +// Clone method for MetricWithFilters +func (mwf *MetricWithFilters) Clone() *MetricWithFilters { + return &MetricWithFilters{ + FilterIDs: slices.Clone(mwf.FilterIDs), + MetricID: mwf.MetricID, + } +} + // TPStatProfile is used in APIs to manage remotely offline StatProfile type TPStatProfile struct { TPid string @@ -1044,6 +1441,38 @@ type TPStatProfile struct { ThresholdIDs []string } +// Clone method for TPStatProfile +func (tsp *TPStatProfile) Clone() *TPStatProfile { + clone := &TPStatProfile{ + TPid: tsp.TPid, + Tenant: tsp.Tenant, + ID: tsp.ID, + QueueLength: tsp.QueueLength, + TTL: tsp.TTL, + Blocker: tsp.Blocker, + Stored: tsp.Stored, + Weight: tsp.Weight, + MinItems: tsp.MinItems, + } + clone.FilterIDs = make([]string, len(tsp.FilterIDs)) + copy(clone.FilterIDs, tsp.FilterIDs) + clone.ThresholdIDs = make([]string, len(tsp.ThresholdIDs)) + copy(clone.ThresholdIDs, tsp.ThresholdIDs) + clone.Metrics = make([]*MetricWithFilters, len(tsp.Metrics)) + for i, metric := range tsp.Metrics { + clone.Metrics[i] = metric.Clone() + } + if tsp.ActivationInterval != nil { + clone.ActivationInterval = tsp.ActivationInterval.Clone() + } + return clone +} + +// CacheClone returns a clone of TPStatProfile used by ltcache CacheCloner +func (tsp *TPStatProfile) CacheClone() any { + return tsp.Clone() +} + // TPRankingProfile is used in APIs to manage remotely offline RankingProfile type TPRankingProfile struct { TPid string @@ -1058,6 +1487,40 @@ type TPRankingProfile struct { ThresholdIDs []string } +// Clone method for TPRankingProfile +func (trp *TPRankingProfile) Clone() *TPRankingProfile { + clone := &TPRankingProfile{ + TPid: trp.TPid, + Tenant: trp.Tenant, + ID: trp.ID, + Schedule: trp.Schedule, + Sorting: trp.Sorting, + Stored: trp.Stored, + } + if trp.StatIDs != nil { + 
clone.StatIDs = make([]string, len(trp.StatIDs)) + copy(clone.StatIDs, trp.StatIDs) + } + if trp.MetricIDs != nil { + clone.MetricIDs = make([]string, len(trp.MetricIDs)) + copy(clone.MetricIDs, trp.MetricIDs) + } + if trp.SortingParameters != nil { + clone.SortingParameters = make([]string, len(trp.SortingParameters)) + copy(clone.SortingParameters, trp.SortingParameters) + } + if trp.ThresholdIDs != nil { + clone.ThresholdIDs = make([]string, len(trp.ThresholdIDs)) + copy(clone.ThresholdIDs, trp.ThresholdIDs) + } + return clone +} + +// CacheClone returns a clone of TPRankingProfile used by ltcache CacheCloner +func (trp *TPRankingProfile) CacheClone() any { + return trp.Clone() +} + // MetricWithSettings adds specific settings to the Metric type MetricWithSettings struct { MetricID string @@ -1081,6 +1544,37 @@ type TPTrendsProfile struct { ThresholdIDs []string } +// Clone method for TPTrendsProfile +func (ttp *TPTrendsProfile) Clone() *TPTrendsProfile { + clone := &TPTrendsProfile{ + TPid: ttp.TPid, + Tenant: ttp.Tenant, + ID: ttp.ID, + Schedule: ttp.Schedule, + StatID: ttp.StatID, + TTL: ttp.TTL, + QueueLength: ttp.QueueLength, + MinItems: ttp.MinItems, + CorrelationType: ttp.CorrelationType, + Tolerance: ttp.Tolerance, + Stored: ttp.Stored, + } + if ttp.Metrics != nil { + clone.Metrics = make([]string, len(ttp.Metrics)) + copy(clone.Metrics, ttp.Metrics) + } + if ttp.ThresholdIDs != nil { + clone.ThresholdIDs = make([]string, len(ttp.ThresholdIDs)) + copy(clone.ThresholdIDs, ttp.ThresholdIDs) + } + return clone +} + +// CacheClone returns a clone of TPTrendsProfile used by ltcache CacheCloner +func (ttp *TPTrendsProfile) CacheClone() any { + return ttp.Clone() +} + // TPThresholdProfile is used in APIs to manage remotely offline ThresholdProfile type TPThresholdProfile struct { TPid string @@ -1097,6 +1591,34 @@ type TPThresholdProfile struct { Async bool } +// Clone method for TPThresholdProfile +func (ttp *TPThresholdProfile) Clone() *TPThresholdProfile { 
+	clone := &TPThresholdProfile{
+		TPid:     ttp.TPid,
+		Tenant:   ttp.Tenant,
+		ID:       ttp.ID,
+		MaxHits:  ttp.MaxHits,
+		MinHits:  ttp.MinHits,
+		MinSleep: ttp.MinSleep,
+		Blocker:  ttp.Blocker,
+		Weight:   ttp.Weight,
+		Async:    ttp.Async,
+	}
+	clone.FilterIDs = make([]string, len(ttp.FilterIDs))
+	copy(clone.FilterIDs, ttp.FilterIDs)
+	clone.ActionIDs = make([]string, len(ttp.ActionIDs))
+	copy(clone.ActionIDs, ttp.ActionIDs)
+	if ttp.ActivationInterval != nil {
+		clone.ActivationInterval = ttp.ActivationInterval.Clone()
+	}
+	return clone
+}
+
+// CacheClone returns a clone of TPThresholdProfile used by ltcache CacheCloner
+func (ttp *TPThresholdProfile) CacheClone() any {
+	return ttp.Clone()
+}
+
 // TPFilterProfile is used in APIs to manage remotely offline FilterProfile
 type TPFilterProfile struct {
 	TPid string
@@ -1106,6 +1628,35 @@ type TPFilterProfile struct {
 	ActivationInterval *TPActivationInterval // Time when this limit becomes active and expires
 }
 
+// Clone method for TPFilterProfile
+func (tfp *TPFilterProfile) Clone() *TPFilterProfile {
+	clone := &TPFilterProfile{
+		TPid:   tfp.TPid,
+		Tenant: tfp.Tenant,
+		ID:     tfp.ID,
+	}
+	if len(tfp.Filters) > 0 {
+		clone.Filters = make([]*TPFilter, len(tfp.Filters))
+		for i, filter := range tfp.Filters {
+			clone.Filters[i] = &TPFilter{
+				Type:    filter.Type,
+				Element: filter.Element,
+				Values:  make([]string, len(filter.Values)),
+			}
+			copy(clone.Filters[i].Values, filter.Values)
+		}
+	}
+	if tfp.ActivationInterval != nil {
+		clone.ActivationInterval = tfp.ActivationInterval.Clone()
+	}
+	return clone
+}
+
+// CacheClone returns a clone of TPFilterProfile used by ltcache CacheCloner
+func (tfp *TPFilterProfile) CacheClone() any {
+	return tfp.Clone()
+}
+
 // TPFilter is used in TPFilterProfile
 type TPFilter struct {
 	Type string // Filter type (*string, *timing, *rsr_filters, *cdr_stats)
@@ -1126,6 +1677,21 @@ type TPRoute struct {
 	RouteParameters string
 }
 
+// Clone method for TPRoute
+func (r *TPRoute) Clone() *TPRoute {
+	return &TPRoute{
+		ID:              r.ID,
+		Weight:          r.Weight,
+		Blocker:         r.Blocker,
+		RouteParameters: r.RouteParameters,
+		FilterIDs:       slices.Clone(r.FilterIDs),
+		AccountIDs:      slices.Clone(r.AccountIDs),
+		RatingPlanIDs:   slices.Clone(r.RatingPlanIDs),
+		ResourceIDs:     slices.Clone(r.ResourceIDs),
+		StatIDs:         slices.Clone(r.StatIDs),
+	}
+}
+
 // TPRouteProfile is used in APIs to manage remotely offline RouteProfile
 type TPRouteProfile struct {
 	TPid string
@@ -1139,6 +1705,40 @@ type TPRouteProfile struct {
 	Weight float64
 }
 
+// Clone method for TPRouteProfile
+func (tprp *TPRouteProfile) Clone() *TPRouteProfile {
+	clone := &TPRouteProfile{
+		TPid:    tprp.TPid,
+		Tenant:  tprp.Tenant,
+		ID:      tprp.ID,
+		Sorting: tprp.Sorting,
+		Weight:  tprp.Weight,
+	}
+	if tprp.FilterIDs != nil {
+		clone.FilterIDs = make([]string, len(tprp.FilterIDs))
+		copy(clone.FilterIDs, tprp.FilterIDs)
+	}
+	if tprp.SortingParameters != nil {
+		clone.SortingParameters = make([]string, len(tprp.SortingParameters))
+		copy(clone.SortingParameters, tprp.SortingParameters)
+	}
+	if tprp.Routes != nil {
+		clone.Routes = make([]*TPRoute, len(tprp.Routes))
+		for i, route := range tprp.Routes {
+			clone.Routes[i] = route.Clone() // Use the Clone method of TPRoute
+		}
+	}
+	if tprp.ActivationInterval != nil {
+		clone.ActivationInterval = tprp.ActivationInterval.Clone()
+	}
+	return clone
+}
+
+// CacheClone returns a clone of TPRouteProfile used by ltcache CacheCloner
+func (tprp *TPRouteProfile) CacheClone() any {
+	return tprp.Clone()
+}
+
 // TPAttribute is used in TPAttributeProfile
 type TPAttribute struct {
 	FilterIDs []string
@@ -1147,6 +1747,20 @@ type TPAttribute struct {
 	Value string
 }
 
+// Clone clones TPAttribute
+func (tpa *TPAttribute) Clone() *TPAttribute {
+	clone := &TPAttribute{
+		Path:  tpa.Path,
+		Type:  tpa.Type,
+		Value: tpa.Value,
+	}
+	if tpa.FilterIDs != nil {
+		clone.FilterIDs = make([]string, len(tpa.FilterIDs))
+		copy(clone.FilterIDs, tpa.FilterIDs)
+	}
+	return clone
+}
+
 // TPAttributeProfile is used in APIs to manage remotely offline AttributeProfile
 type TPAttributeProfile struct {
 	TPid string
@@ -1160,6 +1774,40 @@ type TPAttributeProfile struct {
 	Weight float64
 }
 
+// Clone clones TPAttributeProfile
+func (tpap *TPAttributeProfile) Clone() *TPAttributeProfile {
+	clone := &TPAttributeProfile{
+		TPid:    tpap.TPid,
+		Tenant:  tpap.Tenant,
+		ID:      tpap.ID,
+		Blocker: tpap.Blocker,
+		Weight:  tpap.Weight,
+	}
+	if tpap.FilterIDs != nil {
+		clone.FilterIDs = make([]string, len(tpap.FilterIDs))
+		copy(clone.FilterIDs, tpap.FilterIDs)
+	}
+	if tpap.Contexts != nil {
+		clone.Contexts = make([]string, len(tpap.Contexts))
+		copy(clone.Contexts, tpap.Contexts)
+	}
+	if tpap.Attributes != nil {
+		clone.Attributes = make([]*TPAttribute, len(tpap.Attributes))
+		for i, attribute := range tpap.Attributes {
+			clone.Attributes[i] = attribute.Clone() // Use the Clone method of TPAttribute
+		}
+	}
+	if tpap.ActivationInterval != nil {
+		clone.ActivationInterval = tpap.ActivationInterval.Clone()
+	}
+	return clone
+}
+
+// CacheClone returns a clone of TPAttributeProfile used by ltcache CacheCloner
+func (tpap *TPAttributeProfile) CacheClone() any {
+	return tpap.Clone()
+}
+
 // TPChargerProfile is used in APIs to manage remotely offline ChargerProfile
 type TPChargerProfile struct {
 	TPid string
@@ -1172,6 +1820,34 @@ type TPChargerProfile struct {
 	Weight float64
 }
 
+// Clone clones TPChargerProfile
+func (tpcp *TPChargerProfile) Clone() *TPChargerProfile {
+	clone := &TPChargerProfile{
+		TPid:   tpcp.TPid,
+		Tenant: tpcp.Tenant,
+		ID:     tpcp.ID,
+		RunID:  tpcp.RunID,
+		Weight: tpcp.Weight,
+	}
+	if tpcp.FilterIDs != nil {
+		clone.FilterIDs = make([]string, len(tpcp.FilterIDs))
+		copy(clone.FilterIDs, tpcp.FilterIDs)
+	}
+	if tpcp.ActivationInterval != nil {
+		clone.ActivationInterval = tpcp.ActivationInterval.Clone()
+	}
+	if tpcp.AttributeIDs != nil {
+		clone.AttributeIDs = make([]string, len(tpcp.AttributeIDs))
+		copy(clone.AttributeIDs, tpcp.AttributeIDs)
+	}
+	return clone
+}
+
+// CacheClone returns a clone of TPChargerProfile used by ltcache CacheCloner
+func (tpcp *TPChargerProfile) CacheClone() any {
+	return tpcp.Clone()
+}
+
 type TPTntID struct {
 	TPid string
 	Tenant string
@@ -1192,6 +1868,44 @@ type TPDispatcherProfile struct {
 	Hosts []*TPDispatcherHostProfile
 }
 
+// Clone clones TPDispatcherProfile
+func (tpdp *TPDispatcherProfile) Clone() *TPDispatcherProfile {
+	clone := &TPDispatcherProfile{
+		TPid:     tpdp.TPid,
+		Tenant:   tpdp.Tenant,
+		ID:       tpdp.ID,
+		Strategy: tpdp.Strategy,
+		Weight:   tpdp.Weight,
+	}
+	if tpdp.Subsystems != nil {
+		clone.Subsystems = make([]string, len(tpdp.Subsystems))
+		copy(clone.Subsystems, tpdp.Subsystems)
+	}
+	if tpdp.FilterIDs != nil {
+		clone.FilterIDs = make([]string, len(tpdp.FilterIDs))
+		copy(clone.FilterIDs, tpdp.FilterIDs)
+	}
+	if tpdp.StrategyParams != nil {
+		clone.StrategyParams = make([]any, len(tpdp.StrategyParams))
+		copy(clone.StrategyParams, tpdp.StrategyParams)
+	}
+	if tpdp.Hosts != nil {
+		clone.Hosts = make([]*TPDispatcherHostProfile, len(tpdp.Hosts))
+		for i, host := range tpdp.Hosts {
+			clone.Hosts[i] = host.Clone()
+		}
+	}
+	if tpdp.ActivationInterval != nil {
+		clone.ActivationInterval = tpdp.ActivationInterval.Clone()
+	}
+	return clone
+}
+
+// CacheClone returns a clone of TPDispatcherProfile used by ltcache CacheCloner
+func (tpdp *TPDispatcherProfile) CacheClone() any {
+	return tpdp.Clone()
+}
+
 // TPDispatcherHostProfile is used in TPDispatcherProfile
 type TPDispatcherHostProfile struct {
 	ID string
@@ -1201,6 +1915,24 @@
 	Blocker bool // no connection after this one
 }
 
+// Clone clones TPDispatcherHostProfile
+func (tpdhp *TPDispatcherHostProfile) Clone() *TPDispatcherHostProfile {
+	clone := &TPDispatcherHostProfile{
+		ID:      tpdhp.ID,
+		Weight:  tpdhp.Weight,
+		Blocker: tpdhp.Blocker,
+	}
+	if tpdhp.FilterIDs != nil {
+		clone.FilterIDs = make([]string, len(tpdhp.FilterIDs))
+		copy(clone.FilterIDs, tpdhp.FilterIDs)
+	}
+	if tpdhp.Params != nil {
+		clone.Params = make([]any, len(tpdhp.Params))
+		copy(clone.Params, tpdhp.Params)
+	}
+	return clone
+}
+
 // TPDispatcherHost is used in APIs to manage remotely offline DispatcherHost
 type TPDispatcherHost struct {
 	TPid string
@@ -1209,6 +1941,24 @@ type TPDispatcherHost struct {
 	Conn *TPDispatcherHostConn
 }
 
+// Clone clones TPDispatcherHost
+func (tpdh *TPDispatcherHost) Clone() *TPDispatcherHost {
+	clone := &TPDispatcherHost{
+		TPid:   tpdh.TPid,
+		Tenant: tpdh.Tenant,
+		ID:     tpdh.ID,
+	}
+	if tpdh.Conn != nil {
+		clone.Conn = tpdh.Conn.Clone()
+	}
+	return clone
+}
+
+// CacheClone returns a clone of TPDispatcherHost used by ltcache CacheCloner
+func (tpdh *TPDispatcherHost) CacheClone() any {
+	return tpdh.Clone()
+}
+
 // TPDispatcherHostConn is used in TPDispatcherHost
 type TPDispatcherHostConn struct {
 	Address string
@@ -1224,6 +1974,23 @@ type TPDispatcherHostConn struct {
 	CaCertificate string
 }
 
+// Clone clones TPDispatcherHostConn
+func (tpdhc *TPDispatcherHostConn) Clone() *TPDispatcherHostConn {
+	return &TPDispatcherHostConn{
+		Address:              tpdhc.Address,
+		Transport:            tpdhc.Transport,
+		ConnectAttempts:      tpdhc.ConnectAttempts,
+		Reconnects:           tpdhc.Reconnects,
+		MaxReconnectInterval: tpdhc.MaxReconnectInterval,
+		ConnectTimeout:       tpdhc.ConnectTimeout,
+		ReplyTimeout:         tpdhc.ReplyTimeout,
+		TLS:                  tpdhc.TLS,
+		ClientKey:            tpdhc.ClientKey,
+		ClientCertificate:    tpdhc.ClientCertificate,
+		CaCertificate:        tpdhc.CaCertificate,
+	}
+}
+
 type UsageInterval struct {
 	Min *time.Duration
 	Max *time.Duration
diff --git a/utils/consts.go b/utils/consts.go
index b1fe4fc27..eea030bd5 100644
--- a/utils/consts.go
+++ b/utils/consts.go
@@ -2180,30 +2180,36 @@ const (
 
 // DataDbCfg
 const (
-	DataDbTypeCfg              = "db_type"
-	DataDbHostCfg              = "db_host"
-	DataDbPortCfg              = "db_port"
-	DataDbNameCfg              = "db_name"
-	DataDbUserCfg              = "db_user"
-	DataDbPassCfg              = "db_password"
-	RedisMaxConnsCfg           = "redisMaxConns"
-	RedisConnectAttemptsCfg    = "redisConnectAttempts"
-	RedisSentinelNameCfg       = "redisSentinel"
-	RedisClusterCfg            = "redisCluster"
-	RedisClusterSyncCfg        = "redisClusterSync"
-	RedisClusterOnDownDelayCfg = "redisClusterOndownDelay"
-	RedisPoolPipelineWindowCfg = "redisPoolPipelineWindow"
-	RedisPoolPipelineLimitCfg  = "redisPoolPipelineLimit"
-	RedisConnectTimeoutCfg     = "redisConnectTimeout"
-	RedisReadTimeoutCfg        = "redisReadTimeout"
-	RedisWriteTimeoutCfg       = "redisWriteTimeout"
-	RedisTLS                   = "redisTLS"
-	RedisClientCertificate     = "redisClientCertificate"
-	RedisClientKey             = "redisClientKey"
-	RedisCACertificate         = "redisCACertificate"
-	ReplicationFilteredCfg     = "replication_filtered"
-	ReplicationCache           = "replication_cache"
-	RemoteConnIDCfg            = "remote_conn_id"
+	DataDbTypeCfg                = "db_type"
+	DataDbHostCfg                = "db_host"
+	DataDbPortCfg                = "db_port"
+	DataDbNameCfg                = "db_name"
+	DataDbUserCfg                = "db_user"
+	DataDbPassCfg                = "db_password"
+	InternalDBDumpPathCfg        = "internalDBDumpPath"
+	InternalDBBackupPathCfg      = "internalDBBackupPath"
+	InternalDBStartTimeoutCfg    = "internalDBStartTimeout"
+	InternalDBDumpIntervalCfg    = "internalDBDumpInterval"
+	InternalDBRewriteIntervalCfg = "internalDBRewriteInterval"
+	InternalDBWriteLimitCfg      = "internalDBWriteLimit"
+	RedisMaxConnsCfg             = "redisMaxConns"
+	RedisConnectAttemptsCfg      = "redisConnectAttempts"
+	RedisSentinelNameCfg         = "redisSentinel"
+	RedisClusterCfg              = "redisCluster"
+	RedisClusterSyncCfg          = "redisClusterSync"
+	RedisClusterOnDownDelayCfg   = "redisClusterOndownDelay"
+	RedisPoolPipelineWindowCfg   = "redisPoolPipelineWindow"
+	RedisPoolPipelineLimitCfg    = "redisPoolPipelineLimit"
+	RedisConnectTimeoutCfg       = "redisConnectTimeout"
+	RedisReadTimeoutCfg          = "redisReadTimeout"
+	RedisWriteTimeoutCfg         = "redisWriteTimeout"
+	RedisTLS                     = "redisTLS"
+	RedisClientCertificate       = "redisClientCertificate"
+	RedisClientKey               = "redisClientKey"
+	RedisCACertificate           = "redisCACertificate"
+	ReplicationFilteredCfg       = "replication_filtered"
+	ReplicationCache             = "replication_cache"
+	RemoteConnIDCfg              = "remote_conn_id"
 )
 
 // ItemOpt
diff --git a/utils/rsrfilters.go b/utils/rsrfilters.go
index 22fb14f26..b87e69b1e 100644
--- a/utils/rsrfilters.go
+++ b/utils/rsrfilters.go
@@ -62,6 +62,21 @@ type RSRFilter struct {
 	negative bool // Rule should not match
 }
 
+// Clone method for RSRFilter struct
+func (rsrFltr *RSRFilter) Clone() *RSRFilter {
+	if rsrFltr == nil {
+		return nil
+	}
+	rsrf := &RSRFilter{
+		filterRule: rsrFltr.filterRule,
+		negative:   rsrFltr.negative,
+	}
+	if rsrFltr.fltrRgxp != nil {
+		rsrf.fltrRgxp = rsrFltr.fltrRgxp.Copy()
+	}
+	return rsrf
+}
+
 func (rsrFltr *RSRFilter) FilterRule() string {
 	return rsrFltr.filterRule
 }