Disable Cache when using internalDB as DataDB

Authored by TeoV on 2019-11-06 15:11:17 +02:00; committed by Dan Christian Bogos
parent 4723692abd
commit ab843fa6c5
9 changed files with 80 additions and 7 deletions

View File

@@ -104,7 +104,7 @@ func TestAttributeSITMongo(t *testing.T) {
func TestAttributeSITInternal(t *testing.T) {
alsPrfConfigDIR = "tutinternal"
for _, stest := range sTestsAlsPrf {
for _, stest := range sTestsAlsPrf[:29] {
t.Run(alsPrfConfigDIR, stest)
}
}

View File

@@ -207,6 +207,9 @@ func testDispatcherSUpdateDispatcherProfile(t *testing.T) {
}
func testDispatcherSGetDispatcherProfileCache(t *testing.T) {
if dispatcherConfigDIR == "tutinternal" {
t.SkipNow()
}
var rcvStats map[string]*ltcache.CacheStats
if err := dispatcherRPC.Call(utils.CacheSv1GetCacheStats, nil, &rcvStats); err != nil {
t.Error(err)

View File

@@ -81,3 +81,47 @@ func (cCfg CacheCfg) AsTransCacheConfig() (tcCfg map[string]*ltcache.CacheConfig
}
return
}
// CGRATES_CFG_JSON_DISABLED_CACHE is used to populate cache config when DataDB is internal
const CGRATES_CFG_JSON_DISABLED_CACHE = `
{
"cache":{
"destinations": {"limit": 0},
"reverse_destinations": {"limit": 0},
"rating_plans": {"limit": 0},
"rating_profiles": {"limit": 0},
"actions": {"limit": 0},
"action_plans": {"limit": 0},
"account_action_plans": {"limit": 0},
"action_triggers": {"limit": 0},
"shared_groups": {"limit": 0},
"timings": {"limit": 0},
"resource_profiles": {"limit": 0},
"resources": {"limit": 0},
"event_resources": {"limit": 0},
"statqueue_profiles": {"limit": 0},
"statqueues": {"limit": 0},
"threshold_profiles": {"limit": 0},
"thresholds": {"limit": 0},
"filters": {"limit": 0},
"supplier_profiles": {"limit": 0},
"attribute_profiles": {"limit": 0},
"charger_profiles": {"limit": 0},
"dispatcher_profiles": {"limit": 0},
"dispatcher_hosts": {"limit": 0},
"resource_filter_indexes" : {"limit": 0},
"stat_filter_indexes" : {"limit": 0},
"threshold_filter_indexes" : {"limit": 0},
"supplier_filter_indexes" : {"limit": 0},
"attribute_filter_indexes" : {"limit": 0},
"charger_filter_indexes" : {"limit": 0},
"dispatcher_filter_indexes" : {"limit": 0},
"dispatcher_routes": {"limit": 0},
"diameter_messages": {"limit": 0},
"rpc_responses": {"limit": 0},
"closed_sessions": {"limit": 0},
"load_ids": {"limit": 0},
},
}`
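
Every partition in the constant above gets "limit": 0. My reading of the mapping (an assumption, since AsTransCacheConfig is only partly shown here): the JSON limit becomes ltcache.CacheConfig.MaxItems, and a MaxItems of 0 is ltcache's "disabled" value, so Set on such a partition stores nothing and every Get misses, which is what turns the cache off while the data keeps living in the DataDB. A minimal sketch under that assumption, with the TransCache calls written from memory:

package main

import (
	"fmt"

	"github.com/cgrates/ltcache"
)

func main() {
	// "limit": 0 from the JSON above, assumed to arrive here as MaxItems: 0
	tc := ltcache.NewTransCache(map[string]*ltcache.CacheConfig{
		"destinations": {MaxItems: 0},
	})
	tc.Set("destinations", "DST_DE", "+49", nil, true, "")
	if _, has := tc.Get("destinations", "DST_DE"); !has {
		fmt.Println("nothing cached: the partition is disabled")
	}
}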

View File

@@ -777,7 +777,26 @@ func (cfg *CGRConfig) loadDataDBCfg(jsnCfg *CgrJsonCfg) (err error) {
if jsnDataDbCfg, err = jsnCfg.DbJsonCfg(DATADB_JSN); err != nil {
return
}
return cfg.dataDbCfg.loadFromJsonCfg(jsnDataDbCfg)
if err = cfg.dataDbCfg.loadFromJsonCfg(jsnDataDbCfg); err != nil {
return
}
// in case of internalDB we need to disable the cache
// so we enforce it here
if cfg.dataDbCfg.DataDbType == utils.INTERNAL {
var customCfg *CgrJsonCfg
var cacheJsonCfg *CacheJsonCfg
if customCfg, err = NewCgrJsonCfgFromBytes([]byte(CGRATES_CFG_JSON_DISABLED_CACHE)); err != nil {
return
}
if cacheJsonCfg, err = customCfg.CacheJsonCfg(); err != nil {
return
}
if err = cfg.cacheCfg.loadFromJsonCfg(cacheJsonCfg); err != nil {
return
}
}
return
}
// loadStorDBCfg loads the StorDB section of the configuration
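
The new branch can be exercised end to end: load a configuration whose data_db type is *internal and the cache section should come back with every limit forced to 0 by the constant above. A rough sketch, assuming the NewCGRConfigFromJsonStringWithDefaults helper and the map shape of CacheCfg used elsewhere in this codebase:

package main

import (
	"fmt"

	"github.com/cgrates/cgrates/config"
)

func main() {
	cfg, err := config.NewCGRConfigFromJsonStringWithDefaults(
		`{"data_db": {"db_type": "*internal"}}`)
	if err != nil {
		panic(err)
	}
	// every partition listed in CGRATES_CFG_JSON_DISABLED_CACHE should now report limit 0
	for partition, pCfg := range cfg.CacheCfg() {
		fmt.Printf("%-30s limit=%d\n", partition, pCfg.Limit)
	}
}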

View File

@@ -15,14 +15,17 @@
"http": ":2080",
},
"data_db": {
"db_type": "*internal",
},
"stor_db": {
"db_type": "*internal",
},
"rals": {
"enabled": true,
"thresholds_conns": [
@@ -63,7 +66,7 @@
"resources": {
"enabled": true,
"store_interval": "1s",
"store_interval": "-1",
"thresholds_conns": [
{"address": "*internal"}
],
@@ -72,7 +75,7 @@
"stats": {
"enabled": true,
"store_interval": "1s",
"store_interval": "-1",
"thresholds_conns": [
{"address": "*internal"}
],
@@ -80,7 +83,7 @@
"thresholds": {
"enabled": true,
"store_interval": "1s",
"store_interval": "-1",
},
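
The store_interval values flip from "1s" to "-1" in this (presumably tutinternal) test config for the same reason: with the cache partitions disabled, resources, stat queues and thresholds cannot rely on a periodic flush picking dirty items out of cache, so -1 asks each service to write through immediately. That is exactly the branch the engine hunks below prepend *dirty = true to. A simplified, purely illustrative version of the convention (placeholder names, not the engine API):

package example

import "time"

// storeOrMark illustrates the StoreInterval convention:
//   -1 -> write-through: persist every change immediately
//   >0 -> only mark dirty; a background loop persists on the interval
//    0 -> storing disabled
func storeOrMark(storeInterval time.Duration, dirty *bool, store func()) {
	if storeInterval == 0 {
		return // storing disabled entirely
	}
	*dirty = true
	if storeInterval == -1 {
		store() // write-through: persist right away
		return
	}
	// otherwise the dirty flag is picked up by the interval-based storer
}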

View File

@@ -667,6 +667,7 @@ func (rS *ResourceService) V1AllocateResource(args utils.ArgRSv1ResourceUsage, r
continue
}
if rS.cgrcfg.ResourceSCfg().StoreInterval == -1 {
*r.dirty = true
rS.StoreResource(r)
} else {
*r.dirty = true // mark it to be saved

View File

@@ -142,7 +142,7 @@ func (sS *StatService) StoreStatQueue(sq *StatQueue) (err error) {
sq.TenantID(), err.Error()))
return
}
//since we no longer handle cache in DataManager do here a manul caching
//since we no longer handle cache in DataManager do here a manual caching
if err = sS.dm.CacheDataFromDB(utils.StatQueuePrefix, []string{sq.TenantID()}, true); err != nil {
utils.Logger.Warning(
fmt.Sprintf("<StatS> failed caching StatQueue with ID: %s, error: %s",
@@ -259,6 +259,7 @@ func (sS *StatService) processEvent(args *StatsArgsProcessEvent) (statQueueIDs [
}
if sS.cgrcfg.StatSCfg().StoreInterval != 0 && sq.dirty != nil { // don't save
if sS.cgrcfg.StatSCfg().StoreInterval == -1 {
*sq.dirty = true
sS.StoreStatQueue(sq)
} else {
*sq.dirty = true // mark it to be saved

View File

@@ -40,7 +40,8 @@ type InternalDB struct {
}
func NewInternalDB(stringIndexedFields, prefixIndexedFields []string) *InternalDB {
return &InternalDB{db: ltcache.NewTransCache(config.CgrConfig().CacheCfg().AsTransCacheConfig()),
dfltCfg, _ := config.NewDefaultCGRConfig()
return &InternalDB{db: ltcache.NewTransCache(dfltCfg.CacheCfg().AsTransCacheConfig()),
ms: NewCodecMsgpackMarshaler(), stringIndexedFields: stringIndexedFields,
prefixIndexedFields: prefixIndexedFields, cnter: utils.NewCounter(time.Now().UnixNano(), 0)}
}
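
The diff does not say why NewInternalDB stops using the live config; my reading is that, once the runtime cache config is zeroed for an internal DataDB, building the TransCache that backs the storage itself from that same config would leave it unable to hold any data, so the constructor falls back to the default limits from NewDefaultCGRConfig. The changed constructor, reassembled from the hunk above:

func NewInternalDB(stringIndexedFields, prefixIndexedFields []string) *InternalDB {
	dfltCfg, _ := config.NewDefaultCGRConfig() // error ignored, as in the commit
	return &InternalDB{
		db:                  ltcache.NewTransCache(dfltCfg.CacheCfg().AsTransCacheConfig()),
		ms:                  NewCodecMsgpackMarshaler(),
		stringIndexedFields: stringIndexedFields,
		prefixIndexedFields: prefixIndexedFields,
		cnter:               utils.NewCounter(time.Now().UnixNano(), 0),
	}
}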

View File

@@ -320,6 +320,7 @@ func (tS *ThresholdService) processEvent(args *ArgsProcessEvent) (thresholdsIDs
t.Snooze = time.Now().Add(t.tPrfl.MinSleep)
// recurrent threshold
if tS.cgrcfg.ThresholdSCfg().StoreInterval == -1 {
*t.dirty = true
tS.StoreThreshold(t)
} else {
*t.dirty = true // mark it to be saved