Adding ResourceLimiter CacheResourceLimits and indexStringFilters methods

DanB
2016-08-02 18:26:54 +02:00
parent 502aaaf173
commit f50959124f
3 changed files with 193 additions and 1 deletion


@@ -42,10 +42,82 @@ type ResourceLimit struct {
type ResourceLimiterService struct {
sync.RWMutex
stringIndexes map[string]map[string]utils.StringMap // map[fieldName]map[fieldValue]utils.StringMap[resourceID]
dataDB AccountingStorage // So we can load the data in cache and index it
}
// Index cached ResourceLimits with MetaString filter types
func (rls *ResourceLimiterService) indexStringFilters(rlIDs []string) error {
newStringIndexes := make(map[string]map[string]utils.StringMap) // Index it transactionally
var cacheIDsToIndex []string // Cache keys of RLs to be indexed
if rlIDs == nil {
cacheIDsToIndex = CacheGetEntriesKeys(utils.ResourceLimitsPrefix)
} else {
for _, rlID := range rlIDs {
cacheIDsToIndex = append(cacheIDsToIndex, utils.ResourceLimitsPrefix+rlID)
}
}
for _, cacheKey := range cacheIDsToIndex {
x, err := CacheGet(cacheKey)
if err != nil {
return err
}
rl := x.(*ResourceLimit)
for _, fltr := range rl.Filters {
if fltr.Type != MetaString {
continue
}
if _, hasIt := newStringIndexes[fltr.FieldName]; !hasIt {
newStringIndexes[fltr.FieldName] = make(map[string]utils.StringMap)
}
for _, fldVal := range fltr.Values {
if _, hasIt := newStringIndexes[fltr.FieldName][fldVal]; !hasIt {
newStringIndexes[fltr.FieldName][fldVal] = make(utils.StringMap)
}
newStringIndexes[fltr.FieldName][fldVal][rl.ID] = true
}
}
}
rls.Lock()
defer rls.Unlock()
if rlIDs == nil { // We have rebuilt the complete index
rls.stringIndexes = newStringIndexes
return nil
}
// Merge the indexes since we have only performed limited indexing
for fldNameKey, mpFldName := range newStringIndexes {
if _, hasIt := rls.stringIndexes[fldNameKey]; !hasIt {
rls.stringIndexes[fldNameKey] = mpFldName
} else {
for fldValKey, strMap := range newStringIndexes[fldNameKey] {
if _, hasIt := rls.stringIndexes[fldNameKey][fldValKey]; !hasIt {
rls.stringIndexes[fldNameKey][fldValKey] = strMap
} else {
for resIDKey := range newStringIndexes[fldNameKey][fldValKey] {
rls.stringIndexes[fldNameKey][fldValKey][resIDKey] = true
}
}
}
}
}
return nil
}
// Called when caching/re-caching of ResourceLimits is necessary; nil rlIDs means cache and index everything
func (rls *ResourceLimiterService) CacheResourceLimits(loadID string, rlIDs []string) error {
if rlIDs != nil && len(rlIDs) == 0 { // Non-nil but empty: nothing explicit to cache
return nil
}
if err := rls.dataDB.CacheAccountingPrefixValues(loadID, map[string][]string{utils.ResourceLimitsPrefix: rlIDs}); err != nil {
return err
}
return rls.indexStringFilters(rlIDs)
}
// Called to start the service
func (rls *ResourceLimiterService) Start() error {
if err := rls.CacheResourceLimits("ResourceLimiterServiceStart", nil); err != nil {
return err
}
return nil
}
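
The index built above maps field name and field value to the set of ResourceLimit IDs that carry a matching MetaString filter, so candidate limits for an event can be found without scanning every cached profile. Below is a minimal sketch of how such a lookup could work, assuming the event arrives as a flat map of field name to field value; the matchingResourceLimitIDs helper and the event shape are illustrative assumptions, not part of this commit.

// Sketch only: consult the string index to collect candidate ResourceLimit IDs
// for an event given as fieldName->fieldValue pairs (assumed shape, not part of this commit).
func (rls *ResourceLimiterService) matchingResourceLimitIDs(ev map[string]string) utils.StringMap {
	matchedIDs := make(utils.StringMap)
	rls.RLock()
	defer rls.RUnlock()
	for fldName, fldVal := range ev {
		valMap, hasFld := rls.stringIndexes[fldName]
		if !hasFld {
			continue
		}
		if rlIDs, hasVal := valMap[fldVal]; hasVal {
			for rlID := range rlIDs {
				matchedIDs[rlID] = true // union of all limits indexed under this field/value
			}
		}
	}
	return matchedIDs
}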

engine/reslimiter_test.go (new file)

@@ -0,0 +1,121 @@
/*
Real-time Charging System for Telecom & ISP environments
Copyright (C) ITsysCOM GmbH
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
*/
package engine
import (
"reflect"
"testing"
"time"
"github.com/cgrates/cgrates/utils"
)
func TestIndexStringFilters(t *testing.T) {
rls := []*ResourceLimit{
&ResourceLimit{
ID: "RL1",
Weight: 20,
Filters: []*RequestFilter{
&RequestFilter{Type: MetaString, FieldName: "Account", Values: []string{"1001", "1002"}},
&RequestFilter{Type: MetaRSRFields, Values: []string{"Subject(~^1.*1$)", "Destination(1002)"},
rsrFields: utils.ParseRSRFieldsMustCompile("Subject(~^1.*1$);Destination(1002)", utils.INFIELD_SEP),
}},
ActivationTime: time.Date(2014, 7, 3, 13, 43, 0, 1, time.UTC),
Limit: 2,
},
&ResourceLimit{
ID: "RL2",
Weight: 10,
Filters: []*RequestFilter{
&RequestFilter{Type: MetaString, FieldName: "Account", Values: []string{"dan", "1002"}},
&RequestFilter{Type: MetaString, FieldName: "Subject", Values: []string{"dan"}},
},
ActivationTime: time.Date(2014, 7, 3, 13, 43, 0, 1, time.UTC),
Limit: 1,
},
}
for _, rl := range rls {
CacheSet(utils.ResourceLimitsPrefix+rl.ID, rl)
}
rLS := new(ResourceLimiterService)
eIndexes := map[string]map[string]utils.StringMap{
"Account": map[string]utils.StringMap{
"1001": utils.StringMap{
"RL1": true,
},
"1002": utils.StringMap{
"RL1": true,
"RL2": true,
},
"dan": utils.StringMap{
"RL2": true,
},
},
"Subject": map[string]utils.StringMap{
"dan": utils.StringMap{
"RL2": true,
},
},
}
if err := rLS.indexStringFilters(nil); err != nil {
t.Error(err)
} else if !reflect.DeepEqual(eIndexes, rLS.stringIndexes) {
t.Errorf("Expecting: %+v, received: %+v", eIndexes, rLS.stringIndexes)
}
rl3 := &ResourceLimit{
ID: "RL3",
Weight: 10,
Filters: []*RequestFilter{
&RequestFilter{Type: MetaString, FieldName: "Subject", Values: []string{"dan"}},
&RequestFilter{Type: MetaString, FieldName: "Subject", Values: []string{"1003"}},
},
ActivationTime: time.Date(2014, 7, 3, 13, 43, 0, 1, time.UTC),
Limit: 1,
}
CacheSet(utils.ResourceLimitsPrefix+rl3.ID, rl3)
eIndexes = map[string]map[string]utils.StringMap{
"Account": map[string]utils.StringMap{
"1001": utils.StringMap{
"RL1": true,
},
"1002": utils.StringMap{
"RL1": true,
"RL2": true,
},
"dan": utils.StringMap{
"RL2": true,
},
},
"Subject": map[string]utils.StringMap{
"dan": utils.StringMap{
"RL2": true,
"RL3": true,
},
"1003": utils.StringMap{
"RL3": true,
},
},
}
// Test index update
if err := rLS.indexStringFilters([]string{rl3.ID}); err != nil {
t.Error(err)
} else if !reflect.DeepEqual(eIndexes, rLS.stringIndexes) {
t.Errorf("Expecting: %+v, received: %+v", eIndexes, rLS.stringIndexes)
}
}


@@ -380,7 +380,6 @@ func (rs *RedisStorage) CacheAccountingPrefixValues(loadID string, prefixes map[
}
pm[prefix] = ids
}
utils.Logger.Debug(fmt.Sprintf("### CacheAccountingPrefixValues pm: %+v", pm))
return rs.cacheAccounting(loadID, pm[utils.ALIASES_PREFIX], pm[utils.ResourceLimitsPrefix])
}
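
For context, a short sketch of how the new entry points added in engine/reslimiter.go might be wired together: a full cache and index build at start-up, then a targeted re-cache after specific profiles change. The constructor-style function, the accountingDB parameter and the load ID below are assumptions for illustration, not part of this commit.

// Sketch only: assumed wiring of the new methods; accountingDB stands for any
// AccountingStorage implementation and the identifiers are illustrative.
func startResourceLimiter(accountingDB AccountingStorage) (*ResourceLimiterService, error) {
	rls := &ResourceLimiterService{dataDB: accountingDB}
	if err := rls.Start(); err != nil { // full cache load plus complete string index rebuild
		return nil, err
	}
	// After new or changed profiles are loaded, re-cache and re-index only those IDs:
	if err := rls.CacheResourceLimits("IllustrativeLoadID", []string{"RL1", "RL2"}); err != nil {
		return nil, err
	}
	return rls, nil
}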