Merge branch 'master' into master

This commit is contained in:
Dan Christian Bogos
2017-10-24 18:50:29 +02:00
committed by GitHub
7 changed files with 185 additions and 149 deletions

View File

@@ -80,6 +80,37 @@ func (rfi *ReqFilterIndexer) IndexFilters(itemID string, reqFltrs []*RequestFilt
return
}
// IndexTPFilter parses the filters inside tpFltr, adding itemID to the
// *string value indexes and marking every touched index key in
// chngdIndxKeys. Items carrying no *string filter at all are indexed under
// the NOT_AVAILABLE/NOT_AVAILABLE bucket so they can still be retrieved.
func (rfi *ReqFilterIndexer) IndexTPFilter(tpFltr *utils.TPFilter, itemID string) {
	var hasMetaString bool
	for _, fltr := range tpFltr.Filters {
		if fltr.Type != MetaString {
			continue // only *string filters are indexable
		}
		hasMetaString = true // found at least one *string filter so we don't index globally
		if _, has := rfi.indexes[fltr.FieldName]; !has {
			rfi.indexes[fltr.FieldName] = make(map[string]utils.StringMap)
		}
		for _, fldVal := range fltr.Values {
			if _, has := rfi.indexes[fltr.FieldName][fldVal]; !has {
				rfi.indexes[fltr.FieldName][fldVal] = make(utils.StringMap)
			}
			rfi.indexes[fltr.FieldName][fldVal][itemID] = true
			rfi.chngdIndxKeys[utils.ConcatenatedKey(fltr.FieldName, fldVal)] = true
		}
	}
	if !hasMetaString {
		if _, has := rfi.indexes[utils.NOT_AVAILABLE]; !has {
			rfi.indexes[utils.NOT_AVAILABLE] = make(map[string]utils.StringMap)
		}
		if _, has := rfi.indexes[utils.NOT_AVAILABLE][utils.NOT_AVAILABLE]; !has {
			rfi.indexes[utils.NOT_AVAILABLE][utils.NOT_AVAILABLE] = make(utils.StringMap)
		}
		// Items without a real field index are located in map[NOT_AVAILABLE][NOT_AVAILABLE][itemID].
		rfi.indexes[utils.NOT_AVAILABLE][utils.NOT_AVAILABLE][itemID] = true
	}
}
// StoreIndexes handles storing the indexes to dataDB
func (rfi *ReqFilterIndexer) StoreIndexes() error {
return rfi.dm.DataDB().SetReqFilterIndexes(rfi.dbKey, rfi.indexes)

View File

@@ -1510,84 +1510,83 @@ func TestLoadThresholdProfiles(t *testing.T) {
}
func TestLoadFilters(t *testing.T) {
eFilters := map[string]map[string]*utils.TPFilter{
"cgrates.org": map[string]*utils.TPFilter{
"FLTR_1": &utils.TPFilter{
TPid: testTPID,
Tenant: "cgrates.org",
ID: "FLTR_1",
Filters: []*utils.TPRequestFilter{
&utils.TPRequestFilter{
FieldName: "Account",
Type: "*string",
Values: []string{"1001", "1002"},
},
&utils.TPRequestFilter{
FieldName: "Destination",
Type: "*string_prefix",
Values: []string{"10", "20"},
},
&utils.TPRequestFilter{
FieldName: "",
Type: "*rsr_fields",
Values: []string{"Subject(~^1.*1$)", "Destination(1002)"},
},
eFilters := map[utils.TenantID]*utils.TPFilter{
utils.TenantID{"cgrates.org", "FLTR_1"}: &utils.TPFilter{
TPid: testTPID,
Tenant: "cgrates.org",
ID: "FLTR_1",
Filters: []*utils.TPRequestFilter{
&utils.TPRequestFilter{
FieldName: "Account",
Type: "*string",
Values: []string{"1001", "1002"},
},
ActivationInterval: &utils.TPActivationInterval{
ActivationTime: "2014-07-29T15:00:00Z",
&utils.TPRequestFilter{
FieldName: "Destination",
Type: "*string_prefix",
Values: []string{"10", "20"},
},
&utils.TPRequestFilter{
FieldName: "",
Type: "*rsr_fields",
Values: []string{"Subject(~^1.*1$)", "Destination(1002)"},
},
},
"FLTR_ACNT_dan": &utils.TPFilter{
TPid: testTPID,
Tenant: "cgrates.org",
ID: "FLTR_ACNT_dan",
Filters: []*utils.TPRequestFilter{
&utils.TPRequestFilter{
FieldName: "Account",
Type: "*string",
Values: []string{"dan"},
},
},
ActivationInterval: &utils.TPActivationInterval{
ActivationTime: "2014-07-29T15:00:00Z",
ActivationInterval: &utils.TPActivationInterval{
ActivationTime: "2014-07-29T15:00:00Z",
},
},
utils.TenantID{"cgrates.org", "FLTR_ACNT_dan"}: &utils.TPFilter{
TPid: testTPID,
Tenant: "cgrates.org",
ID: "FLTR_ACNT_dan",
Filters: []*utils.TPRequestFilter{
&utils.TPRequestFilter{
FieldName: "Account",
Type: "*string",
Values: []string{"dan"},
},
},
"FLTR_DST_DE": &utils.TPFilter{
TPid: testTPID,
Tenant: "cgrates.org",
ID: "FLTR_DST_DE",
Filters: []*utils.TPRequestFilter{
&utils.TPRequestFilter{
FieldName: "Destination",
Type: "*destinations",
Values: []string{"DST_DE"},
},
},
ActivationInterval: &utils.TPActivationInterval{
ActivationTime: "2014-07-29T15:00:00Z",
ActivationInterval: &utils.TPActivationInterval{
ActivationTime: "2014-07-29T15:00:00Z",
},
},
utils.TenantID{"cgrates.org", "FLTR_DST_DE"}: &utils.TPFilter{
TPid: testTPID,
Tenant: "cgrates.org",
ID: "FLTR_DST_DE",
Filters: []*utils.TPRequestFilter{
&utils.TPRequestFilter{
FieldName: "Destination",
Type: "*destinations",
Values: []string{"DST_DE"},
},
},
"FLTR_DST_NL": &utils.TPFilter{
TPid: testTPID,
Tenant: "cgrates.org",
ID: "FLTR_DST_NL",
Filters: []*utils.TPRequestFilter{
&utils.TPRequestFilter{
FieldName: "Destination",
Type: "*destinations",
Values: []string{"DST_NL"},
},
},
ActivationInterval: &utils.TPActivationInterval{
ActivationTime: "2014-07-29T15:00:00Z",
ActivationInterval: &utils.TPActivationInterval{
ActivationTime: "2014-07-29T15:00:00Z",
},
},
utils.TenantID{"cgrates.org", "FLTR_DST_NL"}: &utils.TPFilter{
TPid: testTPID,
Tenant: "cgrates.org",
ID: "FLTR_DST_NL",
Filters: []*utils.TPRequestFilter{
&utils.TPRequestFilter{
FieldName: "Destination",
Type: "*destinations",
Values: []string{"DST_NL"},
},
},
ActivationInterval: &utils.TPActivationInterval{
ActivationTime: "2014-07-29T15:00:00Z",
},
},
}
if len(csvr.filters["cgrates.org"]) != len(eFilters["cgrates.org"]) {
fltrKey := utils.TenantID{"cgrates.org", "FLTR_1"}
if len(csvr.filters) != len(eFilters) {
t.Errorf("Failed to load FilterProfiles: %s", utils.ToIJSON(csvr.filters))
} else if !reflect.DeepEqual(eFilters["cgrates.org"]["FLTR_1"], csvr.filters["cgrates.org"]["FLTR_1"]) {
t.Errorf("Expecting: %+v, received: %+v", eFilters["cgrates.org"]["FLTR_1"], csvr.filters["cgrates.org"]["FLTR_1"])
} else if !reflect.DeepEqual(eFilters[fltrKey], csvr.filters[fltrKey]) {
t.Errorf("Expecting: %+v, received: %+v", eFilters[fltrKey], csvr.filters[fltrKey])
}
}

View File

@@ -2419,3 +2419,28 @@ func APItoFilter(tpTH *utils.TPFilter, timezone string) (th *Filter, err error)
}
return th, nil
}
// FilterToTPFilter converts a *Filter into its tariff-plan representation,
// deep-copying tenant, ID, the request filters (including their value
// slices) and, when present, the activation interval formatted as RFC3339.
func FilterToTPFilter(f *Filter) (tpFltr *utils.TPFilter) {
	tpFltr = &utils.TPFilter{
		Tenant:  f.Tenant,
		ID:      f.ID,
		Filters: make([]*utils.TPRequestFilter, len(f.RequestFilters)),
	}
	for i, reqFltr := range f.RequestFilters {
		tpFltr.Filters[i] = &utils.TPRequestFilter{
			Type:      reqFltr.Type,
			FieldName: reqFltr.FieldName,
			Values:    make([]string, len(reqFltr.Values)),
		}
		copy(tpFltr.Filters[i].Values, reqFltr.Values) // builtin copy instead of element-by-element loop
	}
	if f.ActivationInterval != nil {
		tpFltr.ActivationInterval = &utils.TPActivationInterval{
			ActivationTime: f.ActivationInterval.ActivationTime.Format(time.RFC3339),
			ExpiryTime:     f.ActivationInterval.ExpiryTime.Format(time.RFC3339),
		}
	}
	return
}

View File

@@ -250,11 +250,11 @@ func (tS *ThresholdService) StoreThreshold(t *Threshold) (err error) {
// matchingThresholdsForEvent returns ordered list of matching thresholds which are active for an Event
func (tS *ThresholdService) matchingThresholdsForEvent(ev *ThresholdEvent) (ts Thresholds, err error) {
matchingTs := make(map[string]*Threshold)
tIDs, err := matchingItemIDsForEvent(ev.Event, tS.dm, utils.ThresholdsIndex+ev.Tenant)
tIDs, err := matchingItemIDsForEvent(ev.Event, tS.dm, utils.ThresholdStringIndex+ev.Tenant)
if err != nil {
return nil, err
}
lockIDs := utils.PrefixSliceItems(tIDs.Slice(), utils.ThresholdsIndex)
lockIDs := utils.PrefixSliceItems(tIDs.Slice(), utils.ThresholdStringIndex)
guardian.Guardian.GuardIDs(config.CgrConfig().LockingTimeout, lockIDs...)
defer guardian.Guardian.UnguardIDs(lockIDs...)
for tID := range tIDs {

View File

@@ -57,13 +57,14 @@ type TpReader struct {
resProfiles map[string]map[string]*utils.TPResource
sqProfiles map[string]map[string]*utils.TPStats
thProfiles map[string]map[string]*utils.TPThreshold
filters map[string]map[string]*utils.TPFilter
filters map[utils.TenantID]*utils.TPFilter
resources []*utils.TenantID // IDs of resources which need creation based on resourceProfiles
statQueues []*utils.TenantID // IDs of statQueues which need creation based on statQueueProfiles
thresholds []*utils.TenantID // IDs of thresholds which need creation based on thresholdProfiles
revDests,
revAliases,
acntActionPlans map[string][]string
thdsIndexers map[string]*ReqFilterIndexer // tenant, indexer
}
func NewTpReader(db DataDB, lr LoadReader, tpid, timezone string) *TpReader {
@@ -135,10 +136,11 @@ func (tpr *TpReader) Init() {
tpr.resProfiles = make(map[string]map[string]*utils.TPResource)
tpr.sqProfiles = make(map[string]map[string]*utils.TPStats)
tpr.thProfiles = make(map[string]map[string]*utils.TPThreshold)
tpr.filters = make(map[string]map[string]*utils.TPFilter)
tpr.filters = make(map[utils.TenantID]*utils.TPFilter)
tpr.revDests = make(map[string][]string)
tpr.revAliases = make(map[string][]string)
tpr.acntActionPlans = make(map[string][]string)
tpr.thdsIndexers = make(map[string]*ReqFilterIndexer)
}
func (tpr *TpReader) LoadDestinationsFiltered(tag string) (bool, error) {
@@ -1658,8 +1660,9 @@ func (tpr *TpReader) LoadStats() error {
return tpr.LoadStatsFiltered("")
}
func (tpr *TpReader) LoadThresholdsFiltered(tag string) error {
tps, err := tpr.lr.GetTPThresholds(tpr.tpid, tag)
func (tpr *TpReader) LoadThresholdsFiltered(tag string) (err error) {
tps, err := tpr.lr.GetTPThreshold(tpr.tpid, tag)
if err != nil {
return err
}
@@ -1672,13 +1675,35 @@ func (tpr *TpReader) LoadThresholdsFiltered(tag string) error {
}
tpr.thProfiles = mapTHs
for tenant, mpID := range mapTHs {
for thID := range mpID {
thdIndxrKey := utils.ThresholdStringIndex + tenant
for thID, t := range mpID {
thTntID := &utils.TenantID{Tenant: tenant, ID: thID}
if has, err := tpr.dm.DataDB().HasData(utils.ThresholdPrefix, thTntID.TenantID()); err != nil {
return err
} else if !has {
tpr.thresholds = append(tpr.thresholds, thTntID)
}
// index thresholds for filters
if _, has := tpr.thdsIndexers[tenant]; !has {
if tpr.thdsIndexers[tenant], err = NewReqFilterIndexer(tpr.dm, thdIndxrKey); err != nil {
return
}
}
for _, fltrID := range t.FilterIDs {
tpFltr, has := tpr.filters[utils.TenantID{tenant, fltrID}]
if !has {
var fltr *Filter
if fltr, err = tpr.dm.GetFilter(tenant, fltrID, false, utils.NonTransactional); err != nil {
if err == utils.ErrNotFound {
err = fmt.Errorf("broken reference to filter: %s for threshold: %s", fltrID, thID)
}
return
} else {
tpFltr = FilterToTPFilter(fltr)
}
}
tpr.thdsIndexers[tenant].IndexTPFilter(tpFltr, thID)
}
}
}
return nil
@@ -1693,12 +1718,9 @@ func (tpr *TpReader) LoadFilterFiltered(tag string) error {
if err != nil {
return err
}
mapTHs := make(map[string]map[string]*utils.TPFilter)
mapTHs := make(map[utils.TenantID]*utils.TPFilter)
for _, th := range tps {
if _, has := mapTHs[th.Tenant]; !has {
mapTHs[th.Tenant] = make(map[string]*utils.TPFilter)
}
mapTHs[th.Tenant][th.ID] = th
mapTHs[utils.TenantID{th.Tenant, th.ID}] = th
}
tpr.filters = mapTHs
return nil
@@ -2001,6 +2023,21 @@ func (tpr *TpReader) WriteToDatabase(flush, verbose, disable_reverse bool) (err
log.Printf("\t %s : %+v", id, vals)
}
}
if verbose {
log.Print("Filters:")
}
for _, tpTH := range tpr.filters {
th, err := APItoFilter(tpTH, tpr.timezone)
if err != nil {
return err
}
if err = tpr.dm.SetFilter(th); err != nil {
return err
}
if verbose {
log.Print("\t", th.TenantID())
}
}
if verbose {
log.Print("ResourceProfiles:")
}
@@ -2094,23 +2131,6 @@ func (tpr *TpReader) WriteToDatabase(flush, verbose, disable_reverse bool) (err
log.Print("\t", thd.TenantID())
}
}
if verbose {
log.Print("Filters:")
}
for _, mpID := range tpr.filters {
for _, tpTH := range mpID {
th, err := APItoFilter(tpTH, tpr.timezone)
if err != nil {
return err
}
if err = tpr.dm.SetFilter(th); err != nil {
return err
}
if verbose {
log.Print("\t", th.TenantID())
}
}
}
if verbose {
log.Print("Timings:")
}
@@ -2195,54 +2215,15 @@ func (tpr *TpReader) WriteToDatabase(flush, verbose, disable_reverse bool) (err
}
}
}
/*
if len(tpr.thProfiles) > 0 {
if verbose {
log.Print("Indexing thresholds")
}
for tenant, mpID := range tpr.thProfiles {
stIdxr, err := NewReqFilterIndexer(tpr.dm, utils.ThresholdsIndex+tenant)
if err != nil {
return err
}
for _, tpTH := range mpID {
if th, err := APItoThresholdProfile(tpTH, tpr.timezone); err != nil {
return err
} else {
stIdxr.IndexFilters(th.ID, th.Filters)
}
}
if verbose {
log.Printf("Indexed thresholds tenant: %s, keys %+v", tenant, stIdxr.ChangedKeys().Slice())
}
if err := stIdxr.StoreIndexes(); err != nil {
return err
}
}
if verbose {
log.Print("Threshold filter indexes:")
}
for tenant, fltrIdxer := range tpr.thdsIndexers {
if err := fltrIdxer.StoreIndexes(); err != nil {
return err
}
*/
if len(tpr.filters) > 0 {
if verbose {
log.Print("Indexing Filters")
}
for tenant, mpID := range tpr.filters {
stIdxr, err := NewReqFilterIndexer(tpr.dm, utils.FilterIndex+tenant)
if err != nil {
return err
}
for _, tpTH := range mpID {
if th, err := APItoFilter(tpTH, tpr.timezone); err != nil {
return err
} else {
stIdxr.IndexFilters(th.ID, th.RequestFilters)
}
}
if verbose {
log.Printf("Indexed filters tenant: %s, keys %+v", tenant, stIdxr.ChangedKeys().Slice())
}
if err := stIdxr.StoreIndexes(); err != nil {
return err
}
log.Printf("Tenant: %s, keys %+v", tenant, fltrIdxer.ChangedKeys().Slice())
}
}
}
@@ -2467,7 +2448,7 @@ func (tpr *TpReader) GetLoadedIds(categ string) ([]string, error) {
keys := make([]string, len(tpr.filters))
i := 0
for k := range tpr.filters {
keys[i] = k
keys[i] = k.TenantID()
i++
}
return keys, nil

View File

@@ -1287,12 +1287,6 @@ type TPResource struct {
Thresholds []string // Thresholds to check after changing Limit
}
type TPRequestFilter struct {
Type string // Filter type (*string, *timing, *rsr_filters, *cdr_stats)
FieldName string // Name of the field providing us the Values to check (used in case of some )
Values []string // Filter definition
}
// TPActivationInterval represents an activation interval for an item
type TPActivationInterval struct {
ActivationTime,
@@ -1381,3 +1375,9 @@ type TPFilter struct {
Filters []*TPRequestFilter
ActivationInterval *TPActivationInterval // Time when this limit becomes active and expires
}
type TPRequestFilter struct {
Type string // Filter type (*string, *timing, *rsr_filters, *cdr_stats)
FieldName string // Name of the field providing us the Values to check (used in case of some )
Values []string // Filter definition
}

View File

@@ -249,7 +249,7 @@ const (
ResourceProfilesPrefix = "rsp_"
StatQueuesStringIndex = "ssi_"
ThresholdPrefix = "thd_"
ThresholdsIndex = "thi_"
ThresholdStringIndex = "tsi_"
TimingsPrefix = "tmg_"
FilterPrefix = "ftr_"
FilterIndex = "fti_"