From a93522d8a1014d5d68569a23b2799c5b08652efe Mon Sep 17 00:00:00 2001 From: TeoV Date: Mon, 2 Oct 2017 16:12:12 +0300 Subject: [PATCH] Add FilterProfile + test for it --- apier/v1/apier.go | 1 + apier/v1/resourcesv1_it_test.go | 3 +- apier/v1/tp_it_test.go | 2 +- apier/v2/apier.go | 1 + cmd/cgr-loader/cgr-loader.go | 1 + .../mysql/create_tariffplan_tables.sql | 19 ++++++ .../postgres/create_tariffplan_tables.sql | 19 ++++++ data/tariffplans/testtp/Filters.csv | 7 +++ data/tariffplans/tutorial/Filters.csv | 7 +++ engine/filters.go | 35 +++++++++++ engine/libtest.go | 1 + engine/loader_csv_test.go | 11 +++- engine/loader_it_test.go | 4 ++ engine/model_helpers.go | 60 ++++++++++++++++++ engine/model_helpers_test.go | 52 ++++++++++++++- engine/models.go | 11 ++++ engine/onstor_it_test.go | 33 ++++++++++ engine/storage_csv.go | 42 ++++++++++--- engine/storage_interface.go | 5 ++ engine/storage_map.go | 45 +++++++++++++ engine/storage_mongo_datadb.go | 52 +++++++++++++++ engine/storage_mongo_stordb.go | 31 +++++++++ engine/storage_redis.go | 44 ++++++++++++- engine/storage_sql.go | 40 +++++++++++- engine/tp_reader.go | 63 ++++++++++++++++++- engine/tpexporter.go | 10 +++ engine/tpimporter_csv.go | 13 ++++ general_tests/acntacts_test.go | 3 +- general_tests/auth_test.go | 3 +- general_tests/costs1_test.go | 2 +- general_tests/datachrg1_test.go | 2 +- general_tests/ddazmbl1_test.go | 3 +- general_tests/ddazmbl2_test.go | 3 +- general_tests/ddazmbl3_test.go | 3 +- general_tests/smschrg1_test.go | 2 +- utils/apitpdata.go | 10 +++ utils/consts.go | 4 ++ 37 files changed, 625 insertions(+), 22 deletions(-) create mode 100644 data/tariffplans/testtp/Filters.csv create mode 100644 data/tariffplans/tutorial/Filters.csv create mode 100644 engine/filters.go diff --git a/apier/v1/apier.go b/apier/v1/apier.go index b22cef873..da6bee82b 100644 --- a/apier/v1/apier.go +++ b/apier/v1/apier.go @@ -1498,6 +1498,7 @@ func (self *ApierV1) LoadTariffPlanFromFolder(attrs utils.AttrLoadTpFromFolder, path.Join(attrs.FolderPath, utils.ResourcesCsv), path.Join(attrs.FolderPath, utils.StatsCsv), path.Join(attrs.FolderPath, utils.ThresholdsCsv), + path.Join(attrs.FolderPath, utils.FiltersCsv), ), "", self.Config.DefaultTimezone) if err := loader.LoadAll(); err != nil { return utils.NewErrServerError(err) diff --git a/apier/v1/resourcesv1_it_test.go b/apier/v1/resourcesv1_it_test.go index 96860e325..73bc18a63 100644 --- a/apier/v1/resourcesv1_it_test.go +++ b/apier/v1/resourcesv1_it_test.go @@ -131,7 +131,6 @@ func testV1RsFromFolder(t *testing.T) { } func testV1RsGetResourcesForEvent(t *testing.T) { - time.Sleep(time.Duration(1000) * time.Millisecond) var reply *[]*engine.ResourceProfile args := &utils.ArgRSv1ResourceUsage{ Tenant: "cgrates.org", @@ -139,10 +138,12 @@ func testV1RsGetResourcesForEvent(t *testing.T) { if err := rlsV1Rpc.Call("ResourceSV1.GetResourcesForEvent", args, &reply); err == nil || err.Error() != utils.ErrNotFound.Error() { t.Error(err) } + time.Sleep(time.Duration(500) * time.Millisecond) args.Event = map[string]interface{}{"Destination": "10"} if err := rlsV1Rpc.Call("ResourceSV1.GetResourcesForEvent", args, &reply); err != nil { t.Error(err) } + time.Sleep(time.Duration(500) * time.Millisecond) if len(*reply) != 1 { t.Errorf("Expecting: %+v, received: %+v", 1, len(*reply)) } diff --git a/apier/v1/tp_it_test.go b/apier/v1/tp_it_test.go index b4c5c628e..4c623c4ba 100644 --- a/apier/v1/tp_it_test.go +++ b/apier/v1/tp_it_test.go @@ -130,7 +130,7 @@ func testTPExportTPToFolder(t *testing.T) { 
Compressed: true, ExportPath: "/tmp/", ExportedFiles: []string{"RatingProfiles.csv", "CdrStats.csv", "Users.csv", "RatingPlans.csv", "Actions.csv", "AccountActions.csv", - "Timings.csv", "SharedGroups.csv", "ActionPlans.csv", "ActionTriggers.cs", "DerivedChargers.csv", "Resources.csv", "Stats.csv", "Thresholds.csv", "Destinations.csv", "Rates.csv", "DestinationRates.csv"}, + "Timings.csv", "SharedGroups.csv", "ActionPlans.csv", "ActionTriggers.cs", "DerivedChargers.csv", "Resources.csv", "Stats.csv", "Thresholds.csv", "Destinations.csv", "Rates.csv", "DestinationRates.csv", "Filters.csv"}, } tpid := "TEST_TPID2" compress := true diff --git a/apier/v2/apier.go b/apier/v2/apier.go index eed1e9596..b516ec2b0 100644 --- a/apier/v2/apier.go +++ b/apier/v2/apier.go @@ -143,6 +143,7 @@ func (self *ApierV2) LoadTariffPlanFromFolder(attrs utils.AttrLoadTpFromFolder, path.Join(attrs.FolderPath, utils.ResourcesCsv), path.Join(attrs.FolderPath, utils.StatsCsv), path.Join(attrs.FolderPath, utils.ThresholdsCsv), + path.Join(attrs.FolderPath, utils.FiltersCsv), ), "", self.Config.DefaultTimezone) if err := loader.LoadAll(); err != nil { return utils.NewErrServerError(err) diff --git a/cmd/cgr-loader/cgr-loader.go b/cmd/cgr-loader/cgr-loader.go index 1bfdecdac..5a4ad8bd7 100755 --- a/cmd/cgr-loader/cgr-loader.go +++ b/cmd/cgr-loader/cgr-loader.go @@ -152,6 +152,7 @@ func main() { path.Join(*dataPath, utils.ResourcesCsv), path.Join(*dataPath, utils.StatsCsv), path.Join(*dataPath, utils.ThresholdsCsv), + path.Join(*dataPath, utils.FiltersCsv), ) } tpReader := engine.NewTpReader(dataDB, loader, *tpid, *timezone) diff --git a/data/storage/mysql/create_tariffplan_tables.sql b/data/storage/mysql/create_tariffplan_tables.sql index 840f01d38..f4ec7fd69 100644 --- a/data/storage/mysql/create_tariffplan_tables.sql +++ b/data/storage/mysql/create_tariffplan_tables.sql @@ -471,6 +471,25 @@ CREATE TABLE tp_thresholds ( UNIQUE KEY `unique_tp_thresholds` (`tpid`,`tenant`, `id`, `filter_type`, `filter_field_name`) ); +-- +-- Table structure for table `tp_filter` +-- + +DROP TABLE IF EXISTS tp_filters; +CREATE TABLE tp_filters ( + `pk` int(11) NOT NULL AUTO_INCREMENT, + `tpid` varchar(64) NOT NULL, + `tenant` varchar(64) NOT NULL, + `id` varchar(64) NOT NULL, + `filter_type` varchar(16) NOT NULL, + `filter_field_name` varchar(64) NOT NULL, + `filter_field_values` varchar(256) NOT NULL, + `created_at` TIMESTAMP, + PRIMARY KEY (`pk`), + KEY `tpid` (`tpid`), + UNIQUE KEY `unique_tp_filters` (`tpid`,`tenant`, `id`, `filter_type`, `filter_field_name`) +); + -- -- Table structure for table `versions` -- diff --git a/data/storage/postgres/create_tariffplan_tables.sql b/data/storage/postgres/create_tariffplan_tables.sql index 663098a48..b53256962 100644 --- a/data/storage/postgres/create_tariffplan_tables.sql +++ b/data/storage/postgres/create_tariffplan_tables.sql @@ -465,6 +465,25 @@ CREATE TABLE tp_thresholds ( CREATE INDEX tp_thresholds_idx ON tp_thresholds (tpid); CREATE INDEX tp_thresholds_unique ON tp_thresholds ("tpid","tenant", "id", "filter_type", "filter_field_name"); +-- +-- Table structure for table `tp_filter` +-- + +DROP TABLE IF EXISTS tp_filters; +CREATE TABLE tp_filters ( + "pk" SERIAL PRIMARY KEY, + "tpid" varchar(64) NOT NULL, + "tenant" varchar(64) NOT NULL, + "id" varchar(64) NOT NULL, + "filter_type" varchar(16) NOT NULL, + "filter_field_name" varchar(64) NOT NULL, + "filter_field_values" varchar(256) NOT NULL, + "created_at" TIMESTAMP WITH TIME ZONE +); + CREATE INDEX tp_filters_idx ON tp_filters (tpid); + 
CREATE INDEX tp_filters_unique ON tp_filters ("tpid","tenant", "id", "filter_type", "filter_field_name"); + + -- -- Table structure for table `versions` diff --git a/data/tariffplans/testtp/Filters.csv b/data/tariffplans/testtp/Filters.csv new file mode 100644 index 000000000..5a90d9838 --- /dev/null +++ b/data/tariffplans/testtp/Filters.csv @@ -0,0 +1,7 @@ +#Tenant[0],ID[1],FilterType[2],FilterFieldName[3],FilterFieldValues[4] +cgrates.org,FLTR_1,*string,Account,1001;1002 +cgrates.org,FLTR_1,*string_prefix,Destination,10;20 +cgrates.org,FLTR_1,*rsr_fields,,Subject(~^1.*1$);Destination(1002) +cgrates.org,FLTR_ACNT_dan,*string,Account,dan +cgrates.org,FLTR_DST_DE,*destinations,Destination,DST_DE +cgrates.org,FLTR_DST_NL,*destinations,Destination,DST_NL diff --git a/data/tariffplans/tutorial/Filters.csv b/data/tariffplans/tutorial/Filters.csv new file mode 100644 index 000000000..5a90d9838 --- /dev/null +++ b/data/tariffplans/tutorial/Filters.csv @@ -0,0 +1,7 @@ +#Tenant[0],ID[1],FilterType[2],FilterFieldName[3],FilterFieldValues[4] +cgrates.org,FLTR_1,*string,Account,1001;1002 +cgrates.org,FLTR_1,*string_prefix,Destination,10;20 +cgrates.org,FLTR_1,*rsr_fields,,Subject(~^1.*1$);Destination(1002) +cgrates.org,FLTR_ACNT_dan,*string,Account,dan +cgrates.org,FLTR_DST_DE,*destinations,Destination,DST_DE +cgrates.org,FLTR_DST_NL,*destinations,Destination,DST_NL diff --git a/engine/filters.go b/engine/filters.go new file mode 100644 index 000000000..36eeef7fd --- /dev/null +++ b/engine/filters.go @@ -0,0 +1,35 @@ +/* +Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments +Copyright (C) ITsysCOM GmbH + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. 
If not, see <http://www.gnu.org/licenses/>
+*/
+
+package engine
+
+import (
+	"github.com/cgrates/cgrates/utils"
+)
+
+type FilterProfile struct {
+	Tenant            string
+	ID                string
+	FilterType        string
+	FilterFieldName   string
+	FilterFieldValues []string
+}
+
+func (tp *FilterProfile) TenantID() string {
+	return utils.ConcatenatedKey(tp.Tenant, tp.ID)
+}
diff --git a/engine/libtest.go b/engine/libtest.go
index 33b7cd245..3eecc6a7f 100644
--- a/engine/libtest.go
+++ b/engine/libtest.go
@@ -134,6 +134,7 @@ func LoadTariffPlanFromFolder(tpPath, timezone string, dataDB DataDB, disable_re
 		path.Join(tpPath, utils.ResourcesCsv),
 		path.Join(tpPath, utils.StatsCsv),
 		path.Join(tpPath, utils.ThresholdsCsv),
+		path.Join(tpPath, utils.FiltersCsv),
 	), "", timezone)
 	if err := loader.LoadAll(); err != nil {
 		return utils.NewErrServerError(err)
diff --git a/engine/loader_csv_test.go b/engine/loader_csv_test.go
index 700eb640e..7619d4f5d 100755
--- a/engine/loader_csv_test.go
+++ b/engine/loader_csv_test.go
@@ -280,6 +280,15 @@ cgrates.org,Stats1,*string,Account,1001;1002,2014-07-29T15:00:00Z,100,1s,*asr;*a
 	thresholds = `
 #Tenant[0],Id[1],FilterType[2],FilterFieldName[3],FilterFieldValues[4],ActivationInterval[5],Recurrent[6],MinSleep[7],Blocker[8],Weight[9],ActionIDs[10]
 cgrates.org,Threshold1,*string,Account,1001;1002,2014-07-29T15:00:00Z,true,1s,true,10,THRESH1;THRESH2
+`
+	filters = `
+#Tenant[0],ID[1],FilterType[2],FilterFieldName[3],FilterFieldValues[4]
+cgrates.org,FLTR_1,*string,Account,1001;1002
+cgrates.org,FLTR_1,*string_prefix,Destination,10;20
+cgrates.org,FLTR_1,*rsr_fields,,Subject(~^1.*1$);Destination(1002)
+cgrates.org,FLTR_ACNT_dan,*string,Account,dan
+cgrates.org,FLTR_DST_DE,*destinations,Destination,DST_DE
+cgrates.org,FLTR_DST_NL,*destinations,Destination,DST_NL
 `
 )
 
@@ -287,7 +296,7 @@ var csvr *TpReader
 
 func init() {
 	csvr = NewTpReader(dataStorage, NewStringCSVStorage(',', destinations, timings, rates, destinationRates, ratingPlans, ratingProfiles,
-		sharedGroups, lcrs, actions, actionPlans, actionTriggers, accountActions, derivedCharges, cdrStats, users, aliases, resProfiles, stats, thresholds), testTPID, "")
+		sharedGroups, lcrs, actions, actionPlans, actionTriggers, accountActions, derivedCharges, cdrStats, users, aliases, resProfiles, stats, thresholds, filters), testTPID, "")
 
 	if err := csvr.LoadDestinations(); err != nil {
 		log.Print("error in LoadDestinations:", err)
diff --git a/engine/loader_it_test.go b/engine/loader_it_test.go
index a74385760..23b4b927f 100755
--- a/engine/loader_it_test.go
+++ b/engine/loader_it_test.go
@@ -107,6 +107,7 @@ func TestLoaderITLoadFromCSV(t *testing.T) {
 		path.Join(*dataDir, "tariffplans", *tpCsvScenario, utils.ResourcesCsv),
 		path.Join(*dataDir, "tariffplans", *tpCsvScenario, utils.StatsCsv),
 		path.Join(*dataDir, "tariffplans", *tpCsvScenario, utils.ThresholdsCsv),
+		path.Join(*dataDir, "tariffplans", *tpCsvScenario, utils.FiltersCsv),
 	), "", "")
 
 	if err = loader.LoadDestinations(); err != nil {
@@ -160,6 +161,9 @@ func TestLoaderITLoadFromCSV(t *testing.T) {
 	if err = loader.LoadThresholds(); err != nil {
 		t.Error("Failed loading thresholds: ", err.Error())
 	}
+	if err = loader.LoadFilter(); err != nil {
+		t.Error("Failed loading filters: ", err.Error())
+	}
 	if err := loader.WriteToDatabase(true, false, false); err != nil {
 		t.Error("Could not write data into dataDb: ", err.Error())
 	}
diff --git a/engine/model_helpers.go b/engine/model_helpers.go
index c1c0b7e61..bf6b4efcf 100755
--- a/engine/model_helpers.go
+++ b/engine/model_helpers.go
@@ -2251,3 +2251,63 @@ func APItoThresholdProfile(tpTH *utils.TPThreshold, timezone string) (th *Thresh
 	}
 	return th, nil
 }
+
+type TpFilterS []*TpFilter
+
+func (tps TpFilterS) AsTPFilter() (result []*utils.TPFilter) {
+	mst := make(map[string]*utils.TPFilter)
+	for _, tp := range tps {
+		th, found := mst[tp.ID]
+		if !found {
+			th = &utils.TPFilter{
+				TPid:            tp.Tpid,
+				Tenant:          tp.Tenant,
+				ID:              tp.ID,
+				FilterType:      tp.FilterType,
+				FilterFieldName: tp.FilterFieldName,
+			}
+		}
+		if tp.FilterFieldValues != "" {
+			th.FilterFielValues = append(th.FilterFielValues, strings.Split(tp.FilterFieldValues, utils.INFIELD_SEP)...)
+		}
+		mst[tp.ID] = th
+	}
+	result = make([]*utils.TPFilter, len(mst))
+	i := 0
+	for _, th := range mst {
+		result[i] = th
+		i++
+	}
+	return
+}
+
+func APItoModelTPFilter(th *utils.TPFilter) (mdls TpFilterS) {
+	mdl := &TpFilter{
+		Tpid:            th.TPid,
+		Tenant:          th.Tenant,
+		ID:              th.ID,
+		FilterFieldName: th.FilterFieldName,
+		FilterType:      th.FilterType,
+	}
+	for i, val := range th.FilterFielValues {
+		if i != 0 {
+			mdl.FilterFieldValues += utils.INFIELD_SEP
+		}
+		mdl.FilterFieldValues += val
+	}
+	mdls = append(mdls, mdl)
+	return
+}
+
+func APItoFilterProfile(tpTH *utils.TPFilter) (th *FilterProfile, err error) {
+	th = &FilterProfile{
+		Tenant:          tpTH.Tenant,
+		ID:              tpTH.ID,
+		FilterFieldName: tpTH.FilterFieldName,
+		FilterType:      tpTH.FilterType,
+	}
+	for _, ati := range tpTH.FilterFielValues {
+		th.FilterFieldValues = append(th.FilterFieldValues, ati)
+	}
+	return th, nil
+}
diff --git a/engine/model_helpers_test.go b/engine/model_helpers_test.go
index 3de1930a5..9678002e6 100755
--- a/engine/model_helpers_test.go
+++ b/engine/model_helpers_test.go
@@ -939,7 +939,7 @@ func TestAsTPThresholdAsAsTPThreshold(t *testing.T) {
 	tps := []*TpThreshold{
 		&TpThreshold{
 			Tpid:              "TEST_TPID",
-			ID:                "Stats1",
+			ID:                "Threshold",
 			FilterType:        MetaStringPrefix,
 			FilterFieldName:   "Account",
 			FilterFieldValues: "1001;1002",
@@ -1014,3 +1014,53 @@ func TestAPItoTPThreshold(t *testing.T) {
 		t.Errorf("Expecting: %+v, received: %+v", eTPs, st)
 	}
 }
+
+func TestTPFilterAsTPFilter(t *testing.T) {
+	tps := []*TpFilter{
+		&TpFilter{
+			Tpid:              "TEST_TPID",
+			ID:                "Filter1",
+			FilterType:        MetaStringPrefix,
+			FilterFieldName:   "Account",
+			FilterFieldValues: "1001;1002",
+		},
+	}
+	eTPs := []*utils.TPFilter{
+		&utils.TPFilter{
+			TPid:             tps[0].Tpid,
+			ID:               tps[0].ID,
+			FilterType:       tps[0].FilterType,
+			FilterFieldName:  tps[0].FilterFieldName,
+			FilterFielValues: []string{"1001", "1002"},
+		},
+	}
+
+	rcvTPs := TpFilterS(tps).AsTPFilter()
+	if !(reflect.DeepEqual(eTPs, rcvTPs) || reflect.DeepEqual(eTPs[0], rcvTPs[0])) {
+		t.Errorf("\nExpecting:\n%+v\nReceived:\n%+v", utils.ToIJSON(eTPs), utils.ToIJSON(rcvTPs))
+	}
+}
+
+func TestAPItoTPFilter(t *testing.T) {
+	tps := &utils.TPFilter{
+		TPid:             testTPID,
+		Tenant:           "cgrates.org",
+		ID:               "Filter1",
+		FilterType:       "*string",
+		FilterFieldName:  "Account",
+		FilterFielValues: []string{"1001", "1002"},
+	}
+
+	eTPs := &FilterProfile{
+		Tenant:            "cgrates.org",
+		ID:                tps.ID,
+		FilterFieldName:   tps.FilterFieldName,
+		FilterType:        tps.FilterType,
+		FilterFieldValues: tps.FilterFielValues,
+	}
+	if st, err := APItoFilterProfile(tps); err != nil {
+		t.Error(err)
+	} else if !reflect.DeepEqual(eTPs, st) {
+		t.Errorf("Expecting: %+v, received: %+v", eTPs, st)
+	}
+}
diff --git a/engine/models.go b/engine/models.go
index 129b29936..8fe8c001f 100755
--- a/engine/models.go
+++ b/engine/models.go
@@ -516,3 +516,14 @@ type TpThreshold struct {
 	ActionIDs         string `index:"10" re:""`
 	CreatedAt         time.Time
 }
+
+type TpFilter struct {
+	PK                uint `gorm:"primary_key"`
+	Tpid              string
+	Tenant            string `index:"0" re:""`
+	ID                string `index:"1" re:""`
+	FilterType        string `index:"2" re:"^\*[A-Za-z].*"`
+	FilterFieldName   string `index:"3" re:""`
+	FilterFieldValues string `index:"4" re:""`
+	CreatedAt         time.Time
+}
diff --git a/engine/onstor_it_test.go b/engine/onstor_it_test.go
index 125011fcb..12417feae 100644
--- a/engine/onstor_it_test.go
+++ b/engine/onstor_it_test.go
@@ -94,6 +94,7 @@ var sTestsOnStorIT = []func(t *testing.T){
 	testOnStorITCRUDStoredStatQueue,
 	testOnStorITCRUDThresholdProfile,
 	testOnStorITCRUDThreshold,
+	testOnStorITCRUDFilterProfile,
 }
 
 func TestOnStorITRedisConnect(t *testing.T) {
@@ -2118,3 +2119,35 @@ func testOnStorITCRUDThreshold(t *testing.T) {
 		t.Error(rcvErr)
 	}
 }
+
+func testOnStorITCRUDFilterProfile(t *testing.T) {
+	fp := &FilterProfile{
+		Tenant:            "cgrates.org",
+		ID:                "Filter1",
+		FilterFieldName:   "Account",
+		FilterType:        "*string",
+		FilterFieldValues: []string{"1001", "1002"},
+	}
+	if _, rcvErr := onStor.GetFilterProfile("cgrates.org", "Filter1", true, utils.NonTransactional); rcvErr != nil && rcvErr != utils.ErrNotFound {
+		t.Error(rcvErr)
+	}
+	if err := onStor.SetFilterProfile(fp); err != nil {
+		t.Error(err)
+	}
+	if rcv, err := onStor.GetFilterProfile("cgrates.org", "Filter1", true, utils.NonTransactional); err != nil {
+		t.Error(err)
+	} else if !(reflect.DeepEqual(fp, rcv)) {
+		t.Errorf("Expecting: %v, received: %v", fp, rcv)
+	}
+	if rcv, err := onStor.GetFilterProfile("cgrates.org", "Filter1", false, utils.NonTransactional); err != nil {
+		t.Error(err)
+	} else if !reflect.DeepEqual(fp, rcv) {
+		t.Errorf("Expecting: %v, received: %v", fp, rcv)
+	}
+	if err := onStor.RemoveFilterProfile(fp.Tenant, fp.ID, utils.NonTransactional); err != nil {
+		t.Error(err)
+	}
+	if _, rcvErr := onStor.GetFilterProfile("cgrates.org", "Filter1", true, utils.NonTransactional); rcvErr != nil && rcvErr != utils.ErrNotFound {
+		t.Error(rcvErr)
+	}
+}
diff --git a/engine/storage_csv.go b/engine/storage_csv.go
index de7116bc9..481a31b79 100755
--- a/engine/storage_csv.go
+++ b/engine/storage_csv.go
@@ -33,26 +33,26 @@ type CSVStorage struct {
 	readerFunc func(string, rune, int) (*csv.Reader, *os.File, error)
 	// file names
 	destinationsFn, ratesFn, destinationratesFn, timingsFn, destinationratetimingsFn, ratingprofilesFn,
-	sharedgroupsFn, lcrFn, actionsFn, actiontimingsFn, actiontriggersFn, accountactionsFn, derivedChargersFn, cdrStatsFn, usersFn, aliasesFn, resProfilesFn, statsFn, thresholdsFn string
+	sharedgroupsFn, lcrFn, actionsFn, actiontimingsFn, actiontriggersFn, accountactionsFn, derivedChargersFn, cdrStatsFn, usersFn, aliasesFn, resProfilesFn, statsFn, thresholdsFn, filterFn string
 }
 
 func NewFileCSVStorage(sep rune, destinationsFn, timingsFn, ratesFn, destinationratesFn, destinationratetimingsFn, ratingprofilesFn, sharedgroupsFn, lcrFn,
-	actionsFn, actiontimingsFn, actiontriggersFn, accountactionsFn, derivedChargersFn, cdrStatsFn, usersFn, aliasesFn, resProfilesFn, statsFn, thresholdsFn string) *CSVStorage {
+	actionsFn, actiontimingsFn, actiontriggersFn, accountactionsFn, derivedChargersFn, cdrStatsFn, usersFn, aliasesFn, resProfilesFn, statsFn, thresholdsFn, filterFn string) *CSVStorage {
 	c := new(CSVStorage)
 	c.sep = sep
 	c.readerFunc = openFileCSVStorage
 	c.destinationsFn, c.timingsFn, c.ratesFn, c.destinationratesFn, c.destinationratetimingsFn, c.ratingprofilesFn,
-		c.sharedgroupsFn, c.lcrFn, c.actionsFn, c.actiontimingsFn, c.actiontriggersFn, c.accountactionsFn, c.derivedChargersFn, c.cdrStatsFn, c.usersFn, c.aliasesFn, c.resProfilesFn, c.statsFn, c.thresholdsFn = destinationsFn, timingsFn,
-		ratesFn, destinationratesFn, destinationratetimingsFn, ratingprofilesFn, sharedgroupsFn, lcrFn, actionsFn, actiontimingsFn, actiontriggersFn, accountactionsFn, derivedChargersFn, cdrStatsFn, usersFn, aliasesFn, resProfilesFn, statsFn, thresholdsFn
+		c.sharedgroupsFn, c.lcrFn, c.actionsFn, c.actiontimingsFn, c.actiontriggersFn, c.accountactionsFn, c.derivedChargersFn, c.cdrStatsFn, c.usersFn, c.aliasesFn, c.resProfilesFn, c.statsFn, c.thresholdsFn, c.filterFn = destinationsFn, timingsFn,
+		ratesFn, destinationratesFn, destinationratetimingsFn, ratingprofilesFn, sharedgroupsFn, lcrFn, actionsFn, actiontimingsFn, actiontriggersFn, accountactionsFn, derivedChargersFn, cdrStatsFn, usersFn, aliasesFn, resProfilesFn, statsFn, thresholdsFn, filterFn
 	return c
 }
 
 func NewStringCSVStorage(sep rune, destinationsFn, timingsFn, ratesFn, destinationratesFn, destinationratetimingsFn, ratingprofilesFn, sharedgroupsFn, lcrFn,
-	actionsFn, actiontimingsFn, actiontriggersFn, accountactionsFn, derivedChargersFn, cdrStatsFn, usersFn, aliasesFn, resProfilesFn, statsFn, thresholdsFn string) *CSVStorage {
+	actionsFn, actiontimingsFn, actiontriggersFn, accountactionsFn, derivedChargersFn, cdrStatsFn, usersFn, aliasesFn, resProfilesFn, statsFn, thresholdsFn, filterFn string) *CSVStorage {
 	c := NewFileCSVStorage(sep, destinationsFn, timingsFn, ratesFn, destinationratesFn, destinationratetimingsFn,
-		ratingprofilesFn, sharedgroupsFn, lcrFn, actionsFn, actiontimingsFn, actiontriggersFn, accountactionsFn, derivedChargersFn, cdrStatsFn, usersFn, aliasesFn, resProfilesFn, statsFn, thresholdsFn)
+		ratingprofilesFn, sharedgroupsFn, lcrFn, actionsFn, actiontimingsFn, actiontriggersFn, accountactionsFn, derivedChargersFn, cdrStatsFn, usersFn, aliasesFn, resProfilesFn, statsFn, thresholdsFn, filterFn)
 	c.readerFunc = openStringCSVStorage
 	return c
 }
@@ -652,7 +652,7 @@ func (csvs *CSVStorage) GetTPStats(tpid, id string) ([]*utils.TPStats, error) {
 func (csvs *CSVStorage) GetTPThreshold(tpid, id string) ([]*utils.TPThreshold, error) {
 	csvReader, fp, err := csvs.readerFunc(csvs.thresholdsFn, csvs.sep, getColumnCount(TpThreshold{}))
 	if err != nil {
-		//log.Print("Could not load stats file: ", err)
+		//log.Print("Could not load threshold file: ", err)
 		// allow writing of the other values
 		return nil, nil
 	}
@@ -677,6 +677,34 @@ func (csvs *CSVStorage) GetTPThreshold(tpid, id string) ([]*utils.TPThreshold, e
 	return tpThreshold.AsTPThreshold(), nil
 }
 
+func (csvs *CSVStorage) GetTPFilter(tpid, id string) ([]*utils.TPFilter, error) {
+	csvReader, fp, err := csvs.readerFunc(csvs.filterFn, csvs.sep, getColumnCount(TpFilter{}))
+	if err != nil {
+		//log.Print("Could not load filter file: ", err)
+		// allow writing of the other values
+		return nil, nil
+	}
+	if fp != nil {
+		defer fp.Close()
+	}
+	var tpFilter TpFilterS
+	for record, err := csvReader.Read(); err != io.EOF; record, err = csvReader.Read() {
+		if err != nil {
+			log.Print("bad line in TPFilter csv: ", err)
+			return nil, err
+		}
+		if filterCfg, err := csvLoad(TpFilter{}, record); err != nil {
+			log.Print("error loading TPFilter: ", err)
+			return nil, err
+		} else {
+			fIlterCfg := filterCfg.(TpFilter)
+			fIlterCfg.Tpid = tpid
+			tpFilter = append(tpFilter, &fIlterCfg)
+		}
+	}
+	return tpFilter.AsTPFilter(), nil
+}
+
 func (csvs *CSVStorage) GetTpIds() ([]string, error) {
 	return nil, utils.ErrNotImplemented
 }
diff --git a/engine/storage_interface.go b/engine/storage_interface.go
index 4dc6ed658..561470acc 100755
--- a/engine/storage_interface.go
+++ b/engine/storage_interface.go
@@ -126,6 +126,9 @@ type DataDB interface {
 	GetThreshold(string, string, bool, string) (*Threshold, error)
 	SetThreshold(*Threshold) error
 	RemoveThreshold(string, string, string) error
+	GetFilterProfile(string, string, bool, string) (*FilterProfile, error)
+	SetFilterProfile(*FilterProfile) error
+	RemoveFilterProfile(string, string, string) error
 	// CacheDataFromDB loads data to cache, prefix represents the cache prefix, IDs should be nil if all available data should be loaded
 	CacheDataFromDB(prefix string, IDs []string, mustBeCached bool) error // ToDo: Move this to dataManager
 }
@@ -174,6 +177,7 @@ type LoadReader interface {
 	GetTPResources(string, string) ([]*utils.TPResource, error)
 	GetTPStats(string, string) ([]*utils.TPStats, error)
 	GetTPThreshold(string, string) ([]*utils.TPThreshold, error)
+	GetTPFilter(string, string) ([]*utils.TPFilter, error)
 }
 
 type LoadWriter interface {
@@ -197,6 +201,7 @@ type LoadWriter interface {
 	SetTPResources([]*utils.TPResource) error
 	SetTPStats([]*utils.TPStats) error
 	SetTPThreshold([]*utils.TPThreshold) error
+	SetTPFilter([]*utils.TPFilter) error
 }
 
 // NewMarshaler returns the marshaler type selected by mrshlerStr
diff --git a/engine/storage_map.go b/engine/storage_map.go
index 0c6b6d46c..d7d9d5ba0 100755
--- a/engine/storage_map.go
+++ b/engine/storage_map.go
@@ -1669,6 +1669,51 @@ func (ms *MapStorage) RemoveThreshold(tenant, id string, transactionID string) (
 	return
 }
 
+func (ms *MapStorage) GetFilterProfile(tenant, id string, skipCache bool, transactionID string) (r *FilterProfile, err error) {
+	ms.mu.RLock()
+	defer ms.mu.RUnlock()
+	key := utils.FilterProfilePrefix + utils.ConcatenatedKey(tenant, id)
+	if !skipCache {
+		if x, ok := cache.Get(key); ok {
+			if x != nil {
+				return x.(*FilterProfile), nil
+			}
+			return nil, utils.ErrNotFound
+		}
+	}
+	values, ok := ms.dict[key]
+	if !ok {
+		cache.Set(key, nil, cacheCommit(transactionID), transactionID)
+		return nil, utils.ErrNotFound
+	}
+	err = ms.ms.Unmarshal(values, &r)
+	if err != nil {
+		return nil, err
+	}
+	cache.Set(key, r, cacheCommit(transactionID), transactionID)
+	return
+}
+
+func (ms *MapStorage) SetFilterProfile(r *FilterProfile) (err error) {
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	result, err := ms.ms.Marshal(r)
+	if err != nil {
+		return err
+	}
+	ms.dict[utils.FilterProfilePrefix+utils.ConcatenatedKey(r.Tenant, r.ID)] = result
+	return
+}
+
+func (ms *MapStorage) RemoveFilterProfile(tenant, id string, transactionID string) (err error) {
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	key := utils.FilterProfilePrefix + utils.ConcatenatedKey(tenant, id)
+	delete(ms.dict, key)
+	cache.RemKey(key, cacheCommit(transactionID), transactionID)
+	return
+}
+
 func (ms *MapStorage) GetVersions(itm string) (vrs Versions, err error) {
 	ms.mu.Lock()
 	defer ms.mu.Unlock()
diff --git a/engine/storage_mongo_datadb.go b/engine/storage_mongo_datadb.go
index 9d285e92e..94e13112e 100755
--- a/engine/storage_mongo_datadb.go
+++ b/engine/storage_mongo_datadb.go
@@ -63,6 +63,7 @@ const (
 	colSqp  = "statqueue_profiles"
 	colTlds = "threshold_profiles"
 	colThs  = "thresholds"
+	colFlt  = "filter_profiles"
 )
 
 var (
@@ -332,6 +333,7 @@ func (ms *MongoStorage) getColNameForPrefix(prefix string) (name string, ok bool
 		utils.ResourceProfilesPrefix: colRsP,
 		utils.ThresholdProfilePrefix: colTlds,
 		utils.ThresholdPrefix:        colThs,
+		utils.FilterProfilePrefix:    colFlt,
 	}
 	name, ok = colMap[prefix]
 	return
@@ -542,6 +544,9 @@ func (ms
*MongoStorage) CacheDataFromDB(prfx string, ids []string, mustBeCached case utils.ThresholdPrefix: tntID := utils.NewTenantID(dataID) _, err = ms.GetThreshold(tntID.Tenant, tntID.ID, true, utils.NonTransactional) + case utils.FilterProfilePrefix: + tntID := utils.NewTenantID(dataID) + _, err = ms.GetFilterProfile(tntID.Tenant, tntID.ID, true, utils.NonTransactional) } if err != nil { return utils.NewCGRError(utils.MONGO, @@ -681,6 +686,11 @@ func (ms *MongoStorage) GetKeysForPrefix(prefix string) (result []string, err er for iter.Next(&idResult) { result = append(result, utils.TimingsPrefix+idResult.Id) } + case utils.FilterProfilePrefix: + iter := db.C(colFlt).Find(bson.M{"id": bson.M{"$regex": bson.RegEx{Pattern: subject}}}).Select(bson.M{"tenant": 1, "id": 1}).Iter() + for iter.Next(&idResult) { + result = append(result, utils.FilterProfilePrefix+utils.ConcatenatedKey(idResult.Tenant, idResult.Id)) + } default: err = fmt.Errorf("unsupported prefix in GetKeysForPrefix: %s", prefix) } @@ -2227,3 +2237,45 @@ func (ms *MongoStorage) RemoveThreshold(tenant, id string, transactionID string) cacheCommit(transactionID), transactionID) return nil } + +func (ms *MongoStorage) GetFilterProfile(tenant, id string, skipCache bool, transactionID string) (r *FilterProfile, err error) { + key := utils.FilterProfilePrefix + utils.ConcatenatedKey(tenant, id) + if !skipCache { + if x, ok := cache.Get(key); ok { + if x == nil { + return nil, utils.ErrNotFound + } + return x.(*FilterProfile), nil + } + } + session, col := ms.conn(colFlt) + defer session.Close() + r = new(FilterProfile) + if err = col.Find(bson.M{"tenant": tenant, "id": id}).One(r); err != nil { + if err == mgo.ErrNotFound { + err = utils.ErrNotFound + cache.Set(key, nil, cacheCommit(transactionID), transactionID) + } + return nil, err + } + cache.Set(key, r, cacheCommit(transactionID), transactionID) + return +} + +func (ms *MongoStorage) SetFilterProfile(r *FilterProfile) (err error) { + session, col := ms.conn(colFlt) + defer session.Close() + _, err = col.Upsert(bson.M{"tenant": r.Tenant, "id": r.ID}, r) + return +} + +func (ms *MongoStorage) RemoveFilterProfile(tenant, id string, transactionID string) (err error) { + session, col := ms.conn(colFlt) + defer session.Close() + if err = col.Remove(bson.M{"tenant": tenant, "id": id}); err != nil { + return + } + cache.RemKey(utils.FilterProfilePrefix+utils.ConcatenatedKey(tenant, id), + cacheCommit(transactionID), transactionID) + return nil +} diff --git a/engine/storage_mongo_stordb.go b/engine/storage_mongo_stordb.go index a934fa0dd..13c76c2ae 100755 --- a/engine/storage_mongo_stordb.go +++ b/engine/storage_mongo_stordb.go @@ -1210,6 +1210,37 @@ func (ms *MongoStorage) SetTPThreshold(tpTHs []*utils.TPThreshold) (err error) { return } +func (ms *MongoStorage) GetTPFilter(tpid, id string) ([]*utils.TPFilter, error) { + filter := bson.M{ + "tpid": tpid, + } + if id != "" { + filter["id"] = id + } + var results []*utils.TPFilter + session, col := ms.conn(utils.TBLTPFilters) + defer session.Close() + err := col.Find(filter).All(&results) + if len(results) == 0 { + return results, utils.ErrNotFound + } + return results, err +} + +func (ms *MongoStorage) SetTPFilter(tpTHs []*utils.TPFilter) (err error) { + if len(tpTHs) == 0 { + return + } + session, col := ms.conn(utils.TBLTPFilters) + defer session.Close() + tx := col.Bulk() + for _, tp := range tpTHs { + tx.Upsert(bson.M{"tpid": tp.TPid, "id": tp.ID}, tp) + } + _, err = tx.Run() + return +} + func (ms *MongoStorage) GetVersions(itm string) 
(vrs Versions, err error) { session, col := ms.conn(colVer) defer session.Close() diff --git a/engine/storage_redis.go b/engine/storage_redis.go index dd49f6c21..db4526c3e 100755 --- a/engine/storage_redis.go +++ b/engine/storage_redis.go @@ -335,7 +335,7 @@ func (rs *RedisStorage) HasData(category, subject string) (bool, error) { switch category { case utils.DESTINATION_PREFIX, utils.RATING_PLAN_PREFIX, utils.RATING_PROFILE_PREFIX, utils.ACTION_PREFIX, utils.ACTION_PLAN_PREFIX, utils.ACCOUNT_PREFIX, utils.DERIVEDCHARGERS_PREFIX, - utils.ResourcesPrefix, utils.StatQueuePrefix, utils.ThresholdProfilePrefix: + utils.ResourcesPrefix, utils.StatQueuePrefix, utils.ThresholdProfilePrefix, utils.FilterProfilePrefix: i, err := rs.Cmd("EXISTS", category+subject).Int() return i == 1, err } @@ -1776,6 +1776,48 @@ func (rs *RedisStorage) RemoveThreshold(tenant, id string, transactionID string) return } +func (rs *RedisStorage) GetFilterProfile(tenant, id string, skipCache bool, transactionID string) (r *FilterProfile, err error) { + key := utils.FilterProfilePrefix + utils.ConcatenatedKey(tenant, id) + if !skipCache { + if x, ok := cache.Get(key); ok { + if x == nil { + return nil, utils.ErrNotFound + } + return x.(*FilterProfile), nil + } + } + var values []byte + if values, err = rs.Cmd("GET", key).Bytes(); err != nil { + if err == redis.ErrRespNil { // did not find the destination + cache.Set(key, nil, cacheCommit(transactionID), transactionID) + err = utils.ErrNotFound + } + return + } + if err = rs.ms.Unmarshal(values, &r); err != nil { + return + } + cache.Set(key, r, cacheCommit(transactionID), transactionID) + return +} + +func (rs *RedisStorage) SetFilterProfile(r *FilterProfile) (err error) { + result, err := rs.ms.Marshal(r) + if err != nil { + return err + } + return rs.Cmd("SET", utils.FilterProfilePrefix+utils.ConcatenatedKey(r.Tenant, r.ID), result).Err +} + +func (rs *RedisStorage) RemoveFilterProfile(tenant, id string, transactionID string) (err error) { + key := utils.FilterProfilePrefix + utils.ConcatenatedKey(tenant, id) + if err = rs.Cmd("DEL", key).Err; err != nil { + return + } + cache.RemKey(key, cacheCommit(transactionID), transactionID) + return +} + func (rs *RedisStorage) GetStorageType() string { return utils.REDIS } diff --git a/engine/storage_sql.go b/engine/storage_sql.go index f995115f3..2431c2326 100755 --- a/engine/storage_sql.go +++ b/engine/storage_sql.go @@ -209,7 +209,7 @@ func (self *SQLStorage) RemTpData(table, tpid string, args map[string]string) er if len(table) == 0 { // Remove tpid out of all tables for _, tblName := range []string{utils.TBLTPTimings, utils.TBLTPDestinations, utils.TBLTPRates, utils.TBLTPDestinationRates, utils.TBLTPRatingPlans, utils.TBLTPRateProfiles, utils.TBLTPSharedGroups, utils.TBLTPCdrStats, utils.TBLTPLcrs, utils.TBLTPActions, utils.TBLTPActionPlans, utils.TBLTPActionTriggers, utils.TBLTPAccountActions, - utils.TBLTPDerivedChargers, utils.TBLTPAliases, utils.TBLTPUsers, utils.TBLTPResources, utils.TBLTPStats} { + utils.TBLTPDerivedChargers, utils.TBLTPAliases, utils.TBLTPUsers, utils.TBLTPResources, utils.TBLTPStats, utils.TBLTPFilters} { if err := tx.Table(tblName).Where("tpid = ?", tpid).Delete(nil).Error; err != nil { tx.Rollback() return err @@ -642,6 +642,28 @@ func (self *SQLStorage) SetTPThreshold(ths []*utils.TPThreshold) error { return nil } +func (self *SQLStorage) SetTPFilter(ths []*utils.TPFilter) error { + if len(ths) == 0 { + return nil + } + tx := self.db.Begin() + for _, th := range ths { + // Remove previous + if 
err := tx.Where(&TpFilter{Tpid: th.TPid, ID: th.ID}).Delete(TpFilter{}).Error; err != nil {
+			tx.Rollback()
+			return err
+		}
+		for _, mst := range APItoModelTPFilter(th) {
+			if err := tx.Save(&mst).Error; err != nil {
+				tx.Rollback()
+				return err
+			}
+		}
+	}
+	tx.Commit()
+	return nil
+}
+
 func (self *SQLStorage) SetSMCost(smc *SMCost) error {
 	if smc.CostDetails == nil {
 		return nil
@@ -1604,6 +1626,22 @@ func (self *SQLStorage) GetTPThreshold(tpid, id string) ([]*utils.TPThreshold, e
 	return aths, nil
 }
 
+func (self *SQLStorage) GetTPFilter(tpid, id string) ([]*utils.TPFilter, error) {
+	var ths TpFilterS
+	q := self.db.Where("tpid = ?", tpid)
+	if len(id) != 0 {
+		q = q.Where("id = ?", id)
+	}
+	if err := q.Find(&ths).Error; err != nil {
+		return nil, err
+	}
+	aths := ths.AsTPFilter()
+	if len(aths) == 0 {
+		return aths, utils.ErrNotFound
+	}
+	return aths, nil
+}
+
 // GetVersions returns slice of all versions or a specific version if tag is specified
 func (self *SQLStorage) GetVersions(itm string) (vrs Versions, err error) {
 	q := self.db.Model(&TBLVersion{})
diff --git a/engine/tp_reader.go b/engine/tp_reader.go
index 4f658c77f..aa2b2e36f 100755
--- a/engine/tp_reader.go
+++ b/engine/tp_reader.go
@@ -57,9 +57,11 @@ type TpReader struct {
 	resProfiles map[string]map[string]*utils.TPResource
 	sqProfiles  map[string]map[string]*utils.TPStats
 	thProfiles  map[string]map[string]*utils.TPThreshold
+	flProfiles  map[string]map[string]*utils.TPFilter
 	resources   []*utils.TenantID // IDs of resources which need creation based on resourceProfiles
 	statQueues  []*utils.TenantID // IDs of statQueues which need creation based on statQueueProfiles
 	thresholds  []*utils.TenantID // IDs of thresholds which need creation based on thresholdProfiles
+	filters     []*utils.TenantID
 	revDests,
 	revAliases,
 	acntActionPlans map[string][]string
@@ -134,6 +136,7 @@ func (tpr *TpReader) Init() {
 	tpr.resProfiles = make(map[string]map[string]*utils.TPResource)
 	tpr.sqProfiles = make(map[string]map[string]*utils.TPStats)
 	tpr.thProfiles = make(map[string]map[string]*utils.TPThreshold)
+	tpr.flProfiles = make(map[string]map[string]*utils.TPFilter)
 	tpr.revDests = make(map[string][]string)
 	tpr.revAliases = make(map[string][]string)
 	tpr.acntActionPlans = make(map[string][]string)
@@ -1671,7 +1674,7 @@ func (tpr *TpReader) LoadThresholdsFiltered(tag string) error {
 	tpr.thProfiles = mapTHs
 	for tenant, mpID := range mapTHs {
 		for thID := range mpID {
-			thTntID := &utils.TenantID{tenant, thID}
+			thTntID := &utils.TenantID{Tenant: tenant, ID: thID}
 			if has, err := tpr.dataStorage.HasData(utils.ThresholdProfilePrefix, thTntID.TenantID()); err != nil {
 				return err
 			} else if !has {
@@ -1686,6 +1689,36 @@ func (tpr *TpReader) LoadThresholds() error {
 	return tpr.LoadThresholdsFiltered("")
 }
 
+func (tpr *TpReader) LoadFilterFiltered(tag string) error {
+	tps, err := tpr.lr.GetTPFilter(tpr.tpid, tag)
+	if err != nil {
+		return err
+	}
+	mapTHs := make(map[string]map[string]*utils.TPFilter)
+	for _, th := range tps {
+		if _, has := mapTHs[th.Tenant]; !has {
+			mapTHs[th.Tenant] = make(map[string]*utils.TPFilter)
+		}
+		mapTHs[th.Tenant][th.ID] = th
+	}
+	tpr.flProfiles = mapTHs
+	for tenant, mpID := range mapTHs {
+		for thID := range mpID {
+			thTntID := &utils.TenantID{Tenant: tenant, ID: thID}
+			if has, err := tpr.dataStorage.HasData(utils.FilterProfilePrefix, thTntID.TenantID()); err != nil {
+				return err
+			} else if !has {
+				tpr.filters = append(tpr.filters, thTntID)
+			}
+		}
+	}
+	return nil
+}
+
+func (tpr *TpReader) LoadFilter() error {
+	return
tpr.LoadFilterFiltered("") +} + func (tpr *TpReader) LoadAll() (err error) { if err = tpr.LoadDestinations(); err != nil && err.Error() != utils.NotFoundCaps { return @@ -1744,6 +1777,9 @@ func (tpr *TpReader) LoadAll() (err error) { if err = tpr.LoadThresholds(); err != nil && err.Error() != utils.NotFoundCaps { return } + if err = tpr.LoadFilter(); err != nil && err.Error() != utils.NotFoundCaps { + return + } return nil } @@ -2058,6 +2094,23 @@ func (tpr *TpReader) WriteToDatabase(flush, verbose, disable_reverse bool) (err } } } + if verbose { + log.Print("Filters:") + } + for _, mpID := range tpr.flProfiles { + for _, tpTH := range mpID { + th, err := APItoFilterProfile(tpTH) + if err != nil { + return err + } + if err = tpr.dataStorage.SetFilterProfile(th); err != nil { + return err + } + if verbose { + log.Print("\t", th.TenantID()) + } + } + } if verbose { log.Print("Timings:") } @@ -2382,6 +2435,14 @@ func (tpr *TpReader) GetLoadedIds(categ string) ([]string, error) { i++ } return keys, nil + case utils.FilterProfilePrefix: + keys := make([]string, len(tpr.flProfiles)) + i := 0 + for k := range tpr.flProfiles { + keys[i] = k + i++ + } + return keys, nil } return nil, errors.New("Unsupported load category") } diff --git a/engine/tpexporter.go b/engine/tpexporter.go index 437b9f25f..0b8eeaf3d 100644 --- a/engine/tpexporter.go +++ b/engine/tpexporter.go @@ -250,6 +250,16 @@ func (self *TPExporter) Run() error { toExportMap[utils.ThresholdsCsv][i] = sdModel[0] } + storDataFilters, err := self.storDb.GetTPFilter(self.tpID, "") + if err != nil && err.Error() != utils.ErrNotFound.Error() { + return err + } + toExportMap[utils.FiltersCsv] = make([]interface{}, len(storDataFilters)) + for i, sd := range storDataFilters { + sdModel := APItoModelTPFilter(sd) + toExportMap[utils.FiltersCsv][i] = sdModel[0] + } + storDataUsers, err := self.storDb.GetTPUsers(&utils.TPUsers{TPid: self.tpID}) if err != nil && err.Error() != utils.ErrNotFound.Error() { return err diff --git a/engine/tpimporter_csv.go b/engine/tpimporter_csv.go index 3266874cf..03992064a 100755 --- a/engine/tpimporter_csv.go +++ b/engine/tpimporter_csv.go @@ -60,6 +60,7 @@ var fileHandlers = map[string]func(*TPCSVImporter, string) error{ utils.ResourcesCsv: (*TPCSVImporter).importResources, utils.StatsCsv: (*TPCSVImporter).importStats, utils.ThresholdsCsv: (*TPCSVImporter).importThresholds, + utils.FiltersCsv: (*TPCSVImporter).importFilters, } func (self *TPCSVImporter) Run() error { @@ -83,6 +84,7 @@ func (self *TPCSVImporter) Run() error { path.Join(self.DirPath, utils.ResourcesCsv), path.Join(self.DirPath, utils.StatsCsv), path.Join(self.DirPath, utils.ThresholdsCsv), + path.Join(self.DirPath, utils.FiltersCsv), ) files, _ := ioutil.ReadDir(self.DirPath) for _, f := range files { @@ -384,3 +386,14 @@ func (self *TPCSVImporter) importThresholds(fn string) error { } return self.StorDb.SetTPThreshold(sts) } + +func (self *TPCSVImporter) importFilters(fn string) error { + if self.Verbose { + log.Printf("Processing file: <%s> ", fn) + } + sts, err := self.csvr.GetTPFilter(self.TPid, "") + if err != nil { + return err + } + return self.StorDb.SetTPFilter(sts) +} diff --git a/general_tests/acntacts_test.go b/general_tests/acntacts_test.go index 450071c05..9f9953571 100644 --- a/general_tests/acntacts_test.go +++ b/general_tests/acntacts_test.go @@ -55,8 +55,9 @@ ENABLE_ACNT,*enable_account,,,,,,,,,,,,,,false,false,10` resLimits := `` stats := `` thresholds := `` + filters := `` csvr := engine.NewTpReader(dbAcntActs, 
engine.NewStringCSVStorage(',', destinations, timings, rates, destinationRates, ratingPlans, ratingProfiles, - sharedGroups, lcrs, actions, actionPlans, actionTriggers, accountActions, derivedCharges, cdrStats, users, aliases, resLimits, stats, thresholds), "", "") + sharedGroups, lcrs, actions, actionPlans, actionTriggers, accountActions, derivedCharges, cdrStats, users, aliases, resLimits, stats, thresholds, filters), "", "") if err := csvr.LoadAll(); err != nil { t.Fatal(err) } diff --git a/general_tests/auth_test.go b/general_tests/auth_test.go index 224a71e36..374d0ae40 100644 --- a/general_tests/auth_test.go +++ b/general_tests/auth_test.go @@ -62,8 +62,9 @@ RP_ANY,DR_ANY_1CNT,*any,10` resLimits := `` stats := `` thresholds := `` + filters := `` csvr := engine.NewTpReader(dbAuth, engine.NewStringCSVStorage(',', destinations, timings, rates, destinationRates, ratingPlans, ratingProfiles, - sharedGroups, lcrs, actions, actionPlans, actionTriggers, accountActions, derivedCharges, cdrStats, users, aliases, resLimits, stats, thresholds), "", "") + sharedGroups, lcrs, actions, actionPlans, actionTriggers, accountActions, derivedCharges, cdrStats, users, aliases, resLimits, stats, thresholds, filters), "", "") if err := csvr.LoadAll(); err != nil { t.Fatal(err) } diff --git a/general_tests/costs1_test.go b/general_tests/costs1_test.go index 5c270ac5e..6a5b78b02 100644 --- a/general_tests/costs1_test.go +++ b/general_tests/costs1_test.go @@ -51,7 +51,7 @@ RP_SMS1,DR_SMS_1,ALWAYS,10` *out,cgrates.org,data,*any,2012-01-01T00:00:00Z,RP_DATA1,, *out,cgrates.org,sms,*any,2012-01-01T00:00:00Z,RP_SMS1,,` csvr := engine.NewTpReader(dataDB, engine.NewStringCSVStorage(',', dests, timings, rates, destinationRates, ratingPlans, ratingProfiles, - "", "", "", "", "", "", "", "", "", "", "", "", ""), "", "") + "", "", "", "", "", "", "", "", "", "", "", "", "", ""), "", "") if err := csvr.LoadTimings(); err != nil { t.Fatal(err) diff --git a/general_tests/datachrg1_test.go b/general_tests/datachrg1_test.go index 6f2e0ebc4..021de724a 100644 --- a/general_tests/datachrg1_test.go +++ b/general_tests/datachrg1_test.go @@ -42,7 +42,7 @@ DR_DATA_2,*any,RT_DATA_1c,*up,4,0,` RP_DATA1,DR_DATA_2,TM2,10` ratingProfiles := `*out,cgrates.org,data,*any,2012-01-01T00:00:00Z,RP_DATA1,,` csvr := engine.NewTpReader(dataDB, engine.NewStringCSVStorage(',', "", timings, rates, destinationRates, ratingPlans, ratingProfiles, - "", "", "", "", "", "", "", "", "", "", "", "", ""), "", "") + "", "", "", "", "", "", "", "", "", "", "", "", "", ""), "", "") if err := csvr.LoadTimings(); err != nil { t.Fatal(err) } diff --git a/general_tests/ddazmbl1_test.go b/general_tests/ddazmbl1_test.go index cb3295890..04a037472 100644 --- a/general_tests/ddazmbl1_test.go +++ b/general_tests/ddazmbl1_test.go @@ -62,8 +62,9 @@ TOPUP10_AT,TOPUP10_AC1,ASAP,10` resLimits := `` stats := `` thresholds := `` + filters := `` csvr := engine.NewTpReader(dataDB, engine.NewStringCSVStorage(',', destinations, timings, rates, destinationRates, ratingPlans, ratingProfiles, - sharedGroups, lcrs, actions, actionPlans, actionTriggers, accountActions, derivedCharges, cdrStats, users, aliases, resLimits, stats, thresholds), "", "") + sharedGroups, lcrs, actions, actionPlans, actionTriggers, accountActions, derivedCharges, cdrStats, users, aliases, resLimits, stats, thresholds, filters), "", "") if err := csvr.LoadDestinations(); err != nil { t.Fatal(err) } diff --git a/general_tests/ddazmbl2_test.go b/general_tests/ddazmbl2_test.go index 890359e80..257b21f6e 100644 
--- a/general_tests/ddazmbl2_test.go
+++ b/general_tests/ddazmbl2_test.go
@@ -62,8 +62,9 @@ TOPUP10_AT,TOPUP10_AC1,ASAP,10`
 	resLimits := ``
 	stats := ``
 	thresholds := ``
+	filters := ``
 	csvr := engine.NewTpReader(dataDB2, engine.NewStringCSVStorage(',', destinations, timings, rates, destinationRates, ratingPlans, ratingProfiles,
-		sharedGroups, lcrs, actions, actionPlans, actionTriggers, accountActions, derivedCharges, cdrStats, users, aliases, resLimits, stats, thresholds), "", "")
+		sharedGroups, lcrs, actions, actionPlans, actionTriggers, accountActions, derivedCharges, cdrStats, users, aliases, resLimits, stats, thresholds, filters), "", "")
 	if err := csvr.LoadDestinations(); err != nil {
 		t.Fatal(err)
 	}
diff --git a/general_tests/ddazmbl3_test.go b/general_tests/ddazmbl3_test.go
index 145f554b5..e366bd6b6 100644
--- a/general_tests/ddazmbl3_test.go
+++ b/general_tests/ddazmbl3_test.go
@@ -60,8 +60,9 @@ RP_UK,DR_UK_Mobile_BIG5,ALWAYS,10`
 	resLimits := ``
 	stats := ``
 	thresholds := ``
+	filters := ``
 	csvr := engine.NewTpReader(dataDB3, engine.NewStringCSVStorage(',', destinations, timings, rates, destinationRates, ratingPlans, ratingProfiles,
-		sharedGroups, lcrs, actions, actionPlans, actionTriggers, accountActions, derivedCharges, cdrStats, users, aliases, resLimits, stats, thresholds), "", "")
+		sharedGroups, lcrs, actions, actionPlans, actionTriggers, accountActions, derivedCharges, cdrStats, users, aliases, resLimits, stats, thresholds, filters), "", "")
 	if err := csvr.LoadDestinations(); err != nil {
 		t.Fatal(err)
 	}
diff --git a/general_tests/smschrg1_test.go b/general_tests/smschrg1_test.go
index e02c08c8c..25515ffa0 100644
--- a/general_tests/smschrg1_test.go
+++ b/general_tests/smschrg1_test.go
@@ -40,7 +40,7 @@ func TestSMSLoadCsvTpSmsChrg1(t *testing.T) {
 	ratingPlans := `RP_SMS1,DR_SMS_1,ALWAYS,10`
 	ratingProfiles := `*out,cgrates.org,sms,*any,2012-01-01T00:00:00Z,RP_SMS1,,`
 	csvr := engine.NewTpReader(dataDB, engine.NewStringCSVStorage(',', "", timings, rates, destinationRates, ratingPlans, ratingProfiles,
-		"", "", "", "", "", "", "", "", "", "", "", "", ""), "", "")
+		"", "", "", "", "", "", "", "", "", "", "", "", "", ""), "", "")
 	if err := csvr.LoadTimings(); err != nil {
 		t.Fatal(err)
 	}
diff --git a/utils/apitpdata.go b/utils/apitpdata.go
index 0460fa9e5..df349037c 100755
--- a/utils/apitpdata.go
+++ b/utils/apitpdata.go
@@ -1367,3 +1367,13 @@ type TPThreshold struct {
 	Weight    float64 // Weight to sort the thresholds
 	ActionIDs []string
 }
+
+type TPFilter struct {
+	TPid             string
+	Tenant           string
+	ID               string
+	FilterType       string   // Filter type (*string, *timing, *rsr_filters, *cdr_stats)
+	FilterFieldName  string   // Name of the field providing us the Values to check
+	FilterFielValues []string // Filter definition
+
+}
diff --git a/utils/consts.go b/utils/consts.go
index b2462568d..03d2cbf8a 100755
--- a/utils/consts.go
+++ b/utils/consts.go
@@ -108,6 +108,7 @@ const (
 	TBLTPResources  = "tp_resources"
 	TBLTPStats      = "tp_stats"
 	TBLTPThresholds = "tp_thresholds"
+	TBLTPFilters    = "tp_filters"
 	TBLSMCosts      = "sm_costs"
 	TBLCDRs         = "cdrs"
 	TBLVersions     = "versions"
@@ -130,6 +131,7 @@ const (
 	ResourcesCsv    = "Resources.csv"
 	StatsCsv        = "Stats.csv"
 	ThresholdsCsv   = "Thresholds.csv"
+	FiltersCsv      = "Filters.csv"
 	ROUNDING_UP     = "*up"
 	ROUNDING_MIDDLE = "*middle"
 	ROUNDING_DOWN   = "*down"
@@ -247,6 +249,8 @@ const (
 	ThresholdPrefix         = "ths_"
 	ThresholdsIndex         = "thi_"
 	TimingsPrefix           = "tmg_"
+	FilterProfilePrefix     = "fpp"
+	FilterProfileIndex      = "fpi"
 	CDR_STATS_PREFIX        = "cst_"
TEMP_DESTINATION_PREFIX = "tmp_" LOG_CALL_COST_PREFIX = "cco_"