Mirror of https://github.com/cgrates/cgrates.git (synced 2026-02-19 22:28:45 +05:00)
Started removing timing

committed by Dan Christian Bogos
parent 53474406e3
commit 7671c0cb5e
@@ -113,14 +113,6 @@ func (dbM *DataDBMock) RemoveResourceDrv(string, string) error {
	return utils.ErrNotImplemented
}

func (dbM *DataDBMock) GetTimingDrv(string) (*utils.TPTiming, error) {
	return nil, utils.ErrNotImplemented
}

func (dbM *DataDBMock) SetTimingDrv(*utils.TPTiming) error {
	return utils.ErrNotImplemented
}

func (dbM *DataDBMock) RemoveTimingDrv(string) error {
	return utils.ErrNotImplemented
}
@@ -262,7 +262,7 @@ func NewFilterRule(rfType, fieldName string, vals []string) (*FilterRule, error)
// FilterRule filters requests coming into various places
// Pass rule: default negative, one matching rule should pass the filter
type FilterRule struct {
	Type      string            // Filter type (*string, *timing, *rsr_filters, *stats, *lt, *lte, *gt, *gte)
	Type      string            // Filter type (*string, *rsr_filters, *stats, *lt, *lte, *gt, *gte)
	Element   string            // Name of the field providing us the Values to check (used in case of some )
	Values    []string          // Filter definition
	rsrValues config.RSRParsers // Cache here the
@@ -59,12 +59,6 @@ DST_UK_Mobile_BIG5,447956
URG,112
EU_LANDLINE,444
EXOTIC,999
`
	TimingsCSVContent = `
WORKDAYS_00,*any,*any,*any,1;2;3;4;5,00:00:00
WORKDAYS_18,*any,*any,*any,1;2;3;4;5,18:00:00
WEEKENDS,*any,*any,*any,6;7,00:00:00
ONE_TIME_RUN,2012,,,,*asap
`

	ActionsCSVContent = `
@@ -23,7 +23,6 @@ import (
	"sort"
	"strings"
	"testing"
	"time"

	"github.com/cgrates/cgrates/utils"
)
@@ -37,16 +36,12 @@ var csvr *TpReader
func init() {
	var err error
	csvr, err = NewTpReader(dm.dataDB, NewStringCSVStorage(utils.CSVSep,
		TimingsCSVContent,
		ResourcesCSVContent, StatsCSVContent, ThresholdsCSVContent, FiltersCSVContent,
		RoutesCSVContent, AttributesCSVContent, ChargersCSVContent, DispatcherCSVContent,
		DispatcherHostCSVContent, RateProfileCSVContent, ActionProfileCSVContent, AccountCSVContent), testTPID, "", nil, nil, false)
	if err != nil {
		log.Print("error when creating TpReader:", err)
	}
	if err := csvr.LoadTimings(); err != nil {
		log.Print("error in LoadTimings:", err)
	}
	if err := csvr.LoadFilters(); err != nil {
		log.Print("error in LoadFilter:", err)
	}
@@ -88,56 +83,6 @@ func init() {
	}
}

func TestLoadTimimgs(t *testing.T) {
	if len(csvr.timings) != 14 {
		t.Error("Failed to load timings: ", csvr.timings)
	}
	timing := csvr.timings["WORKDAYS_00"]
	if !reflect.DeepEqual(timing, &utils.TPTiming{
		ID:        "WORKDAYS_00",
		Years:     utils.Years{},
		Months:    utils.Months{},
		MonthDays: utils.MonthDays{},
		WeekDays:  utils.WeekDays{1, 2, 3, 4, 5},
		StartTime: "00:00:00",
	}) {
		t.Error("Error loading timing: ", timing)
	}
	timing = csvr.timings["WORKDAYS_18"]
	if !reflect.DeepEqual(timing, &utils.TPTiming{
		ID:        "WORKDAYS_18",
		Years:     utils.Years{},
		Months:    utils.Months{},
		MonthDays: utils.MonthDays{},
		WeekDays:  utils.WeekDays{1, 2, 3, 4, 5},
		StartTime: "18:00:00",
	}) {
		t.Error("Error loading timing: ", timing)
	}
	timing = csvr.timings["WEEKENDS"]
	if !reflect.DeepEqual(timing, &utils.TPTiming{
		ID:        "WEEKENDS",
		Years:     utils.Years{},
		Months:    utils.Months{},
		MonthDays: utils.MonthDays{},
		WeekDays:  utils.WeekDays{time.Saturday, time.Sunday},
		StartTime: "00:00:00",
	}) {
		t.Error("Error loading timing: ", timing)
	}
	timing = csvr.timings["ONE_TIME_RUN"]
	if !reflect.DeepEqual(timing, &utils.TPTiming{
		ID:        "ONE_TIME_RUN",
		Years:     utils.Years{2012},
		Months:    utils.Months{},
		MonthDays: utils.MonthDays{},
		WeekDays:  utils.WeekDays{},
		StartTime: "*asap",
	}) {
		t.Error("Error loading timing: ", timing)
	}
}

func TestLoadResourceProfiles(t *testing.T) {
	eResProfiles := map[utils.TenantID]*utils.TPResourceProfile{
		{Tenant: "cgrates.org", ID: "ResGroup21"}: {
@@ -31,185 +31,6 @@ import (
	"github.com/cgrates/cgrates/utils"
)

func TestModelHelperCsvLoad(t *testing.T) {
	l, err := csvLoad(DestinationMdl{}, []string{"TEST_DEST", "+492"})
	tpd, ok := l.(DestinationMdl)
	if err != nil || !ok || tpd.Tag != "TEST_DEST" || tpd.Prefix != "+492" {
		t.Errorf("model load failed: %+v", tpd)
	}
}

func TestModelHelperCsvDump(t *testing.T) {
	tpd := DestinationMdl{
		Tag:    "TEST_DEST",
		Prefix: "+492"}
	csv, err := CsvDump(tpd)
	if err != nil || csv[0] != "TEST_DEST" || csv[1] != "+492" {
		t.Errorf("model load failed: %+v", tpd)
	}
}

func TestMapTPTimings(t *testing.T) {
	var tps []*utils.ApierTPTiming
	eOut := map[string]*utils.TPTiming{}
	if rcv, err := MapTPTimings(tps); err != nil {
		t.Error(err)
	} else if !reflect.DeepEqual(eOut, rcv) {
		t.Errorf("Expecting: %+v, received: %+v", eOut, rcv)
	}

	tps = []*utils.ApierTPTiming{
		{
			TPid: "TPid1",
			ID:   "ID1",
		},
	}
	eOut = map[string]*utils.TPTiming{
		"ID1": {
			ID:        "ID1",
			Years:     utils.Years{},
			Months:    utils.Months{},
			MonthDays: utils.MonthDays{},
			WeekDays:  utils.WeekDays{},
		},
	}
	if rcv, err := MapTPTimings(tps); err != nil {
		t.Error(err)
	} else if !reflect.DeepEqual(eOut, rcv) {
		t.Errorf("Expecting: %+v, received: %+v", utils.ToJSON(eOut), utils.ToJSON(rcv))
	}
	tps = []*utils.ApierTPTiming{
		{
			TPid:   "TPid1",
			ID:     "ID1",
			Months: "1;2;3;4",
		},
	}
	eOut = map[string]*utils.TPTiming{
		"ID1": {
			ID:        "ID1",
			Years:     utils.Years{},
			Months:    utils.Months{1, 2, 3, 4},
			MonthDays: utils.MonthDays{},
			WeekDays:  utils.WeekDays{},
		},
	}
	if rcv, err := MapTPTimings(tps); err != nil {
		t.Error(err)
	} else if !reflect.DeepEqual(eOut, rcv) {
		t.Errorf("Expecting: %+v, received: %+v", utils.ToJSON(eOut), utils.ToJSON(rcv))
	}
	//same id error
	tps = []*utils.ApierTPTiming{
		{
			TPid:   "TPid1",
			ID:     "ID1",
			Months: "1;2;3;4",
		},
		{
			TPid:   "TPid1",
			ID:     "ID1",
			Months: "1;2;3;4",
		},
	}
	eOut = map[string]*utils.TPTiming{
		"ID1": {
			ID:        "ID1",
			Years:     utils.Years{},
			Months:    utils.Months{1, 2, 3, 4},
			MonthDays: utils.MonthDays{},
			WeekDays:  utils.WeekDays{},
		},
	}
	if _, err := MapTPTimings(tps); err == nil || err.Error() != "duplicate timing tag: ID1" {
		t.Errorf("Expecting: nil, received: %+v", err)
	}
}

func TestAPItoModelTimings(t *testing.T) {
	ts := []*utils.ApierTPTiming{}
	if rcv := APItoModelTimings(ts); rcv != nil {
		t.Errorf("Expecting: nil, received: %+v", utils.ToJSON(rcv))
	}

	ts = []*utils.ApierTPTiming{
		{
			TPid:   "TPid1",
			ID:     "ID1",
			Months: "1;2;3;4",
		},
	}
	eOut := TimingMdls{
		TimingMdl{
			Tpid:   "TPid1",
			Months: "1;2;3;4",
			Tag:    "ID1",
		},
	}
	if rcv := APItoModelTimings(ts); !reflect.DeepEqual(eOut, rcv) {
		t.Errorf("Expecting: %+v, received: %+v", utils.ToJSON(eOut), utils.ToJSON(rcv))
	}
	ts = []*utils.ApierTPTiming{
		{
			TPid:   "TPid1",
			ID:     "ID1",
			Months: "1;2;3;4",
		},
		{
			TPid:      "TPid2",
			ID:        "ID2",
			Months:    "1;2;3;4",
			MonthDays: "1;2;3;4;28",
			Years:     "2020;2019",
			WeekDays:  "4;5",
		},
	}
	eOut = TimingMdls{
		TimingMdl{
			Tpid:   "TPid1",
			Months: "1;2;3;4",
			Tag:    "ID1",
		},
		TimingMdl{
			Tpid:      "TPid2",
			Tag:       "ID2",
			Months:    "1;2;3;4",
			MonthDays: "1;2;3;4;28",
			Years:     "2020;2019",
			WeekDays:  "4;5",
		},
	}
	if rcv := APItoModelTimings(ts); !reflect.DeepEqual(eOut, rcv) {
		t.Errorf("Expecting: %+v, received: %+v", utils.ToJSON(eOut), utils.ToJSON(rcv))
	}
}

func TestApierTPTimingAsExportSlice(t *testing.T) {
	tpTiming := &utils.ApierTPTiming{
		TPid:      "TEST_TPID",
		ID:        "TEST_TIMING",
		Years:     "*any",
		Months:    "*any",
		MonthDays: "*any",
		WeekDays:  "1;2;4",
		Time:      "00:00:01"}
	expectedSlc := [][]string{
		{"TEST_TIMING", "*any", "*any", "*any", "1;2;4", "00:00:01"},
	}
	ms := APItoModelTiming(tpTiming)
	var slc [][]string

	lc, err := CsvDump(ms)
	if err != nil {
		t.Error("Error dumping to csv: ", err)
	}
	slc = append(slc, lc)

	if !reflect.DeepEqual(expectedSlc, slc) {
		t.Errorf("Expecting: %+v, received: %+v", expectedSlc, slc)
	}
}

func TestTpResourcesAsTpResources(t *testing.T) {
	tps := []*ResourceMdl{
		{
@@ -1,35 +0,0 @@
/*
Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
Copyright (C) ITsysCOM GmbH

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
*/

package engine

import (
	"reflect"
	"testing"

	"github.com/cgrates/cgrates/utils"
)

func TestModelsTimingMdlTableName(t *testing.T) {
	testStruct := TimingMdl{}
	exp := utils.TBLTPTimings
	result := testStruct.TableName()
	if !reflect.DeepEqual(exp, result) {
		t.Errorf("\nExpected <%+v>,\nreceived <%+v>", exp, result)
	}
}
@@ -44,7 +44,6 @@ type CSVStorage struct {
	sep       rune
	generator func() csvReaderCloser
	// file names
	timingsFn     []string
	resProfilesFn []string
	statsFn       []string
	thresholdsFn  []string
@@ -117,7 +116,7 @@ func NewFileCSVStorage(sep rune, dataPath string) *CSVStorage {
}

// NewStringCSVStorage creates a csv storage from strings
func NewStringCSVStorage(sep rune, timingsFn,
func NewStringCSVStorage(sep rune,
	resProfilesFn, statsFn, thresholdsFn, filterFn, routeProfilesFn,
	attributeProfilesFn, chargerProfilesFn, dispatcherProfilesFn, dispatcherHostsFn,
	rateProfilesFn, actionProfilesFn, accountsFn string) *CSVStorage {
@@ -58,7 +58,6 @@ const (
	ColVer  = "versions"
	ColRsP  = "resource_profiles"
	ColIndx = "indexes"
	ColTmg  = "timings"
	ColRes  = "resources"
	ColSqs  = "statqueues"
	ColSqp  = "statqueue_profiles"
@@ -871,42 +870,6 @@ func (ms *MongoStorage) RemoveResourceDrv(tenant, id string) (err error) {
	})
}

func (ms *MongoStorage) GetTimingDrv(id string) (t *utils.TPTiming, err error) {
	t = new(utils.TPTiming)
	err = ms.query(context.TODO(), func(sctx mongo.SessionContext) (err error) {
		cur := ms.getCol(ColTmg).FindOne(sctx, bson.M{"id": id})
		if err := cur.Decode(t); err != nil {
			t = nil
			if err == mongo.ErrNoDocuments {
				return utils.ErrNotFound
			}
			return err
		}
		return nil
	})
	return
}

func (ms *MongoStorage) SetTimingDrv(t *utils.TPTiming) (err error) {
	return ms.query(context.TODO(), func(sctx mongo.SessionContext) (err error) {
		_, err = ms.getCol(ColTmg).UpdateOne(sctx, bson.M{"id": t.ID},
			bson.M{"$set": t},
			options.Update().SetUpsert(true),
		)
		return err
	})
}

func (ms *MongoStorage) RemoveTimingDrv(id string) (err error) {
	return ms.query(context.TODO(), func(sctx mongo.SessionContext) (err error) {
		dr, err := ms.getCol(ColTmg).DeleteOne(sctx, bson.M{"id": id})
		if dr.DeletedCount == 0 {
			return utils.ErrNotFound
		}
		return err
	})
}

// GetStatQueueProfileDrv retrieves a StatQueueProfile from dataDB
func (ms *MongoStorage) GetStatQueueProfileDrv(tenant string, id string) (sq *StatQueueProfile, err error) {
	sq = new(StatQueueProfile)
@@ -35,7 +35,6 @@ type TpReader struct {
	timezone    string
	dm          *DataManager
	lr          LoadReader
	timings     map[string]*utils.TPTiming
	resProfiles map[utils.TenantID]*utils.TPResourceProfile
	sqProfiles  map[utils.TenantID]*utils.TPStatProfile
	thProfiles  map[utils.TenantID]*utils.TPThresholdProfile
@@ -70,14 +69,11 @@ func NewTpReader(db DataDB, lr LoadReader, tpid, timezone string,
		isInternalDB: isInternalDB,
	}
	tpr.Init()
	//add default timing tag (in case of no timings file)
	tpr.addDefaultTimings()

	return tpr, nil
}

func (tpr *TpReader) Init() {
	tpr.timings = make(map[string]*utils.TPTiming)
	tpr.resProfiles = make(map[utils.TenantID]*utils.TPResourceProfile)
	tpr.sqProfiles = make(map[utils.TenantID]*utils.TPStatProfile)
	tpr.thProfiles = make(map[utils.TenantID]*utils.TPThresholdProfile)
@@ -1245,98 +1241,3 @@ func (tpr *TpReader) ReloadScheduler(verbose bool) (err error) { // ToDoNext: ad
	// }
	return
}

func (tpr *TpReader) addDefaultTimings() {
	tpr.timings[utils.MetaAny] = &utils.TPTiming{
		ID:        utils.MetaAny,
		Years:     utils.Years{},
		Months:    utils.Months{},
		MonthDays: utils.MonthDays{},
		WeekDays:  utils.WeekDays{},
		StartTime: "00:00:00",
		EndTime:   "",
	}
	tpr.timings[utils.MetaASAP] = &utils.TPTiming{
		ID:        utils.MetaASAP,
		Years:     utils.Years{},
		Months:    utils.Months{},
		MonthDays: utils.MonthDays{},
		WeekDays:  utils.WeekDays{},
		StartTime: utils.MetaASAP,
		EndTime:   "",
	}
	tpr.timings[utils.MetaEveryMinute] = &utils.TPTiming{
		ID:        utils.MetaEveryMinute,
		Years:     utils.Years{},
		Months:    utils.Months{},
		MonthDays: utils.MonthDays{},
		WeekDays:  utils.WeekDays{},
		StartTime: utils.ConcatenatedKey(utils.Meta, utils.Meta, strconv.Itoa(time.Now().Second())),
		EndTime:   "",
	}
	tpr.timings[utils.MetaHourly] = &utils.TPTiming{
		ID:        utils.MetaHourly,
		Years:     utils.Years{},
		Months:    utils.Months{},
		MonthDays: utils.MonthDays{},
		WeekDays:  utils.WeekDays{},
		StartTime: utils.ConcatenatedKey(utils.Meta, strconv.Itoa(time.Now().Minute()), strconv.Itoa(time.Now().Second())),
		EndTime:   "",
	}
	startTime := time.Now().Format("15:04:05")
	tpr.timings[utils.MetaDaily] = &utils.TPTiming{
		ID:        utils.MetaDaily,
		Years:     utils.Years{},
		Months:    utils.Months{},
		MonthDays: utils.MonthDays{},
		WeekDays:  utils.WeekDays{},
		StartTime: startTime,
		EndTime:   "",
	}
	tpr.timings[utils.MetaWeekly] = &utils.TPTiming{
		ID:        utils.MetaWeekly,
		Years:     utils.Years{},
		Months:    utils.Months{},
		MonthDays: utils.MonthDays{},
		WeekDays:  utils.WeekDays{time.Now().Weekday()},
		StartTime: startTime,
		EndTime:   "",
	}
	tpr.timings[utils.MetaMonthly] = &utils.TPTiming{
		ID:        utils.MetaMonthly,
		Years:     utils.Years{},
		Months:    utils.Months{},
		MonthDays: utils.MonthDays{time.Now().Day()},
		WeekDays:  utils.WeekDays{},
		StartTime: startTime,
		EndTime:   "",
	}
	tpr.timings[utils.MetaMonthlyEstimated] = &utils.TPTiming{
		ID:        utils.MetaMonthlyEstimated,
		Years:     utils.Years{},
		Months:    utils.Months{},
		MonthDays: utils.MonthDays{time.Now().Day()},
		WeekDays:  utils.WeekDays{},
		StartTime: startTime,
		EndTime:   "",
	}
	tpr.timings[utils.MetaMonthEnd] = &utils.TPTiming{
		ID:        utils.MetaMonthEnd,
		Years:     utils.Years{},
		Months:    utils.Months{},
		MonthDays: utils.MonthDays{-1},
		WeekDays:  utils.WeekDays{},
		StartTime: startTime,
		EndTime:   "",
	}
	tpr.timings[utils.MetaYearly] = &utils.TPTiming{
		ID:        utils.MetaYearly,
		Years:     utils.Years{},
		Months:    utils.Months{time.Now().Month()},
		MonthDays: utils.MonthDays{time.Now().Day()},
		WeekDays:  utils.WeekDays{},
		StartTime: startTime,
		EndTime:   "",
	}

}
@@ -718,7 +718,6 @@ func TestReloadCache(t *testing.T) {
		APIOpts: map[string]interface{}{},
		Tenant:  "",
		ArgsCache: map[string][]string{
			"TimingIDs":            {"TimingsID"},
			"ResourceProfileIDs":   {"cgrates.org:resourceProfilesID"},
			"StatsQueueProfileIDs": {"cgrates.org:statProfilesID"},
			"ThresholdProfileIDs":  {"cgrates.org:thresholdProfilesID"},
@@ -752,9 +751,6 @@ func TestReloadCache(t *testing.T) {
		utils.ConcatenatedKey(utils.MetaInternal, utils.MetaCaches): rpcInternal,
	})
	tpr := &TpReader{
		timings: map[string]*utils.TPTiming{
			"TimingsID": {},
		},
		resProfiles: map[utils.TenantID]*utils.TPResourceProfile{
			{Tenant: "cgrates.org", ID: "resourceProfilesID"}: {},
		},
@@ -74,7 +74,7 @@ func TestCurrentDBVersions(t *testing.T) {
	expVersDataDB := Versions{
		utils.StatS: 4, utils.Accounts: 3, utils.Actions: 2,
		utils.Thresholds: 4, utils.Routes: 2, utils.Attributes: 7,
		utils.Timing: 1, utils.RQF: 5, utils.Resource: 1,
		utils.RQF: 5, utils.Resource: 1,
		utils.Subscribers: 1,
		utils.Chargers: 2,
		utils.Dispatchers: 2, utils.LoadIDsVrs: 1, utils.RateProfiles: 1,
@@ -83,7 +83,7 @@ func TestCurrentDBVersions(t *testing.T) {
	expVersStorDB := Versions{
		utils.CostDetails: 2, utils.SessionSCosts: 3, utils.CDRs: 2,
		utils.TpFilters: 1, utils.TpThresholds: 1, utils.TpRoutes: 1,
		utils.TpStats: 1, utils.TpResources: 1, utils.TpTiming: 1,
		utils.TpStats: 1, utils.TpResources: 1,
		utils.TpResource: 1,
		utils.TpChargers: 1, utils.TpDispatchers: 1,
		utils.TpRateProfiles: 1, utils.TpActionProfiles: 1,
@@ -172,7 +172,6 @@ func testActionsExecuteRemoveSMCos1(t *testing.T) {
		Actions: []*utils.TPAction{
			{
				Identifier:      utils.MetaRemoveSessionCosts,
				TimingTags:      utils.MetaASAP,
				ExtraParameters: "*string:~*sc.OriginID:13;*notstring:~*sc.OriginID:12",
				Weight:          20,
			},
@@ -205,7 +204,6 @@ func testActionsExecuteRemoveSMCos2(t *testing.T) {
		Actions: []*utils.TPAction{
			{
				Identifier:      utils.MetaRemoveSessionCosts,
				TimingTags:      utils.MetaASAP,
				ExtraParameters: "",
				Weight:          20,
			},
@@ -52,7 +52,6 @@ var (
		testLoaderITWriteToDatabase,
		testLoaderITImportToStorDb,
		testLoaderITInitDataDB,
		testLoaderITLoadFromStorDb,
		testLoaderITInitDataDB,
	}
)
@@ -153,9 +152,6 @@ func testLoaderITRemoveLoad(t *testing.T) {
	if err != nil {
		t.Error(err)
	}
	if err = loader.LoadTimings(); err != nil {
		t.Error("Failed loading timings: ", err.Error())
	}
	if err = loader.LoadFilters(); err != nil {
		t.Error("Failed loading filters: ", err.Error())
	}
@@ -205,9 +201,6 @@ func testLoaderITLoadFromCSV(t *testing.T) {
	if err != nil {
		t.Error(err)
	}
	if err = loader.LoadTimings(); err != nil {
		t.Error("Failed loading timings: ", err.Error())
	}
	if err = loader.LoadFilters(); err != nil {
		t.Error("Failed loading filters: ", err.Error())
	}
@@ -241,15 +234,6 @@ func testLoaderITLoadFromCSV(t *testing.T) {
}

func testLoaderITWriteToDatabase(t *testing.T) {
	for k, tm := range loader.timings {
		rcv, err := loader.dm.GetTiming(k, true, utils.NonTransactional)
		if err != nil {
			t.Error("Failed GetTiming: ", err.Error())
		}
		if !reflect.DeepEqual(tm, rcv) {
			t.Errorf("Expecting: %v, received: %v", tm, rcv)
		}
	}

	for tenantid, fltr := range loader.filters {
		rcv, err := loader.dm.GetFilter(context.TODO(), tenantid.Tenant, tenantid.ID, false, false, utils.NonTransactional)
@@ -395,14 +379,6 @@ func testLoaderITImportToStorDb(t *testing.T) {
	}
}

// Loads data from storDb into dataDb
func testLoaderITLoadFromStorDb(t *testing.T) {
	loader, _ := NewTpReader(dataDbCsv.DataDB(), storDb, utils.TestSQL, "", []string{utils.ConcatenatedKey(utils.MetaInternal, utils.MetaCaches)}, nil, false)
	if err := loader.LoadTimings(); err != nil && err.Error() != utils.NotFoundCaps {
		t.Error("Failed loading timings: ", err.Error())
	}
}

/*
// Compares previously loaded data from csv and stor to be identical, redis specific tests
func TestMatchLoadCsvWithStorRating(t *testing.T) {
@@ -47,7 +47,6 @@ var (
	// ToDo: testOnStorITLoadAccountingCache
	testOnStorITResource,
	testOnStorITResourceProfile,
	testOnStorITTiming,
	//testOnStorITCRUDHistory,
	testOnStorITCRUDStructVersion,
	testOnStorITStatQueueProfile,
@@ -240,76 +239,6 @@ func testOnStorITResource(t *testing.T) {
	}
}

func testOnStorITTiming(t *testing.T) {
	tmg := &utils.TPTiming{
		ID:        "TEST",
		Years:     utils.Years{2016, 2017},
		Months:    utils.Months{time.January, time.February, time.March},
		MonthDays: utils.MonthDays{1, 2, 3, 4},
		WeekDays:  utils.WeekDays{1, 2, 3},
		StartTime: "00:00:00",
		EndTime:   "",
	}
	if _, rcvErr := onStor.GetTiming(tmg.ID, false,
		utils.NonTransactional); rcvErr != utils.ErrNotFound {
		t.Error(rcvErr)
	}
	if err := onStor.SetTiming(context.TODO(), tmg); err != nil {
		t.Error(err)
	}
	if onStor.dataDB.GetStorageType() != utils.Internal {
		//get from cache
		if rcv, err := onStor.GetTiming(tmg.ID, false, utils.NonTransactional); err != nil {
			t.Error(err)
		} else if !reflect.DeepEqual(tmg, rcv) {
			t.Errorf("Expecting: %v, received: %v", tmg, rcv)
		}
	}
	//get from database
	if rcv, err := onStor.GetTiming(tmg.ID, true, utils.NonTransactional); err != nil {
		t.Error(err)
	} else if !reflect.DeepEqual(tmg, rcv) {
		t.Errorf("Expecting: %v, received: %v", tmg, rcv)
	}
	expectedT := []string{"tmg_TEST"}
	if itm, err := onStor.DataDB().GetKeysForPrefix(context.TODO(), utils.TimingsPrefix); err != nil {
		t.Error(err)
	} else if !reflect.DeepEqual(expectedT, itm) {
		t.Errorf("Expected : %+v, but received %+v", expectedT, itm)
	}
	//update
	tmg.MonthDays = utils.MonthDays{1, 2, 3, 4, 5, 6, 7}
	if err := onStor.SetTiming(context.TODO(), tmg); err != nil {
		t.Error(err)
	}

	//get from cache
	if rcv, err := onStor.GetTiming(tmg.ID, false, utils.NonTransactional); err != nil {
		t.Error(err)
	} else if !reflect.DeepEqual(tmg, rcv) {
		t.Errorf("Expecting: %v, received: %v", tmg, rcv)
	}
	//get from database
	if rcv, err := onStor.GetTiming(tmg.ID, true, utils.NonTransactional); err != nil {
		t.Error(err)
	} else if !reflect.DeepEqual(tmg, rcv) {
		t.Errorf("Expecting: %v, received: %v", tmg, rcv)
	}
	if err := onStor.RemoveTiming(tmg.ID, utils.NonTransactional); err != nil {
		t.Error(err)
	}
	//check cache if removed
	if _, rcvErr := onStor.GetTiming(tmg.ID, false,
		utils.NonTransactional); rcvErr != utils.ErrNotFound {
		t.Error(rcvErr)
	}
	//check database if removed
	if _, rcvErr := onStor.GetTiming(tmg.ID, true,
		utils.NonTransactional); rcvErr != utils.ErrNotFound {
		t.Error(rcvErr)
	}
}

func testOnStorITCRUDHistory(t *testing.T) {
	time := time.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC)
	ist := &utils.LoadInstance{
@@ -54,7 +54,6 @@ var sTestsStorDBit = []func(t *testing.T){
	testStorDBitCRUDTPThresholds,
	testStorDBitCRUDTPAttributes,
	testStorDBitCRUDTPChargers,
	testStorDBitCRUDTpTimings,
	testStorDBitCRUDTpResources,
	testStorDBitCRUDTpStats,
	testStorDBitCRUDCDRs,
@@ -872,67 +871,6 @@ func testStorDBitCRUDTPChargers(t *testing.T) {
	}
}

func testStorDBitCRUDTpTimings(t *testing.T) {
	// READ
	if _, err := storDB.GetTPTimings("testTPid", ""); err != utils.ErrNotFound {
		t.Error(err)
	}
	// WRITE
	var snd = []*utils.ApierTPTiming{
		{
			TPid:      "testTPid",
			ID:        "testTag1",
			Years:     "*any",
			Months:    "*any",
			MonthDays: "*any",
			WeekDays:  "1;2;3;4;5",
			Time:      "01:00:00",
		},
		{
			TPid:      "testTPid",
			ID:        "testTag2",
			Years:     "*any",
			Months:    "*any",
			MonthDays: "*any",
			WeekDays:  "1;2;3;4;5",
			Time:      "01:00:00",
		},
	}
	if err := storDB.SetTPTimings(snd); err != nil {
		t.Error(err)
	}
	// READ
	if rcv, err := storDB.GetTPTimings("testTPid", ""); err != nil {
		t.Error(err)
	} else {
		if !(reflect.DeepEqual(snd[0], rcv[0]) || reflect.DeepEqual(snd[0], rcv[1])) {
			t.Errorf("Expecting:\n%+v\nReceived:\n%+v\n||\n%+v", utils.ToJSON(snd[0]), utils.ToJSON(rcv[0]), utils.ToJSON(rcv[1]))
		}
	}
	// UPDATE
	snd[0].Time = "02:00:00"
	snd[1].Time = "02:00:00"
	if err := storDB.SetTPTimings(snd); err != nil {
		t.Error(err)
	}
	// READ
	if rcv, err := storDB.GetTPTimings("testTPid", ""); err != nil {
		t.Error(err)
	} else {
		if !(reflect.DeepEqual(snd[0], rcv[0]) || reflect.DeepEqual(snd[0], rcv[1])) {
			t.Errorf("Expecting:\n%+v\nReceived:\n%+v\n||\n%+v", utils.ToJSON(snd[0]), utils.ToJSON(rcv[0]), utils.ToJSON(rcv[1]))
		}
	}
	// REMOVE
	if err := storDB.RemTpData("", "testTPid", nil); err != nil {
		t.Error(err)
	}
	// READ
	if _, err := storDB.GetTPTimings("testTPid", ""); err != utils.ErrNotFound {
		t.Error(err)
	}
}

func testStorDBitCRUDTpResources(t *testing.T) {
	// READ
	if _, err := storDB.GetTPResources("testTPid", "", ""); err != utils.ErrNotFound {