modified migrator integration test V1
@@ -18,12 +18,12 @@ package migrator

import (
"flag"
"fmt"
"path"
// "fmt"
// "path"
"reflect"
"testing"
"time"

"log"
"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
@@ -37,8 +37,42 @@ var (
onStorCfg string
dbtype string
mig *Migrator
dataDir = flag.String("data_dir", "/usr/share/cgrates", "CGR data dir path here")
db_passwd = ""
migrate = flag.String("migrate", "", "Fire up automatic migration <*cost_details|*set_versions>")
version = flag.Bool("version", false, "Prints the application version.")

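// Connection flags for the current DataDB/StorDB (presumably the migration target).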
dataDBType = flag.String("datadb_type", config.CgrConfig().DataDbType, "The type of the DataDb database <redis>")
dataDBHost = flag.String("datadb_host", config.CgrConfig().DataDbHost, "The DataDb host to connect to.")
dataDBPort = flag.String("datadb_port", config.CgrConfig().DataDbPort, "The DataDb port to bind to.")
dataDBName = flag.String("datadb_name", config.CgrConfig().DataDbName, "The name/number of the DataDb to connect to.")
dataDBUser = flag.String("datadb_user", config.CgrConfig().DataDbUser, "The DataDb user to sign in as.")
dataDBPass = flag.String("datadb_passwd", config.CgrConfig().DataDbPass, "The DataDb user's password.")

storDBType = flag.String("stordb_type", config.CgrConfig().StorDBType, "The type of the storDb database <mysql>")
storDBHost = flag.String("stordb_host", config.CgrConfig().StorDBHost, "The storDb host to connect to.")
storDBPort = flag.String("stordb_port", config.CgrConfig().StorDBPort, "The storDb port to bind to.")
storDBName = flag.String("stordb_name", config.CgrConfig().StorDBName, "The name/number of the storDb to connect to.")
storDBUser = flag.String("stordb_user", config.CgrConfig().StorDBUser, "The storDb user to sign in as.")
storDBPass = flag.String("stordb_passwd", config.CgrConfig().StorDBPass, "The storDb user's password.")

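// Connection flags for the legacy DataDB/StorDB (presumably the migration
// source); note old_datadb_name defaults to "11", so the source Redis database
// is kept separate from the target one.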
oldDataDBType = flag.String("old_datadb_type", config.CgrConfig().DataDbType, "The type of the DataDb database <redis>")
oldDataDBHost = flag.String("old_datadb_host", config.CgrConfig().DataDbHost, "The DataDb host to connect to.")
oldDataDBPort = flag.String("old_datadb_port", config.CgrConfig().DataDbPort, "The DataDb port to bind to.")
oldDataDBName = flag.String("old_datadb_name", "11", "The name/number of the DataDb to connect to.")
oldDataDBUser = flag.String("old_datadb_user", config.CgrConfig().DataDbUser, "The DataDb user to sign in as.")
oldDataDBPass = flag.String("old_datadb_passwd", config.CgrConfig().DataDbPass, "The DataDb user's password.")

oldStorDBType = flag.String("old_stordb_type", config.CgrConfig().StorDBType, "The type of the storDb database <mysql>")
oldStorDBHost = flag.String("old_stordb_host", config.CgrConfig().StorDBHost, "The storDb host to connect to.")
oldStorDBPort = flag.String("old_stordb_port", config.CgrConfig().StorDBPort, "The storDb port to bind to.")
oldStorDBName = flag.String("old_stordb_name", config.CgrConfig().StorDBName, "The name/number of the storDb to connect to.")
oldStorDBUser = flag.String("old_stordb_user", config.CgrConfig().StorDBUser, "The storDb user to sign in as.")
oldStorDBPass = flag.String("old_stordb_passwd", config.CgrConfig().StorDBPass, "The storDb user's password.")

loadHistorySize = flag.Int("load_history_size", config.CgrConfig().LoadHistorySize, "Limit the number of records in the load history")
oldLoadHistorySize = flag.Int("old_load_history_size", config.CgrConfig().LoadHistorySize, "Limit the number of records in the load history")

dbDataEncoding = flag.String("dbdata_encoding", config.CgrConfig().DBDataEncoding, "The encoding used to store object data in strings")
oldDBDataEncoding = flag.String("old_dbdata_encoding", config.CgrConfig().DBDataEncoding, "The encoding used to store object data in strings")
)

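// Illustrative invocation (assumed layout; the exact build tags and database
// defaults depend on the local CGRateS setup):
//
//	go test ./migrator -datadb_type=redis -old_datadb_name=11 -stordb_type=mysql -migrate=*set_versions
//
// i.e. the current and the legacy databases are selected purely through the
// flags declared above.
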
// subtests to be executed for each migrator
@@ -52,45 +86,59 @@ var sTestsITMigrator = []func(t *testing.T){
}

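// TestOnStorITRedisConnect appears to build both the current and the legacy
// storage connections from the flags above and to construct the Migrator
// shared by the Redis subtests below.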
func TestOnStorITRedisConnect(t *testing.T) {
cfg, _ := config.NewDefaultCGRConfig()
rdsITdb, err := engine.NewRedisStorage(fmt.Sprintf("%s:%s", cfg.TpDbHost, cfg.TpDbPort), 4, cfg.TpDbPass, cfg.DBDataEncoding, utils.REDIS_MAX_CONNS, nil, 1)
if err != nil {
t.Fatal("Could not connect to Redis", err.Error())
}
onStorCfg = cfg.DataDbName
mig = NewMigrator(rdsITdb, rdsITdb, utils.REDIS, utils.JSON, rdsITdb, utils.REDIS)
dataDB, err := engine.ConfigureDataStorage(*dataDBType, *dataDBHost, *dataDBPort, *dataDBName, *dataDBUser, *dataDBPass, *dbDataEncoding, config.CgrConfig().CacheConfig, *loadHistorySize)
if err != nil {
log.Fatal(err)
}
oldDataDB, err := engine.ConfigureDataStorage(*oldDataDBType, *oldDataDBHost, *oldDataDBPort, *oldDataDBName, *oldDataDBUser, *oldDataDBPass, *oldDBDataEncoding, config.CgrConfig().CacheConfig, *oldLoadHistorySize)
if err != nil {
log.Fatal(err)
}
storDB, err := engine.ConfigureStorStorage(*storDBType, *storDBHost, *storDBPort, *storDBName, *storDBUser, *storDBPass, *dbDataEncoding,
config.CgrConfig().StorDBMaxOpenConns, config.CgrConfig().StorDBMaxIdleConns, config.CgrConfig().StorDBConnMaxLifetime, config.CgrConfig().StorDBCDRSIndexes)
if err != nil {
log.Fatal(err)
}
oldstorDB, err := engine.ConfigureStorStorage(*oldStorDBType, *oldStorDBHost, *oldStorDBPort, *oldStorDBName, *oldStorDBUser, *oldStorDBPass, *oldDBDataEncoding,
config.CgrConfig().StorDBMaxOpenConns, config.CgrConfig().StorDBMaxIdleConns, config.CgrConfig().StorDBConnMaxLifetime, config.CgrConfig().StorDBCDRSIndexes)
if err != nil {
log.Fatal(err)
}
mig, err = NewMigrator(dataDB, *dataDBType, *dbDataEncoding, storDB, *storDBType, oldDataDB, *oldDataDBType, *oldDBDataEncoding, oldstorDB, *oldStorDBType)
if err != nil {
log.Fatal(err)
}
}

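// TestOnStorITRedis runs every subtest in sTestsITMigrator against the Redis
// backend configured above.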
func TestOnStorITRedis(t *testing.T) {
dbtype = utils.REDIS
onStor = rdsITdb
for _, stest := range sTestsITMigrator {
t.Run("TestITMigratorOnRedis", stest)
}
}

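// TestOnStorITMongoConnect loads the cdrsv2mongo sample configuration and
// opens the MongoDB storage used by the Mongo subtests.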
func TestOnStorITMongoConnect(t *testing.T) {
cdrsMongoCfgPath := path.Join(*dataDir, "conf", "samples", "cdrsv2mongo")
mgoITCfg, err := config.NewCGRConfigFromFolder(cdrsMongoCfgPath)
if err != nil {
t.Fatal(err)
}
if mgoITdb, err = engine.NewMongoStorage(mgoITCfg.StorDBHost, mgoITCfg.StorDBPort, mgoITCfg.StorDBName, mgoITCfg.StorDBUser, db_passwd,
utils.StorDB, nil, mgoITCfg.CacheConfig, mgoITCfg.LoadHistorySize); err != nil {
t.Fatal(err)
}
mongo = mgoITCfg
onStorCfg = mgoITCfg.StorDBName
mig = NewMigrator(mgoITdb, mgoITdb, utils.MONGO, utils.JSON, mgoITdb, utils.MONGO)
}
// func TestOnStorITMongoConnect(t *testing.T) {
// cdrsMongoCfgPath := path.Join(*dataDir, "conf", "samples", "cdrsv2mongo")
// mgoITCfg, err := config.NewCGRConfigFromFolder(cdrsMongoCfgPath)
// if err != nil {
// t.Fatal(err)
// }
// if mgoITdb, err = engine.NewMongoStorage(mgoITCfg.StorDBHost, mgoITCfg.StorDBPort, mgoITCfg.StorDBName, mgoITCfg.StorDBUser, db_passwd,
// utils.StorDB, nil, mgoITCfg.CacheConfig, mgoITCfg.LoadHistorySize); err != nil {
// t.Fatal(err)
// }
// mongo = mgoITCfg
// onStorCfg = mgoITCfg.StorDBName
// mig = NewMigrator(mgoITdb, mgoITdb, utils.MONGO, utils.JSON, mgoITdb, utils.MONGO)
// }

func TestOnStorITMongo(t *testing.T) {
dbtype = utils.MONGO
onStor = mgoITdb
for _, stest := range sTestsITMigrator {
t.Run("TestITMigratorOnMongo", stest)
}
}
// func TestOnStorITMongo(t *testing.T) {
// dbtype = utils.MONGO
// onStor = mgoITdb
// for _, stest := range sTestsITMigrator {
// t.Run("TestITMigratorOnMongo", stest)
// }
// }

func testOnStorITFlush(t *testing.T) {
switch {
@@ -107,25 +155,30 @@ func testOnStorITFlush(t *testing.T) {
}
}
}
//1

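// Each migrator subtest below follows the same pattern: build a v1 object,
// write it into the source database through a SetV1on*Redis helper, run
// mig.Migrate for the corresponding item type, then read the migrated object
// back from dataDB and compare it against the expected v2 value.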
func testMigratorAccounts(t *testing.T) {
v1b := &v1Balance{Value: 10, Weight: 10, DestinationIds: "NAT", ExpirationDate: time.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Local(), Timings: []*engine.RITiming{&engine.RITiming{Years: utils.Years{}, Months: utils.Months{}, MonthDays: utils.MonthDays{}, WeekDays: utils.WeekDays{}}}}
v1Acc := &v1Account{Id: "OUT:CUSTOMER_1:rif", BalanceMap: map[string]v1BalanceChain{utils.VOICE: v1BalanceChain{v1b}, utils.MONETARY: v1BalanceChain{&v1Balance{Value: 21, ExpirationDate: time.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Local(), Timings: []*engine.RITiming{&engine.RITiming{Years: utils.Years{}, Months: utils.Months{}, MonthDays: utils.MonthDays{}, WeekDays: utils.WeekDays{}}}}}}}
v2 := &engine.Balance{Uuid: "", ID: "", Value: 10, Directions: utils.StringMap{"*OUT": true}, ExpirationDate: time.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Local(), Weight: 10, DestinationIDs: utils.StringMap{"NAT": true},
v1Acc := &v1Account{Id: "*OUT:CUSTOMER_1:rif", BalanceMap: map[string]v1BalanceChain{utils.VOICE: v1BalanceChain{v1b}, utils.MONETARY: v1BalanceChain{&v1Balance{Value: 21, ExpirationDate: time.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Local(), Timings: []*engine.RITiming{&engine.RITiming{Years: utils.Years{}, Months: utils.Months{}, MonthDays: utils.MonthDays{}, WeekDays: utils.WeekDays{}}}}}}}
v2b := &engine.Balance{Uuid: "", ID: "", Value: 10, Directions: utils.StringMap{"*OUT": true}, ExpirationDate: time.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Local(), Weight: 10, DestinationIDs: utils.StringMap{"NAT": true},
RatingSubject: "", Categories: utils.NewStringMap(), SharedGroups: utils.NewStringMap(), Timings: []*engine.RITiming{&engine.RITiming{Years: utils.Years{}, Months: utils.Months{}, MonthDays: utils.MonthDays{}, WeekDays: utils.WeekDays{}}}, TimingIDs: utils.NewStringMap(""), Factor: engine.ValueFactor{}}
m2 := &engine.Balance{Uuid: "", ID: "", Value: 21, Directions: utils.StringMap{"*OUT": true}, ExpirationDate: time.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Local(), DestinationIDs: utils.NewStringMap(""), RatingSubject: "",
Categories: utils.NewStringMap(), SharedGroups: utils.NewStringMap(), Timings: []*engine.RITiming{&engine.RITiming{Years: utils.Years{}, Months: utils.Months{}, MonthDays: utils.MonthDays{}, WeekDays: utils.WeekDays{}}}, TimingIDs: utils.NewStringMap(""), Factor: engine.ValueFactor{}}
testAccount := &engine.Account{ID: "CUSTOMER_1:rif", BalanceMap: map[string]engine.Balances{utils.VOICE: engine.Balances{v2}, utils.MONETARY: engine.Balances{m2}}, UnitCounters: engine.UnitCounters{}, ActionTriggers: engine.ActionTriggers{}}
testAccount := &engine.Account{ID: "CUSTOMER_1:rif", BalanceMap: map[string]engine.Balances{utils.VOICE: engine.Balances{v2b}, utils.MONETARY: engine.Balances{m2}}, UnitCounters: engine.UnitCounters{}, ActionTriggers: engine.ActionTriggers{}}
switch {
case dbtype == utils.REDIS:
bit, err := mig.mrshlr.Marshal(v1Acc)
if err != nil {
t.Error("Error when marshaling ", err.Error())
}
err = mig.SetV1onRedis(v1AccountDBPrefix+v1Acc.Id, bit)
err = mig.SetV1onOldRedis(v1AccountDBPrefix+v1Acc.Id, bit)
if err != nil {
t.Error("Error when setting v1 acc ", err.Error())
}
_, err = mig.getV1AccountFromDB(v1AccountDBPrefix + v1Acc.Id)
if err != nil {
t.Error("Error when getting v1 acc ", err.Error())
}
err = mig.Migrate(utils.MetaAccounts)
if err != nil {
t.Error("Error when migrating accounts ", err.Error())
@@ -134,9 +187,12 @@ func testMigratorAccounts(t *testing.T) {
if err != nil {
t.Error("Error when getting account ", err.Error())
}
if !reflect.DeepEqual(testAccount, result) {
if !reflect.DeepEqual(testAccount.BalanceMap["*voice"][0], result.BalanceMap["*voice"][0]) {
t.Errorf("Expecting: %+v, received: %+v", testAccount.BalanceMap["*voice"][0], result.BalanceMap["*voice"][0])
} else if !reflect.DeepEqual(testAccount, result) {
t.Errorf("Expecting: %+v, received: %+v", testAccount, result)
}
/*
case dbtype == utils.MONGO:
err := mig.SetV1onMongoAccount(v1AccountDBPrefix, v1Acc)
if err != nil {
@@ -153,9 +209,12 @@ func testMigratorAccounts(t *testing.T) {
if !reflect.DeepEqual(testAccount, result) {
t.Errorf("Expecting: %+v, received: %+v", testAccount, result)
}
*/
}
}

//2

func testMigratorActionPlans(t *testing.T) {
v1ap := &v1ActionPlan{Id: "test", AccountIds: []string{"one"}, Timing: &engine.RateInterval{Timing: &engine.RITiming{Years: utils.Years{}, Months: utils.Months{}, MonthDays: utils.MonthDays{}, WeekDays: utils.WeekDays{}}}}
ap := &engine.ActionPlan{Id: "test", AccountIDs: utils.StringMap{"one": true}, ActionTimings: []*engine.ActionTiming{&engine.ActionTiming{Timing: &engine.RateInterval{Timing: &engine.RITiming{Years: utils.Years{}, Months: utils.Months{}, MonthDays: utils.MonthDays{}, WeekDays: utils.WeekDays{}}}}}}
@@ -170,11 +229,15 @@ func testMigratorActionPlans(t *testing.T) {
if err != nil {
t.Error("Error when setting v1 ActionPlan ", err.Error())
}
err = mig.Migrate("migrateActionPlans")
_, err = mig.getV1ActionPlansFromDB(setv1id)
if err != nil {
t.Error("Error when getting v1 ActionPlan ", err.Error())
}
err = mig.Migrate(utils.MetaActionPlans)
if err != nil {
t.Error("Error when migrating ActionPlans ", err.Error())
}
result, err := mig.tpDB.GetActionPlan(ap.Id, true, utils.NonTransactional)
result, err := mig.dataDB.GetActionPlan(ap.Id, true, utils.NonTransactional)
if err != nil {
t.Error("Error when getting ActionPlan ", err.Error())
}
@@ -185,6 +248,7 @@ func testMigratorActionPlans(t *testing.T) {
} else if ap.ActionTimings[0].Weight != result.ActionTimings[0].Weight || ap.ActionTimings[0].ActionsID != result.ActionTimings[0].ActionsID {
t.Errorf("Expecting: %+v, received: %+v", ap.ActionTimings[0].Weight, result.ActionTimings[0].Weight)
}
/*
case dbtype == utils.MONGO:
err := mig.SetV1onMongoActionPlan(utils.ACTION_PLAN_PREFIX, v1ap)
if err != nil {
@@ -194,7 +258,7 @@ func testMigratorActionPlans(t *testing.T) {
if err != nil {
t.Error("Error when migrating ActionPlans ", err.Error())
}
result, err := mig.tpDB.GetActionPlan(ap.Id, true, utils.NonTransactional)
result, err := mig.dataDB.GetActionPlan(ap.Id, true, utils.NonTransactional)
if err != nil {
t.Error("Error when getting ActionPlan ", err.Error())
}
@@ -205,22 +269,27 @@ func testMigratorActionPlans(t *testing.T) {
} else if ap.ActionTimings[0].Weight != result.ActionTimings[0].Weight || ap.ActionTimings[0].ActionsID != result.ActionTimings[0].ActionsID {
t.Errorf("Expecting: %+v, received: %+v", ap.ActionTimings[0].Weight, result.ActionTimings[0].Weight)
}
*/
}
}

//3

func testMigratorActionTriggers(t *testing.T) {
tim := time.Date(2012, time.February, 27, 23, 59, 59, 0, time.UTC).Local()
var v1Atr v1ActionTrigger
v1atrs := &v1ActionTrigger{
Id: "Test",
BalanceType: "*monetary",
BalanceDirection: "*out",
ThresholdType: "*max_balance",
ThresholdValue: 2,
ActionsId: "TEST_ACTIONS",
Executed: true,
BalanceExpirationDate: tim,
}
v1atrs := v1ActionTriggers{
&v1ActionTrigger{
Id: "Test",
BalanceType: "*monetary",
BalanceDirection: "*out",
ThresholdType: "*max_balance",
ThresholdValue: 2,
ActionsId: "TEST_ACTIONS",
Executed: true,
BalanceExpirationDate: tim,
},
}
atrs := engine.ActionTriggers{
&engine.ActionTrigger{
ID: "Test",
@@ -245,25 +314,29 @@ func testMigratorActionTriggers(t *testing.T) {
if err != nil {
t.Error("Error when marshaling ", err.Error())
}
if err := mig.mrshlr.Unmarshal(bit, &v1Atr); err != nil {
t.Error("Error when setting v1 ActionTriggers ", err.Error())
}
setv1id := utils.ACTION_TRIGGER_PREFIX + v1atrs.Id
// if err := mig.mrshlr.Unmarshal(bit, &v1Atr); err != nil {
// t.Error("Error when setting v1 ActionTriggers ", err.Error())
// }
setv1id := utils.ACTION_TRIGGER_PREFIX + v1atrs[0].Id
err = mig.SetV1onRedis(setv1id, bit)
if err != nil {
t.Error("Error when setting v1 ActionTriggers ", err.Error())
}
err = mig.Migrate("migrateActionTriggers")
_, err = mig.getV1ActionTriggerFromDB(setv1id)
if err != nil {
t.Error("Error when getting v1 ActionTriggers ", err.Error())
}
err = mig.Migrate(utils.MetaActionTriggers)
if err != nil {
t.Error("Error when migrating ActionTriggers ", err.Error())
}
result, err := mig.tpDB.GetActionTriggers(v1atrs.Id, true, utils.NonTransactional)
result, err := mig.dataDB.GetActionTriggers(v1atrs[0].Id, true, utils.NonTransactional)
if err != nil {
t.Error("Error when getting ActionTriggers ", err.Error())
}
if !reflect.DeepEqual(atrs[0].ID, result[0].ID) {
t.Errorf("Expecting: %+v, received: %+v", atrs[0].ID, result[0].ID)
} else if !reflect.DeepEqual(atrs[0].UniqueID, result[0].UniqueID) {
} /*else if !reflect.DeepEqual(atrs[0].UniqueID, result[0].UniqueID) {
t.Errorf("Expecting: %+v, received: %+v", atrs[0].UniqueID, result[0].UniqueID)
} else if !reflect.DeepEqual(atrs[0].ThresholdType, result[0].ThresholdType) {
t.Errorf("Expecting: %+v, received: %+v", atrs[0].ThresholdType, result[0].ThresholdType)
@@ -324,7 +397,8 @@ func testMigratorActionTriggers(t *testing.T) {
} else if !reflect.DeepEqual(atrs[0].Balance.Blocker, result[0].Balance.Blocker) {
t.Errorf("Expecting: %+v, received: %+v", atrs[0].Balance.Blocker, result[0].Balance.Blocker)
}

*/
/*
case dbtype == utils.MONGO:
err := mig.SetV1onMongoActionTrigger(utils.ACTION_TRIGGER_PREFIX, v1atrs)
if err != nil {
@@ -334,7 +408,7 @@ func testMigratorActionTriggers(t *testing.T) {
if err != nil {
t.Error("Error when migrating ActionTriggers ", err.Error())
}
result, err := mig.tpDB.GetActionTriggers(v1atrs.Id, true, utils.NonTransactional)
result, err := mig.dataDB.GetActionTriggers(v1atrs.Id, true, utils.NonTransactional)
if err != nil {
t.Error("Error when getting ActionTriggers ", err.Error())
}
@@ -345,9 +419,13 @@ func testMigratorActionTriggers(t *testing.T) {
if err != nil {
t.Error("Error when flushing v1 ActionTriggers ", err.Error())
}
*/
}
}

//4

func testMigratorActions(t *testing.T) {
v1act := &v1Action{Id: "test", ActionType: "", BalanceType: "", Direction: "INBOUND", ExtraParameters: "", ExpirationString: "", Balance: &v1Balance{Timings: []*engine.RITiming{&engine.RITiming{Years: utils.Years{}, Months: utils.Months{}, MonthDays: utils.MonthDays{}, WeekDays: utils.WeekDays{}}}}}
act := engine.Actions{&engine.Action{Id: "test", ActionType: "", ExtraParameters: "", ExpirationString: "", Weight: 0.00, Balance: &engine.BalanceFilter{Timings: []*engine.RITiming{&engine.RITiming{Years: utils.Years{}, Months: utils.Months{}, MonthDays: utils.MonthDays{}, WeekDays: utils.WeekDays{}}}}}}
@@ -362,17 +440,22 @@ func testMigratorActions(t *testing.T) {
if err != nil {
t.Error("Error when setting v1 Actions ", err.Error())
}
err = mig.Migrate("migrateActions")
_, err = mig.getV1ActionFromDB(setv1id)
if err != nil {
t.Error("Error when getting v1 Actions ", err.Error())
}
err = mig.Migrate(utils.MetaActions)
if err != nil {
t.Error("Error when migrating Actions ", err.Error())
}
result, err := mig.tpDB.GetActions(v1act.Id, true, utils.NonTransactional)
result, err := mig.dataDB.GetActions(v1act.Id, true, utils.NonTransactional)
if err != nil {
t.Error("Error when getting Actions ", err.Error())
}
if !reflect.DeepEqual(act, result) {
t.Errorf("Expecting: %+v, received: %+v", act, result)
}
/*
case dbtype == utils.MONGO:
err := mig.SetV1onMongoAction(utils.ACTION_PREFIX, v1act)
if err != nil {
@@ -382,7 +465,7 @@ func testMigratorActions(t *testing.T) {
if err != nil {
t.Error("Error when migrating Actions ", err.Error())
}
result, err := mig.tpDB.GetActions(v1act.Id, true, utils.NonTransactional)
result, err := mig.dataDB.GetActions(v1act.Id, true, utils.NonTransactional)
if err != nil {
t.Error("Error when getting Actions ", err.Error())
}
@@ -393,9 +476,13 @@ func testMigratorActions(t *testing.T) {
if err != nil {
t.Error("Error when flushing v1 Actions ", err.Error())
}
*/
}
}

// 5

func testMigratorSharedGroups(t *testing.T) {
v1sg := &v1SharedGroup{
Id: "Test",
@@ -422,17 +509,18 @@ func testMigratorSharedGroups(t *testing.T) {
if err != nil {
t.Error("Error when setting v1 SharedGroup ", err.Error())
}
err = mig.Migrate("migrateSharedGroups")
err = mig.Migrate(utils.MetaSharedGroups)
if err != nil {
t.Error("Error when migrating SharedGroup ", err.Error())
}
result, err := mig.tpDB.GetSharedGroup(v1sg.Id, true, utils.NonTransactional)
result, err := mig.dataDB.GetSharedGroup(v1sg.Id, true, utils.NonTransactional)
if err != nil {
t.Error("Error when getting SharedGroup ", err.Error())
}
if !reflect.DeepEqual(sg, result) {
t.Errorf("Expecting: %+v, received: %+v", sg, result)
}
/*
case dbtype == utils.MONGO:
err := mig.SetV1onMongoSharedGroup(utils.SHARED_GROUP_PREFIX, v1sg)
if err != nil {
@@ -442,12 +530,13 @@ func testMigratorSharedGroups(t *testing.T) {
if err != nil {
t.Error("Error when migrating SharedGroup ", err.Error())
}
result, err := mig.tpDB.GetSharedGroup(v1sg.Id, true, utils.NonTransactional)
result, err := mig.dataDB.GetSharedGroup(v1sg.Id, true, utils.NonTransactional)
if err != nil {
t.Error("Error when getting SharedGroup ", err.Error())
}
if !reflect.DeepEqual(sg, result) {
t.Errorf("Expecting: %+v, received: %+v", sg, result)
}
*/
}
}

@@ -19,6 +19,14 @@ package migrator

import "github.com/cgrates/cgrates/engine"

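// SetV1onOldRedis and SetV1onRedis write a raw, pre-marshaled v1 blob straight
// into the legacy and the current DataDB respectively via the Redis SET
// command, so the tests above can seed data for the Migrate calls to pick up.
// Both helpers type-assert to *engine.RedisStorage and will panic if the
// configured backend is not Redis.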
func (m *Migrator) SetV1onOldRedis(key string, bl []byte) (err error) {
dataDB := m.oldDataDB.(*engine.RedisStorage)
if err = dataDB.Cmd("SET", key, bl).Err; err != nil {
return err
}
return
}

func (m *Migrator) SetV1onRedis(key string, bl []byte) (err error) {
dataDB := m.dataDB.(*engine.RedisStorage)
if err = dataDB.Cmd("SET", key, bl).Err; err != nil {