diff --git a/general_tests/cacherpl_it_test.go b/general_tests/cacherpl_it_test.go
index 75425e5d1..1be47cbcb 100644
--- a/general_tests/cacherpl_it_test.go
+++ b/general_tests/cacherpl_it_test.go
@@ -20,76 +20,79 @@ along with this program. If not, see
*/
package general_tests
-// import (
-// "net/rpc"
-// "os/exec"
-// "path"
-// "reflect"
-// "sort"
-// "sync"
-// "testing"
-// "time"
+import (
+ "os/exec"
+ "path"
+ "reflect"
+ "sort"
+ "sync"
+ "testing"
+ "time"
-// "github.com/cgrates/cgrates/config"
-// "github.com/cgrates/cgrates/engine"
-// "github.com/cgrates/cgrates/utils"
-// )
+ "github.com/cgrates/birpc"
+ "github.com/cgrates/birpc/context"
+ "github.com/cgrates/cgrates/apis"
+ "github.com/cgrates/cgrates/config"
+ "github.com/cgrates/cgrates/engine"
+ "github.com/cgrates/cgrates/utils"
+)
-// var (
-// dspEngine1Cfg *config.CGRConfig
-// dspEngine1CfgPath string
-// dspEngine1RPC *rpc.Client
-// dspEngine2Cfg *config.CGRConfig
-// dspEngine2CfgPath string
-// dspEngine2RPC *rpc.Client
-// engine1Cfg *config.CGRConfig
-// engine1CfgPath string
-// engine1RPC *rpc.Client
+var (
+ dspEngine1Cfg *config.CGRConfig
+ dspEngine1CfgPath string
+ dspEngine1RPC *birpc.Client
+ dspEngine2Cfg *config.CGRConfig
+ dspEngine2CfgPath string
+ dspEngine2RPC *birpc.Client
+ engine1Cfg *config.CGRConfig
+ engine1CfgPath string
+ engine1RPC *birpc.Client
-// sTestsCacheRpl = []func(t *testing.T){
-// testCacheRplInitCfg,
-// testCacheRplInitDataDb,
-// testCacheRplStartEngine,
-// testCacheRplRpcConn,
-// testCacheRplAddData,
-// testCacheRplPing,
-// testCacheRplCheckReplication,
-// testCacheRplCheckLoadReplication,
+ sTestsCacheRpl = []func(t *testing.T){
+ testCacheRplInitCfg,
+ testCacheRplInitDataDb,
+ testCacheRplStartEngine,
+ testCacheRplRpcConn,
+ testCacheRplAddData,
+ testCacheRplPing,
+ testCacheRplCheckReplication,
+ testCacheRplCheckLoadReplication,
-// testCacheRplStopEngine,
-// }
+ testCacheRplStopEngine,
+ }
-// sTestsCacheRplAA = []func(t *testing.T){
-// testCacheRplAAInitCfg,
-// testCacheRplInitDataDb,
-// testCacheRplStartEngine,
-// testCacheRplRpcConn,
-// testCacheRplAAAddData,
-// testCacheRplAACheckReplication,
-// testCacheRplAACheckLoadReplication,
+ sTestsCacheRplAA = []func(t *testing.T){
+ testCacheRplAAInitCfg,
+ testCacheRplInitDataDb,
+ testCacheRplStartEngine,
+ testCacheRplRpcConn,
+ testCacheRplAAAddData,
+ testCacheRplAACheckReplication,
+ testCacheRplAACheckLoadReplication,
-// testCacheRplStopEngine,
-// }
-// )
+ testCacheRplStopEngine,
+ }
+)
-// func TestCacheReplications(t *testing.T) {
-// switch *dbType {
-// case utils.MetaInternal:
-// t.SkipNow()
-// case utils.MetaMySQL:
-// for _, stest := range sTestsCacheRpl {
-// t.Run("TestCacheReplications", stest)
-// }
-// case utils.MetaMongo:
-// t.SkipNow()
-// case utils.MetaPostgres:
-// t.SkipNow()
-// default:
-// t.Fatal("Unknown Database type")
-// }
+func TestCacheReplications(t *testing.T) {
+ switch *dbType {
+ case utils.MetaInternal:
+ t.SkipNow()
+ case utils.MetaMySQL:
+ for _, stest := range sTestsCacheRpl {
+ t.Run("TestCacheReplications", stest)
+ }
+ case utils.MetaMongo:
+ t.SkipNow()
+ case utils.MetaPostgres:
+ t.SkipNow()
+ default:
+ t.Fatal("Unknown Database type")
+ }
-// }
+}
+// TODO: fix and re-enable TestCacheReplicationActiveActive below
// func TestCacheReplicationActiveActive(t *testing.T) {
// switch *dbType {
// case utils.MetaInternal:
@@ -107,529 +110,535 @@ package general_tests
// }
// }
-// func testCacheRplInitCfg(t *testing.T) {
-// var err error
-// dspEngine1CfgPath = path.Join(*dataDir, "conf", "samples", "cache_replicate", "dispatcher_engine")
-// dspEngine1Cfg, err = config.NewCGRConfigFromPath(dspEngine1CfgPath)
-// if err != nil {
-// t.Error(err)
-// }
+func testCacheRplInitCfg(t *testing.T) {
+ var err error
+ dspEngine1CfgPath = path.Join(*dataDir, "conf", "samples", "cache_replicate", "dispatcher_engine")
+ dspEngine1Cfg, err = config.NewCGRConfigFromPath(context.Background(), dspEngine1CfgPath)
+ if err != nil {
+ t.Error(err)
+ }
-// dspEngine2CfgPath = path.Join(*dataDir, "conf", "samples", "cache_replicate", "dispatcher_engine2")
-// dspEngine2Cfg, err = config.NewCGRConfigFromPath(dspEngine2CfgPath)
-// if err != nil {
-// t.Error(err)
-// }
+ dspEngine2CfgPath = path.Join(*dataDir, "conf", "samples", "cache_replicate", "dispatcher_engine2")
+ dspEngine2Cfg, err = config.NewCGRConfigFromPath(context.Background(), dspEngine2CfgPath)
+ if err != nil {
+ t.Error(err)
+ }
-// engine1CfgPath = path.Join(*dataDir, "conf", "samples", "cache_replicate", "engine1")
-// engine1Cfg, err = config.NewCGRConfigFromPath(engine1CfgPath)
-// if err != nil {
-// t.Error(err)
-// }
-// }
+ engine1CfgPath = path.Join(*dataDir, "conf", "samples", "cache_replicate", "engine1")
+ engine1Cfg, err = config.NewCGRConfigFromPath(context.Background(), engine1CfgPath)
+ if err != nil {
+ t.Error(err)
+ }
+}
-// func testCacheRplAAInitCfg(t *testing.T) {
-// var err error
-// dspEngine1CfgPath = path.Join(*dataDir, "conf", "samples", "cache_rpl_active_active", "dispatcher_engine")
-// dspEngine1Cfg, err = config.NewCGRConfigFromPath(dspEngine1CfgPath)
-// if err != nil {
-// t.Error(err)
-// }
+func testCacheRplAAInitCfg(t *testing.T) {
+ var err error
+ dspEngine1CfgPath = path.Join(*dataDir, "conf", "samples", "cache_rpl_active_active", "dispatcher_engine")
+ dspEngine1Cfg, err = config.NewCGRConfigFromPath(context.Background(), dspEngine1CfgPath)
+ if err != nil {
+ t.Error(err)
+ }
-// dspEngine2CfgPath = path.Join(*dataDir, "conf", "samples", "cache_rpl_active_active", "dispatcher_engine2")
-// dspEngine2Cfg, err = config.NewCGRConfigFromPath(dspEngine2CfgPath)
-// if err != nil {
-// t.Error(err)
-// }
+ dspEngine2CfgPath = path.Join(*dataDir, "conf", "samples", "cache_rpl_active_active", "dispatcher_engine2")
+ dspEngine2Cfg, err = config.NewCGRConfigFromPath(context.Background(), dspEngine2CfgPath)
+ if err != nil {
+ t.Error(err)
+ }
-// engine1CfgPath = path.Join(*dataDir, "conf", "samples", "cache_rpl_active_active", "engine1")
-// engine1Cfg, err = config.NewCGRConfigFromPath(engine1CfgPath)
-// if err != nil {
-// t.Error(err)
-// }
-// }
+ engine1CfgPath = path.Join(*dataDir, "conf", "samples", "cache_rpl_active_active", "engine1")
+ engine1Cfg, err = config.NewCGRConfigFromPath(context.Background(), engine1CfgPath)
+ if err != nil {
+ t.Error(err)
+ }
+}
-// func testCacheRplInitDataDb(t *testing.T) {
-// if err := engine.InitDataDB(dspEngine1Cfg); err != nil {
-// t.Fatal(err)
-// }
-// if err := engine.InitDataDB(dspEngine2Cfg); err != nil {
-// t.Fatal(err)
-// }
-// }
+func testCacheRplInitDataDb(t *testing.T) {
+ if err := engine.InitDataDB(dspEngine1Cfg); err != nil {
+ t.Fatal(err)
+ }
+ if err := engine.InitDataDB(dspEngine2Cfg); err != nil {
+ t.Fatal(err)
+ }
+}
-// func testCacheRplStartEngine(t *testing.T) {
-// if _, err := engine.StopStartEngine(dspEngine1CfgPath, *waitRater); err != nil {
-// t.Fatal(err)
-// }
-// if _, err := engine.StartEngine(dspEngine2CfgPath, *waitRater); err != nil {
-// t.Fatal(err)
-// }
-// if _, err := engine.StartEngine(engine1CfgPath, *waitRater); err != nil {
-// t.Fatal(err)
-// }
-// }
+func testCacheRplStartEngine(t *testing.T) {
+ if _, err := engine.StopStartEngine(dspEngine1CfgPath, *waitRater); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := engine.StartEngine(dspEngine2CfgPath, *waitRater); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := engine.StartEngine(engine1CfgPath, *waitRater); err != nil {
+ t.Fatal(err)
+ }
+}
-// func testCacheRplRpcConn(t *testing.T) {
-// var err error
-// dspEngine1RPC, err = newRPCClient(dspEngine1Cfg.ListenCfg())
-// if err != nil {
-// t.Fatal(err)
-// }
-// dspEngine2RPC, err = newRPCClient(dspEngine2Cfg.ListenCfg())
-// if err != nil {
-// t.Fatal(err)
-// }
-// engine1RPC, err = newRPCClient(engine1Cfg.ListenCfg())
-// if err != nil {
-// t.Fatal(err)
-// }
-// }
+func testCacheRplRpcConn(t *testing.T) {
+ var err error
+ dspEngine1RPC, err = newRPCClient(dspEngine1Cfg.ListenCfg())
+ if err != nil {
+ t.Fatal(err)
+ }
+ dspEngine2RPC, err = newRPCClient(dspEngine2Cfg.ListenCfg())
+ if err != nil {
+ t.Fatal(err)
+ }
+ engine1RPC, err = newRPCClient(engine1Cfg.ListenCfg())
+ if err != nil {
+ t.Fatal(err)
+ }
+}
-// func testCacheRplAddData(t *testing.T) {
-// wchan := make(chan struct{}, 1)
-// go func() {
-// loaderPath, err := exec.LookPath("cgr-loader")
-// if err != nil {
-// t.Error(err)
-// }
-// loader := exec.Command(loaderPath, "-config_path", dspEngine1CfgPath, "-path",
-// path.Join(*dataDir, "tariffplans", "cache_replications", "dispatcher_engine"))
+func testCacheRplAddData(t *testing.T) {
+ wchan := make(chan struct{}, 1)
+ go func() {
+ loaderPath, err := exec.LookPath("cgr-loader")
+ if err != nil {
+ t.Error(err)
+ }
+ loader := exec.Command(loaderPath, "-config_path", dspEngine1CfgPath, "-path",
+ path.Join(*dataDir, "tariffplans", "cache_replications", "dispatcher_engine"))
-// if err := loader.Start(); err != nil {
-// t.Error(err)
-// }
-// loader.Wait()
-// wchan <- struct{}{}
-// }()
-// select {
-// case <-wchan:
-// case <-time.After(2 * time.Second):
-// t.Errorf("cgr-loader failed: ")
-// }
+ if err := loader.Start(); err != nil {
+ t.Error(err)
+ }
+ loader.Wait()
+ wchan <- struct{}{}
+ }()
+ select {
+ case <-wchan:
+ case <-time.After(2 * time.Second):
+ t.Errorf("cgr-loader failed: ")
+ }
-// go func() {
-// loaderPath, err := exec.LookPath("cgr-loader")
-// if err != nil {
-// t.Error(err)
-// }
-// loader := exec.Command(loaderPath, "-config_path", dspEngine2CfgPath, "-path",
-// path.Join(*dataDir, "tariffplans", "cache_replications", "dispatcher_engine2"))
+ go func() {
+ loaderPath, err := exec.LookPath("cgr-loader")
+ if err != nil {
+ t.Error(err)
+ }
+ loader := exec.Command(loaderPath, "-config_path", dspEngine2CfgPath, "-path",
+ path.Join(*dataDir, "tariffplans", "cache_replications", "dispatcher_engine2"))
-// if err := loader.Start(); err != nil {
-// t.Error(err)
-// }
-// loader.Wait()
-// wchan <- struct{}{}
-// }()
-// select {
-// case <-wchan:
-// case <-time.After(2 * time.Second):
-// t.Errorf("cgr-loader failed: ")
-// }
+ if err := loader.Start(); err != nil {
+ t.Error(err)
+ }
+ loader.Wait()
+ wchan <- struct{}{}
+ }()
+ select {
+ case <-wchan:
+ case <-time.After(2 * time.Second):
+ t.Errorf("cgr-loader failed: ")
+ }
-// chargerProfile := &v1.ChargerWithAPIOpts{
-// ChargerProfile: &engine.ChargerProfile{
-// Tenant: "cgrates.org",
-// ID: "DefaultCharger",
-// RunID: utils.MetaDefault,
-// AttributeIDs: []string{utils.MetaNone},
-// Weight: 20,
-// },
-// }
-// var result string
-// if err := engine1RPC.Call(utils.AdminSv1SetChargerProfile, chargerProfile, &result); err != nil {
-// t.Error(err)
-// } else if result != utils.OK {
-// t.Error("Unexpected reply returned", result)
-// }
-// }
+ chargerProfile := &apis.ChargerWithAPIOpts{
+ ChargerProfile: &engine.ChargerProfile{
+ Tenant: "cgrates.org",
+ ID: "DefaultCharger",
+ RunID: utils.MetaDefault,
+ AttributeIDs: []string{utils.MetaNone},
+ Weights: utils.DynamicWeights{
+ {
+ Weight: 20,
+ }},
+ },
+ }
+ var result string
+ if err := engine1RPC.Call(context.Background(), utils.AdminSv1SetChargerProfile, chargerProfile, &result); err != nil {
+ t.Error(err)
+ } else if result != utils.OK {
+ t.Error("Unexpected reply returned", result)
+ }
+}
-// func testCacheRplAAAddData(t *testing.T) {
-// wchan := make(chan struct{}, 1)
-// go func() {
-// loaderPath, err := exec.LookPath("cgr-loader")
-// if err != nil {
-// t.Error(err)
-// }
-// loader := exec.Command(loaderPath, "-config_path", dspEngine1CfgPath, "-path",
-// path.Join(*dataDir, "tariffplans", "cache_rpl_active_active", "dispatcher_engine"))
+func testCacheRplAAAddData(t *testing.T) {
+ wchan := make(chan struct{}, 1)
+ go func() {
+ loaderPath, err := exec.LookPath("cgr-loader")
+ if err != nil {
+ t.Error(err)
+ }
+ loader := exec.Command(loaderPath, "-config_path", dspEngine1CfgPath, "-path",
+ path.Join(*dataDir, "tariffplans", "cache_rpl_active_active", "dispatcher_engine"))
-// if err := loader.Start(); err != nil {
-// t.Error(err)
-// }
-// loader.Wait()
-// wchan <- struct{}{}
-// }()
-// select {
-// case <-wchan:
-// case <-time.After(2 * time.Second):
-// t.Errorf("cgr-loader failed: ")
-// }
+ if err := loader.Start(); err != nil {
+ t.Error(err)
+ }
+ loader.Wait()
+ wchan <- struct{}{}
+ }()
+ select {
+ case <-wchan:
+ case <-time.After(2 * time.Second):
+ t.Errorf("cgr-loader failed: ")
+ }
-// go func() {
-// loaderPath, err := exec.LookPath("cgr-loader")
-// if err != nil {
-// t.Error(err)
-// }
-// loader := exec.Command(loaderPath, "-config_path", dspEngine2CfgPath, "-path",
-// path.Join(*dataDir, "tariffplans", "cache_rpl_active_active", "dispatcher_engine2"))
+ go func() {
+ loaderPath, err := exec.LookPath("cgr-loader")
+ if err != nil {
+ t.Error(err)
+ }
+ loader := exec.Command(loaderPath, "-config_path", dspEngine2CfgPath, "-path",
+ path.Join(*dataDir, "tariffplans", "cache_rpl_active_active", "dispatcher_engine2"))
-// if err := loader.Start(); err != nil {
-// t.Error(err)
-// }
-// loader.Wait()
-// wchan <- struct{}{}
-// }()
-// select {
-// case <-wchan:
-// case <-time.After(2 * time.Second):
-// t.Errorf("cgr-loader failed: ")
-// }
+ if err := loader.Start(); err != nil {
+ t.Error(err)
+ }
+ loader.Wait()
+ wchan <- struct{}{}
+ }()
+ select {
+ case <-wchan:
+ case <-time.After(2 * time.Second):
+ t.Errorf("cgr-loader failed: ")
+ }
-// chargerProfile := &v1.ChargerWithAPIOpts{
-// ChargerProfile: &engine.ChargerProfile{
-// Tenant: "cgrates.org",
-// ID: "DefaultCharger",
-// RunID: utils.MetaDefault,
-// AttributeIDs: []string{utils.MetaNone},
-// Weight: 20,
-// },
-// }
-// var result string
-// if err := engine1RPC.Call(utils.AdminSv1SetChargerProfile, chargerProfile, &result); err != nil {
-// t.Error(err)
-// } else if result != utils.OK {
-// t.Error("Unexpected reply returned", result)
-// }
-// }
+ chargerProfile := &apis.ChargerWithAPIOpts{
+ ChargerProfile: &engine.ChargerProfile{
+ Tenant: "cgrates.org",
+ ID: "DefaultCharger",
+ RunID: utils.MetaDefault,
+ AttributeIDs: []string{utils.MetaNone},
+ Weights: utils.DynamicWeights{
+ {
+ Weight: 20,
+ }},
+ },
+ }
+ var result string
+ if err := engine1RPC.Call(context.Background(), utils.AdminSv1SetChargerProfile, chargerProfile, &result); err != nil {
+ t.Error(err)
+ } else if result != utils.OK {
+ t.Error("Unexpected reply returned", result)
+ }
+}
-// func testCacheRplPing(t *testing.T) {
-// var reply map[string]interface{}
-// ev := utils.TenantWithAPIOpts{
-// Tenant: "cgrates.org",
-// APIOpts: map[string]interface{}{
-// utils.OptsRouteID: "testRoute123",
-// },
-// }
-// if err := dspEngine1RPC.Call(utils.CoreSv1Status, &ev, &reply); err != nil {
-// t.Error(err)
-// } else if reply[utils.NodeID] != "Engine1" {
-// t.Errorf("Received: %s", utils.ToJSON(reply))
-// }
+func testCacheRplPing(t *testing.T) {
+ var reply map[string]interface{}
+ ev := utils.TenantWithAPIOpts{
+ Tenant: "cgrates.org",
+ APIOpts: map[string]interface{}{
+ utils.OptsRouteID: "testRoute123",
+ },
+ }
+ if err := dspEngine1RPC.Call(context.Background(), utils.CoreSv1Status, &ev, &reply); err != nil {
+ t.Error(err)
+ } else if reply[utils.NodeID] != "Engine1" {
+ t.Errorf("Received: %s", utils.ToJSON(reply))
+ }
-// var rpl string
-// if err := dspEngine1RPC.Call(utils.AttributeSv1Ping, &utils.CGREvent{
-// Tenant: "cgrates.org",
+ var rpl string
+ if err := dspEngine1RPC.Call(context.Background(), utils.AttributeSv1Ping, &utils.CGREvent{
+ Tenant: "cgrates.org",
-// APIOpts: map[string]interface{}{
-// utils.OptsRouteID: "testRoute123",
-// },
-// }, &rpl); err != nil {
-// t.Error(err)
-// } else if rpl != utils.Pong {
-// t.Errorf("Received: %s", rpl)
-// }
-// }
+ APIOpts: map[string]interface{}{
+ utils.OptsRouteID: "testRoute123",
+ },
+ }, &rpl); err != nil {
+ t.Error(err)
+ } else if rpl != utils.Pong {
+ t.Errorf("Received: %s", rpl)
+ }
+}
-// func testCacheRplCheckReplication(t *testing.T) {
-// var reply map[string]interface{}
-// ev := utils.TenantWithAPIOpts{
-// Tenant: "cgrates.org",
-// }
-// if err := dspEngine2RPC.Call(utils.CoreSv1Status, &ev, &reply); err != nil {
-// t.Error(err)
-// } else if reply[utils.NodeID] != "DispatcherEngine2" {
-// t.Errorf("Received: %s", utils.ToJSON(reply))
-// }
-// var rcvKeys []string
-// expKeys := []string{"testRoute123:*core", "testRoute123:*attributes"}
-// argsAPI := utils.ArgsGetCacheItemIDsWithAPIOpts{
-// Tenant: "cgrates.org",
-// ArgsGetCacheItemIDs: utils.ArgsGetCacheItemIDs{
-// CacheID: utils.CacheDispatcherRoutes,
-// },
-// }
-// if err := dspEngine2RPC.Call(utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err != nil {
-// t.Error(err.Error())
-// }
-// sort.Strings(rcvKeys)
-// sort.Strings(expKeys)
-// if !reflect.DeepEqual(expKeys, rcvKeys) {
-// t.Errorf("Expected: %+v, received: %+v", expKeys, rcvKeys)
-// }
+func testCacheRplCheckReplication(t *testing.T) {
+ var reply map[string]interface{}
+ ev := utils.TenantWithAPIOpts{
+ Tenant: "cgrates.org",
+ }
+ if err := dspEngine2RPC.Call(context.Background(), utils.CoreSv1Status, &ev, &reply); err != nil {
+ t.Error(err)
+ } else if reply[utils.NodeID] != "DispatcherEngine2" {
+ t.Errorf("Received: %s", utils.ToJSON(reply))
+ }
+ var rcvKeys []string
+ expKeys := []string{"testRoute123:*core", "testRoute123:*attributes"}
+ argsAPI := utils.ArgsGetCacheItemIDsWithAPIOpts{
+ Tenant: "cgrates.org",
+ ArgsGetCacheItemIDs: utils.ArgsGetCacheItemIDs{
+ CacheID: utils.CacheDispatcherRoutes,
+ },
+ }
+ if err := dspEngine2RPC.Call(context.Background(), utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err != nil {
+ t.Error(err.Error())
+ }
+ sort.Strings(rcvKeys)
+ sort.Strings(expKeys)
+ if !reflect.DeepEqual(expKeys, rcvKeys) {
+ t.Errorf("Expected: %+v, received: %+v", expKeys, rcvKeys)
+ }
-// var rpl string
-// if err := dspEngine2RPC.Call(utils.AttributeSv1Ping, &utils.CGREvent{
-// Tenant: "cgrates.org",
+ var rpl string
+ if err := dspEngine2RPC.Call(context.Background(), utils.AttributeSv1Ping, &utils.CGREvent{
+ Tenant: "cgrates.org",
-// APIOpts: map[string]interface{}{
-// utils.OptsRouteID: "testRoute123",
-// },
-// }, &rpl); err != nil {
-// t.Error(err)
-// } else if rpl != utils.Pong {
-// t.Errorf("Received: %s", rpl)
-// }
-// }
+ APIOpts: map[string]interface{}{
+ utils.OptsRouteID: "testRoute123",
+ },
+ }, &rpl); err != nil {
+ t.Error(err)
+ } else if rpl != utils.Pong {
+ t.Errorf("Received: %s", rpl)
+ }
+}
-// func testCacheRplAACheckReplication(t *testing.T) {
-// var rcvKeys []string
-// argsAPI := utils.ArgsGetCacheItemIDsWithAPIOpts{
-// Tenant: "cgrates.org",
-// ArgsGetCacheItemIDs: utils.ArgsGetCacheItemIDs{
-// CacheID: utils.CacheDispatcherRoutes,
-// },
-// }
-// if err := dspEngine1RPC.Call(utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err == nil ||
-// err.Error() != utils.ErrNotFound.Error() {
-// t.Error(err)
-// }
-// if err := dspEngine2RPC.Call(utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err == nil ||
-// err.Error() != utils.ErrNotFound.Error() {
-// t.Error(err)
-// }
+func testCacheRplAACheckReplication(t *testing.T) {
+ var rcvKeys []string
+ argsAPI := utils.ArgsGetCacheItemIDsWithAPIOpts{
+ Tenant: "cgrates.org",
+ ArgsGetCacheItemIDs: utils.ArgsGetCacheItemIDs{
+ CacheID: utils.CacheDispatcherRoutes,
+ },
+ }
+ if err := dspEngine1RPC.Call(context.Background(), utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err == nil ||
+ err.Error() != utils.ErrNotFound.Error() {
+ t.Error(err)
+ }
+ if err := dspEngine2RPC.Call(context.Background(), utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err == nil ||
+ err.Error() != utils.ErrNotFound.Error() {
+ t.Error(err)
+ }
-// var rpl string
-// if err := dspEngine2RPC.Call(utils.AttributeSv1Ping, &utils.CGREvent{
-// Tenant: "cgrates.org",
+ var rpl string
+ if err := dspEngine2RPC.Call(context.Background(), utils.AttributeSv1Ping, &utils.CGREvent{
+ Tenant: "cgrates.org",
-// APIOpts: map[string]interface{}{
-// utils.OptsRouteID: "testRouteFromDispatcher2",
-// },
-// }, &rpl); err != nil {
-// t.Error(err)
-// } else if rpl != utils.Pong {
-// t.Errorf("Received: %s", rpl)
-// }
+ APIOpts: map[string]interface{}{
+ utils.OptsRouteID: "testRouteFromDispatcher2",
+ },
+ }, &rpl); err != nil {
+ t.Error(err)
+ } else if rpl != utils.Pong {
+ t.Errorf("Received: %s", rpl)
+ }
-// if err := dspEngine1RPC.Call(utils.AttributeSv1Ping, &utils.CGREvent{
-// Tenant: "cgrates.org",
-// APIOpts: map[string]interface{}{
-// utils.OptsRouteID: "testRouteFromDispatcher1",
-// },
-// }, &rpl); err != nil {
-// t.Error(err)
-// } else if rpl != utils.Pong {
-// t.Errorf("Received: %s", rpl)
-// }
+ if err := dspEngine1RPC.Call(context.Background(), utils.AttributeSv1Ping, &utils.CGREvent{
+ Tenant: "cgrates.org",
+ APIOpts: map[string]interface{}{
+ utils.OptsRouteID: "testRouteFromDispatcher1",
+ },
+ }, &rpl); err != nil {
+ t.Error(err)
+ } else if rpl != utils.Pong {
+ t.Errorf("Received: %s", rpl)
+ }
-// expKeys := []string{"testRouteFromDispatcher2:*attributes", "testRouteFromDispatcher1:*attributes"}
+ expKeys := []string{"testRouteFromDispatcher2:*attributes", "testRouteFromDispatcher1:*attributes"}
-// if err := dspEngine2RPC.Call(utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err != nil {
-// t.Error(err.Error())
-// }
-// sort.Strings(rcvKeys)
-// sort.Strings(expKeys)
-// if !reflect.DeepEqual(expKeys, rcvKeys) {
-// t.Errorf("Expected: %+v, received: %+v", expKeys, rcvKeys)
-// }
+ if err := dspEngine2RPC.Call(context.Background(), utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err != nil {
+ t.Error(err.Error())
+ }
+ sort.Strings(rcvKeys)
+ sort.Strings(expKeys)
+ if !reflect.DeepEqual(expKeys, rcvKeys) {
+ t.Errorf("Expected: %+v, received: %+v", expKeys, rcvKeys)
+ }
-// if err := dspEngine1RPC.Call(utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err != nil {
-// t.Error(err.Error())
-// }
-// sort.Strings(rcvKeys)
-// sort.Strings(expKeys)
-// if !reflect.DeepEqual(expKeys, rcvKeys) {
-// t.Errorf("Expected: %+v, received: %+v", expKeys, rcvKeys)
-// }
+ if err := dspEngine1RPC.Call(context.Background(), utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err != nil {
+ t.Error(err.Error())
+ }
+ sort.Strings(rcvKeys)
+ sort.Strings(expKeys)
+ if !reflect.DeepEqual(expKeys, rcvKeys) {
+ t.Errorf("Expected: %+v, received: %+v", expKeys, rcvKeys)
+ }
-// }
+}
-// func testCacheRplAACheckLoadReplication(t *testing.T) {
-// var rcvKeys []string
-// argsAPI := utils.ArgsGetCacheItemIDsWithAPIOpts{
-// Tenant: "cgrates.org",
-// ArgsGetCacheItemIDs: utils.ArgsGetCacheItemIDs{
-// CacheID: utils.CacheDispatcherLoads,
-// },
-// }
-// if err := dspEngine2RPC.Call(utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err == nil ||
-// err.Error() != utils.ErrNotFound.Error() {
-// t.Error(err)
-// }
-// if err := dspEngine1RPC.Call(utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err == nil ||
-// err.Error() != utils.ErrNotFound.Error() {
-// t.Error(err)
-// }
+func testCacheRplAACheckLoadReplication(t *testing.T) {
+ var rcvKeys []string
+ argsAPI := utils.ArgsGetCacheItemIDsWithAPIOpts{
+ Tenant: "cgrates.org",
+ ArgsGetCacheItemIDs: utils.ArgsGetCacheItemIDs{
+ CacheID: utils.CacheDispatcherLoads,
+ },
+ }
+ if err := dspEngine2RPC.Call(context.Background(), utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err == nil ||
+ err.Error() != utils.ErrNotFound.Error() {
+ t.Error(err)
+ }
+ if err := dspEngine1RPC.Call(context.Background(), utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err == nil ||
+ err.Error() != utils.ErrNotFound.Error() {
+ t.Error(err)
+ }
-// var wgDisp1 sync.WaitGroup
-// var wgDisp2 sync.WaitGroup
-// for i := 0; i < 10; i++ {
-// wgDisp1.Add(1)
-// wgDisp2.Add(1)
-// go func() {
-// var rpl []*engine.ChrgSProcessEventReply
-// if err := dspEngine1RPC.Call(utils.ChargerSv1ProcessEvent, &utils.CGREvent{
-// Tenant: "cgrates.org",
-// ID: "testCacheRplAACheckLoadReplication",
-// Event: map[string]interface{}{
-// utils.AccountField: "1007",
-// utils.Destination: "+491511231234",
-// "EventName": "TestLoad",
-// },
+ var wgDisp1 sync.WaitGroup
+ var wgDisp2 sync.WaitGroup
+ for i := 0; i < 10; i++ {
+ wgDisp1.Add(1)
+ wgDisp2.Add(1)
+ go func() {
+ var rpl []*engine.ChrgSProcessEventReply
+ if err := dspEngine1RPC.Call(context.Background(), utils.ChargerSv1ProcessEvent, &utils.CGREvent{
+ Tenant: "cgrates.org",
+ ID: "testCacheRplAACheckLoadReplication",
+ Event: map[string]interface{}{
+ utils.AccountField: "1007",
+ utils.Destination: "+491511231234",
+ "EventName": "TestLoad",
+ },
-// APIOpts: map[string]interface{}{
-// utils.OptsRouteID: "testRouteFromDispatcher1",
-// },
-// }, &rpl); err != nil {
-// t.Error(err)
-// } else if rpl[0].ChargerSProfile != "DefaultCharger" {
-// t.Errorf("Received: %+v", utils.ToJSON(rpl))
-// }
-// wgDisp1.Done()
-// }()
-// go func() {
-// var rpl []*engine.ChrgSProcessEventReply
-// if err := dspEngine2RPC.Call(utils.ChargerSv1ProcessEvent, &utils.CGREvent{
+ APIOpts: map[string]interface{}{
+ utils.OptsRouteID: "testRouteFromDispatcher1",
+ },
+ }, &rpl); err != nil {
+ t.Error(err)
+ } else if rpl[0].ChargerSProfile != "DefaultCharger" {
+ t.Errorf("Received: %+v", utils.ToJSON(rpl))
+ }
+ wgDisp1.Done()
+ }()
+ go func() {
+ var rpl []*engine.ChrgSProcessEventReply
+ if err := dspEngine2RPC.Call(context.Background(), utils.ChargerSv1ProcessEvent, &utils.CGREvent{
-// Tenant: "cgrates.org",
-// ID: "testCacheRplAACheckLoadReplication",
-// Event: map[string]interface{}{
-// utils.AccountField: "1007",
-// utils.Destination: "+491511231234",
-// "EventName": "TestLoad",
-// },
+ Tenant: "cgrates.org",
+ ID: "testCacheRplAACheckLoadReplication",
+ Event: map[string]interface{}{
+ utils.AccountField: "1007",
+ utils.Destination: "+491511231234",
+ "EventName": "TestLoad",
+ },
-// APIOpts: map[string]interface{}{
-// utils.OptsRouteID: "testRouteFromDispatcher2",
-// },
-// }, &rpl); err != nil {
-// t.Error(err)
-// } else if rpl[0].ChargerSProfile != "DefaultCharger" {
-// t.Errorf("Received: %+v", utils.ToJSON(rpl))
-// }
-// wgDisp2.Done()
-// }()
-// }
-// wgDisp1.Wait()
-// wgDisp2.Wait()
-// expKeys := []string{"testRouteFromDispatcher1:*attributes",
-// "testRouteFromDispatcher1:*chargers", "testRouteFromDispatcher2:*attributes",
-// "testRouteFromDispatcher2:*chargers"}
-// argsAPI = utils.ArgsGetCacheItemIDsWithAPIOpts{
-// Tenant: "cgrates.org",
-// ArgsGetCacheItemIDs: utils.ArgsGetCacheItemIDs{
-// CacheID: utils.CacheDispatcherRoutes,
-// },
-// }
-// if err := dspEngine2RPC.Call(utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err != nil {
-// t.Error(err.Error())
-// }
-// sort.Strings(rcvKeys)
-// sort.Strings(expKeys)
-// if !reflect.DeepEqual(expKeys, rcvKeys) {
-// t.Errorf("Expected: %+v, received: %+v", expKeys, rcvKeys)
-// }
-// if err := dspEngine1RPC.Call(utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err != nil {
-// t.Error(err.Error())
-// }
-// sort.Strings(rcvKeys)
-// sort.Strings(expKeys)
-// if !reflect.DeepEqual(expKeys, rcvKeys) {
-// t.Errorf("Expected: %+v, received: %+v", expKeys, rcvKeys)
-// }
+ APIOpts: map[string]interface{}{
+ utils.OptsRouteID: "testRouteFromDispatcher2",
+ },
+ }, &rpl); err != nil {
+ t.Error(err)
+ } else if rpl[0].ChargerSProfile != "DefaultCharger" {
+ t.Errorf("Received: %+v", utils.ToJSON(rpl))
+ }
+ wgDisp2.Done()
+ }()
+ }
+ wgDisp1.Wait()
+ wgDisp2.Wait()
+ expKeys := []string{"testRouteFromDispatcher1:*attributes",
+ "testRouteFromDispatcher1:*chargers", "testRouteFromDispatcher2:*attributes",
+ "testRouteFromDispatcher2:*chargers"}
+ argsAPI = utils.ArgsGetCacheItemIDsWithAPIOpts{
+ Tenant: "cgrates.org",
+ ArgsGetCacheItemIDs: utils.ArgsGetCacheItemIDs{
+ CacheID: utils.CacheDispatcherRoutes,
+ },
+ }
+ if err := dspEngine2RPC.Call(context.Background(), utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err != nil {
+ t.Error(err.Error())
+ }
+ sort.Strings(rcvKeys)
+ sort.Strings(expKeys)
+ if !reflect.DeepEqual(expKeys, rcvKeys) {
+ t.Errorf("Expected: %+v, received: %+v", expKeys, rcvKeys)
+ }
+ if err := dspEngine1RPC.Call(context.Background(), utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err != nil {
+ t.Error(err.Error())
+ }
+ sort.Strings(rcvKeys)
+ sort.Strings(expKeys)
+ if !reflect.DeepEqual(expKeys, rcvKeys) {
+ t.Errorf("Expected: %+v, received: %+v", expKeys, rcvKeys)
+ }
-// expKeys = []string{"cgrates.org:Engine2"}
-// argsAPI = utils.ArgsGetCacheItemIDsWithAPIOpts{
-// Tenant: "cgrates.org",
-// ArgsGetCacheItemIDs: utils.ArgsGetCacheItemIDs{
-// CacheID: utils.CacheDispatcherLoads,
-// },
-// }
-// if err := dspEngine2RPC.Call(utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err != nil {
-// t.Error(err.Error())
-// }
-// sort.Strings(rcvKeys)
-// sort.Strings(expKeys)
-// if !reflect.DeepEqual(expKeys, rcvKeys) {
-// t.Errorf("Expected: %+v, received: %+v", expKeys, rcvKeys)
-// }
-// if err := dspEngine1RPC.Call(utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err != nil {
-// t.Error(err.Error())
-// }
-// sort.Strings(rcvKeys)
-// sort.Strings(expKeys)
-// if !reflect.DeepEqual(expKeys, rcvKeys) {
-// t.Errorf("Expected: %+v, received: %+v", expKeys, rcvKeys)
-// }
-// }
+ expKeys = []string{"cgrates.org:Engine2"}
+ argsAPI = utils.ArgsGetCacheItemIDsWithAPIOpts{
+ Tenant: "cgrates.org",
+ ArgsGetCacheItemIDs: utils.ArgsGetCacheItemIDs{
+ CacheID: utils.CacheDispatcherLoads,
+ },
+ }
+ if err := dspEngine2RPC.Call(context.Background(), utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err != nil {
+ t.Error(err.Error())
+ }
+ sort.Strings(rcvKeys)
+ sort.Strings(expKeys)
+ if !reflect.DeepEqual(expKeys, rcvKeys) {
+ t.Errorf("Expected: %+v, received: %+v", expKeys, rcvKeys)
+ }
+ if err := dspEngine1RPC.Call(context.Background(), utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err != nil {
+ t.Error(err.Error())
+ }
+ sort.Strings(rcvKeys)
+ sort.Strings(expKeys)
+ if !reflect.DeepEqual(expKeys, rcvKeys) {
+ t.Errorf("Expected: %+v, received: %+v", expKeys, rcvKeys)
+ }
+}
-// func testCacheRplCheckLoadReplication(t *testing.T) {
-// var rcvKeys []string
-// argsAPI := utils.ArgsGetCacheItemIDsWithAPIOpts{
-// Tenant: "cgrates.org",
-// ArgsGetCacheItemIDs: utils.ArgsGetCacheItemIDs{
-// CacheID: utils.CacheDispatcherLoads,
-// },
-// }
-// if err := dspEngine2RPC.Call(utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err == nil || err.Error() != utils.ErrNotFound.Error() {
-// t.Error(err)
-// }
+func testCacheRplCheckLoadReplication(t *testing.T) {
+ var rcvKeys []string
+ argsAPI := utils.ArgsGetCacheItemIDsWithAPIOpts{
+ Tenant: "cgrates.org",
+ ArgsGetCacheItemIDs: utils.ArgsGetCacheItemIDs{
+ CacheID: utils.CacheDispatcherLoads,
+ },
+ }
+ if err := dspEngine2RPC.Call(context.Background(), utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err == nil || err.Error() != utils.ErrNotFound.Error() {
+ t.Error(err)
+ }
-// var rpl []*engine.ChrgSProcessEventReply
-// var wg sync.WaitGroup
-// for i := 0; i < 10; i++ {
-// wg.Add(1)
-// go func() {
-// if err := dspEngine1RPC.Call(utils.ChargerSv1ProcessEvent, &utils.CGREvent{
+ var rpl []*engine.ChrgSProcessEventReply
+ var wg sync.WaitGroup
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ if err := dspEngine1RPC.Call(context.Background(), utils.ChargerSv1ProcessEvent, &utils.CGREvent{
-// Tenant: "cgrates.org",
-// ID: "testCacheRplCheckLoadReplication",
-// Event: map[string]interface{}{
-// utils.AccountField: "1007",
-// utils.Destination: "+491511231234",
-// "EventName": "TestLoad",
-// },
+ Tenant: "cgrates.org",
+ ID: "testCacheRplCheckLoadReplication",
+ Event: map[string]interface{}{
+ utils.AccountField: "1007",
+ utils.Destination: "+491511231234",
+ "EventName": "TestLoad",
+ },
-// APIOpts: map[string]interface{}{
-// utils.OptsRouteID: "testRoute123",
-// },
-// }, &rpl); err != nil {
-// t.Error(err)
-// } else if rpl[0].ChargerSProfile != "DefaultCharger" {
-// t.Errorf("Received: %+v", utils.ToJSON(rpl))
-// }
-// wg.Done()
-// }()
-// }
-// wg.Wait()
-// expKeys := []string{"testRoute123:*core", "testRoute123:*attributes", "testRoute123:*chargers"}
-// argsAPI = utils.ArgsGetCacheItemIDsWithAPIOpts{
-// Tenant: "cgrates.org",
-// ArgsGetCacheItemIDs: utils.ArgsGetCacheItemIDs{
-// CacheID: utils.CacheDispatcherRoutes,
-// },
-// }
-// if err := dspEngine2RPC.Call(utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err != nil {
-// t.Error(err.Error())
-// }
-// sort.Strings(rcvKeys)
-// sort.Strings(expKeys)
-// if !reflect.DeepEqual(expKeys, rcvKeys) {
-// t.Errorf("Expected: %+v, received: %+v", expKeys, rcvKeys)
-// }
+ APIOpts: map[string]interface{}{
+ utils.OptsRouteID: "testRoute123",
+ },
+ }, &rpl); err != nil {
+ t.Error(err)
+ } else if rpl[0].ChargerSProfile != "DefaultCharger" {
+ t.Errorf("Received: %+v", utils.ToJSON(rpl))
+ }
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+ expKeys := []string{"testRoute123:*core", "testRoute123:*attributes", "testRoute123:*chargers"}
+ argsAPI = utils.ArgsGetCacheItemIDsWithAPIOpts{
+ Tenant: "cgrates.org",
+ ArgsGetCacheItemIDs: utils.ArgsGetCacheItemIDs{
+ CacheID: utils.CacheDispatcherRoutes,
+ },
+ }
+ if err := dspEngine2RPC.Call(context.Background(), utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err != nil {
+ t.Error(err.Error())
+ }
+ sort.Strings(rcvKeys)
+ sort.Strings(expKeys)
+ if !reflect.DeepEqual(expKeys, rcvKeys) {
+ t.Errorf("Expected: %+v, received: %+v", expKeys, rcvKeys)
+ }
-// expKeys = []string{"cgrates.org:Engine2"}
-// argsAPI = utils.ArgsGetCacheItemIDsWithAPIOpts{
-// Tenant: "cgrates.org",
-// ArgsGetCacheItemIDs: utils.ArgsGetCacheItemIDs{
-// CacheID: utils.CacheDispatcherLoads,
-// },
-// }
-// if err := dspEngine2RPC.Call(utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err != nil {
-// t.Error(err.Error())
-// }
-// sort.Strings(rcvKeys)
-// sort.Strings(expKeys)
-// if !reflect.DeepEqual(expKeys, rcvKeys) {
-// t.Errorf("Expected: %+v, received: %+v", expKeys, rcvKeys)
-// }
+ expKeys = []string{"cgrates.org:Engine2"}
+ argsAPI = utils.ArgsGetCacheItemIDsWithAPIOpts{
+ Tenant: "cgrates.org",
+ ArgsGetCacheItemIDs: utils.ArgsGetCacheItemIDs{
+ CacheID: utils.CacheDispatcherLoads,
+ },
+ }
+ if err := dspEngine2RPC.Call(context.Background(), utils.CacheSv1GetItemIDs, argsAPI, &rcvKeys); err != nil {
+ t.Error(err.Error())
+ }
+ sort.Strings(rcvKeys)
+ sort.Strings(expKeys)
+ if !reflect.DeepEqual(expKeys, rcvKeys) {
+ t.Errorf("Expected: %+v, received: %+v", expKeys, rcvKeys)
+ }
-// }
+}
-// func testCacheRplStopEngine(t *testing.T) {
-// if err := engine.KillEngine(*waitRater); err != nil {
-// t.Error(err)
-// }
-// }
+func testCacheRplStopEngine(t *testing.T) {
+ if err := engine.KillEngine(*waitRater); err != nil {
+ t.Error(err)
+ }
+}
diff --git a/general_tests/redis_cluster_it_test.go b/general_tests/redis_cluster_it_test.go
index 5e3b112eb..b5c82967b 100644
--- a/general_tests/redis_cluster_it_test.go
+++ b/general_tests/redis_cluster_it_test.go
@@ -21,303 +21,316 @@ along with this program. If not, see
package general_tests
-// import (
-// "bytes"
-// "flag"
-// "fmt"
-// "net/rpc"
-// "os"
-// "os/exec"
-// "path"
-// "reflect"
-// "testing"
-// "time"
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "os"
+ "os/exec"
+ "path"
+ "reflect"
+ "testing"
+ "time"
-// "github.com/cgrates/cgrates/config"
-// "github.com/cgrates/cgrates/engine"
-// "github.com/cgrates/cgrates/utils"
-// )
+ "github.com/cgrates/birpc"
+ "github.com/cgrates/birpc/context"
+ "github.com/cgrates/cgrates/config"
+ "github.com/cgrates/cgrates/engine"
+ "github.com/cgrates/cgrates/utils"
+)
-// /*
-// * Documentation:
-// * This code should work on redis 5 or later:
-// * `redis-cli --cluster create 127.0.0.1:7001 127.0.0.1:7002 127.0.0.1:7003 127.0.0.1:7004 127.0.0.1:7005 127.0.0.1:7006 --cluster-replicas 1`
-// * For redis 4 or before you need to create create the cluster manualy:
-// * - install ruby
-// * - install redis gem: `gem install redis`
-// * - download the `redis-trib.rb` from the source code
-// * - start the 6 nodes with the command `redis-server node1.conf`
-// * - configure the cluster with the following command:
-// * `./redis-trib.rb create --replicas 1 127.0.0.1:7001 127.0.0.1:7002 127.0.0.1:7003 127.0.0.1:7004 127.0.0.1:7005 127.0.0.1:7006`
-// *
-// * To run the tests you need to specify the `redisCluster` flag and have the redis stopped:
-// * `go test github.com/cgrates/cgrates/general_tests -tags=integration -dbtype=*mysql -run=TestRedisCluster -redisCluster -v`
-// *
-// * The configuration of the cluster is the following:
-// * - node1 127.0.0.1:7001 master
-// * - node2 127.0.0.1:7002 master
-// * - node3 127.0.0.1:7003 master
-// * - node4 127.0.0.1:7004 replica
-// * - node5 127.0.0.1:7005 replica
-// * - node6 127.0.0.1:7006 replica
-// * The replicas do not allways select the same master
-// */
+/*
+ * Documentation:
+ * This code should work on redis 5 or later:
+ * `redis-cli --cluster create 127.0.0.1:7001 127.0.0.1:7002 127.0.0.1:7003 127.0.0.1:7004 127.0.0.1:7005 127.0.0.1:7006 --cluster-replicas 1`
+ * For redis 4 or before you need to create the cluster manually:
+ * - install ruby
+ * - install redis gem: `gem install redis`
+ * - download the `redis-trib.rb` from the source code
+ * - start the 6 nodes with the command `redis-server node1.conf`
+ * - configure the cluster with the following command:
+ * `./redis-trib.rb create --replicas 1 127.0.0.1:7001 127.0.0.1:7002 127.0.0.1:7003 127.0.0.1:7004 127.0.0.1:7005 127.0.0.1:7006`
+ *
+ * To run the tests you need to specify the `redisCluster` flag and have the redis stopped:
+ * `go test github.com/cgrates/cgrates/general_tests -tags=integration -dbtype=*mysql -run=TestRedisCluster -redisCluster -v`
+ *
+ * The configuration of the cluster is the following:
+ * - node1 127.0.0.1:7001 master
+ * - node2 127.0.0.1:7002 master
+ * - node3 127.0.0.1:7003 master
+ * - node4 127.0.0.1:7004 replica
+ * - node5 127.0.0.1:7005 replica
+ * - node6 127.0.0.1:7006 replica
+ * The replicas do not always select the same master
+ */
-// var (
-// clsrConfig *config.CGRConfig
-// clsrRPC *rpc.Client
+var (
+ clsrConfig *config.CGRConfig
+ clsrRPC *birpc.Client
-// clsrNodeCfgPath = path.Join(*dataDir, "redisCluster", "node%v.conf")
-// clsrEngineCfgPath = path.Join(*dataDir, "conf", "samples", "redisCluster")
-// clsrNodes = make(map[string]*exec.Cmd)
-// clsrOutput = make(map[string]*bytes.Buffer) // in order to debug if something is not working
-// clsrNoNodes = 6 // this is the minimum number of nodes for a cluster with 1 replica for each master
-// clsrRedisFlag = flag.Bool("redisCluster", false, "Run tests for redis cluster")
-// clsrTests = []func(t *testing.T){
-// testClsrPrepare,
-// testClsrStartNodes,
-// testClsrCreateCluster,
-// testClsrInitConfig,
-// testClsrFlushDb,
-// testClsrStartEngine,
-// testClsrRPCConection,
-// testClsrSetGetAttribute,
-// testClsrStopMaster,
-// testClsrSetGetAttribute2,
-// testClsrReStartMaster,
-// testClsrGetAttribute,
-// testClsrStopNodes,
-// testClsrKillEngine,
-// testClsrDeleteFolder,
-// // testClsrPrintOutput,
-// }
+ clsrNodeCfgPath = path.Join(*dataDir, "redisCluster", "node%v.conf")
+ clsrEngineCfgPath = path.Join(*dataDir, "conf", "samples", "redisCluster")
+ clsrNodes = make(map[string]*exec.Cmd)
+ clsrOutput = make(map[string]*bytes.Buffer) // in order to debug if something is not working
+ clsrNoNodes = 6 // this is the minimum number of nodes for a cluster with 1 replica for each master
+ clsrRedisFlag = flag.Bool("redisCluster", false, "Run tests for redis cluster")
+ clsrTests = []func(t *testing.T){
+ testClsrPrepare,
+ testClsrStartNodes,
+ testClsrCreateCluster,
+ testClsrInitConfig,
+ testClsrFlushDb,
+ testClsrStartEngine,
+ testClsrRPCConection,
+ testClsrSetGetAttribute,
+ testClsrStopMaster,
+ testClsrSetGetAttribute2,
+ testClsrReStartMaster,
+ testClsrGetAttribute,
+ testClsrStopNodes,
+ testClsrKillEngine,
+ testClsrDeleteFolder,
+ // testClsrPrintOutput,
+ }
-// clsrRedisCliArgs = []string{
-// "--cluster", "create",
-// "127.0.0.1:7001",
-// "127.0.0.1:7002",
-// "127.0.0.1:7003",
-// "127.0.0.1:7004",
-// "127.0.0.1:7005",
-// "127.0.0.1:7006",
-// "--cluster-replicas", "1",
-// }
-// )
+ clsrRedisCliArgs = []string{
+ "--cluster", "create",
+ "127.0.0.1:7001",
+ "127.0.0.1:7002",
+ "127.0.0.1:7003",
+ "127.0.0.1:7004",
+ "127.0.0.1:7005",
+ "127.0.0.1:7006",
+ "--cluster-replicas", "1",
+ }
+)
-// const (
-// clsrRedisCmd = "redis-server"
-// clsrRedisCliCmd = "redis-cli"
-// clsrDir = "/tmp/cluster/"
-// )
+const (
+ clsrRedisCmd = "redis-server"
+ clsrRedisCliCmd = "redis-cli"
+ clsrDir = "/tmp/cluster/"
+)
-// func TestRedisCluster(t *testing.T) {
-// if !*clsrRedisFlag {
-// t.SkipNow()
-// }
-// switch *dbType {
-// case utils.MetaMySQL:
-// case utils.MetaInternal,
-// utils.MetaMongo,
-// utils.MetaPostgres:
-// t.SkipNow()
-// default:
-// t.Fatal("Unknown Database type")
-// }
-// for _, stest := range clsrTests {
-// t.Run("TestRedisCluster", stest)
-// }
-// }
+func TestRedisCluster(t *testing.T) {
+ if !*clsrRedisFlag {
+ t.SkipNow()
+ }
+ switch *dbType {
+ case utils.MetaMySQL:
+ case utils.MetaInternal,
+ utils.MetaMongo,
+ utils.MetaPostgres:
+ t.SkipNow()
+ default:
+ t.Fatal("Unknown Database type")
+ }
+ for _, stest := range clsrTests {
+ t.Run("TestRedisCluster", stest)
+ }
+}
-// func testClsrPrepare(t *testing.T) {
-// if err := os.MkdirAll(clsrDir, 0755); err != nil {
-// t.Fatalf("Error creating folder<%s>:%s", clsrDir, err)
-// }
-// }
+func testClsrPrepare(t *testing.T) {
+ if err := os.MkdirAll(clsrDir, 0755); err != nil {
+ t.Fatalf("Error creating folder<%s>:%s", clsrDir, err)
+ }
+}
-// func testClsrStartNodes(t *testing.T) {
-// for i := 1; i <= clsrNoNodes; i++ {
-// path := fmt.Sprintf(clsrNodeCfgPath, i)
-// clsrNodes[path] = exec.Command(clsrRedisCmd, path)
-// clsrOutput[path] = bytes.NewBuffer(nil)
-// clsrNodes[path].Stdout = clsrOutput[path]
-// if err := clsrNodes[path].Start(); err != nil {
-// t.Fatalf("Could not start node %v because %s", i, err)
-// }
-// }
-// }
+func testClsrStartNodes(t *testing.T) {
+ for i := 1; i <= clsrNoNodes; i++ {
+ path := fmt.Sprintf(clsrNodeCfgPath, i)
+ clsrNodes[path] = exec.Command(clsrRedisCmd, path)
+ clsrOutput[path] = bytes.NewBuffer(nil)
+ clsrNodes[path].Stdout = clsrOutput[path]
+ if err := clsrNodes[path].Start(); err != nil {
+ t.Fatalf("Could not start node %v because %s", i, err)
+ }
+ }
+}
-// func testClsrCreateCluster(t *testing.T) {
-// cmd := exec.Command(clsrRedisCliCmd, clsrRedisCliArgs...)
-// cmd.Stdin = bytes.NewBuffer([]byte("yes\n"))
-// var stdOut bytes.Buffer
-// cmd.Stdout = &stdOut
-// if err := cmd.Run(); err != nil {
-// t.Errorf("Could not create the cluster because %s", err)
-// t.Logf("The output was:\n %s", stdOut.String()) // print the output to debug the error
-// }
-// time.Sleep(200 * time.Millisecond)
-// }
+func testClsrCreateCluster(t *testing.T) {
+ cmd := exec.Command(clsrRedisCliCmd, clsrRedisCliArgs...)
+ cmd.Stdin = bytes.NewBuffer([]byte("yes\n"))
+ var stdOut bytes.Buffer
+ cmd.Stdout = &stdOut
+ if err := cmd.Run(); err != nil {
+ t.Errorf("Could not create the cluster because %s", err)
+ t.Logf("The output was:\n %s", stdOut.String()) // print the output to debug the error
+ }
+ time.Sleep(200 * time.Millisecond)
+}
-// func testClsrInitConfig(t *testing.T) {
-// var err error
-// clsrConfig, err = config.NewCGRConfigFromPath(clsrEngineCfgPath)
-// if err != nil {
-// t.Error(err)
-// }
-// clsrConfig.DataFolderPath = *dataDir // Share DataFolderPath through config towards StoreDb for Flush()
-// }
+func testClsrInitConfig(t *testing.T) {
+ var err error
+ clsrConfig, err = config.NewCGRConfigFromPath(context.Background(), clsrEngineCfgPath)
+ if err != nil {
+ t.Error(err)
+ }
+ clsrConfig.DataFolderPath = *dataDir // Share DataFolderPath through config towards StoreDb for Flush()
+}
-// func testClsrFlushDb(t *testing.T) {
-// if err := engine.InitDataDB(clsrConfig); err != nil {
-// t.Fatal(err)
-// }
-// }
+func testClsrFlushDb(t *testing.T) {
+ if err := engine.InitDataDB(clsrConfig); err != nil {
+ t.Fatal(err)
+ }
+}
-// func testClsrStartEngine(t *testing.T) {
-// if _, err := engine.StopStartEngine(clsrEngineCfgPath, 200); err != nil {
-// t.Fatal(err)
-// }
-// }
+func testClsrStartEngine(t *testing.T) {
+ if _, err := engine.StopStartEngine(clsrEngineCfgPath, 200); err != nil {
+ t.Fatal(err)
+ }
+}
-// func testClsrRPCConection(t *testing.T) {
-// var err error
-// clsrRPC, err = newRPCClient(clsrConfig.ListenCfg()) // We connect over JSON so we can also troubleshoot if needed
-// if err != nil {
-// t.Fatal(err)
-// }
-// }
+func testClsrRPCConection(t *testing.T) {
+ var err error
+ clsrRPC, err = newRPCClient(clsrConfig.ListenCfg()) // We connect over JSON so we can also troubleshoot if needed
+ if err != nil {
+ t.Fatal(err)
+ }
+}
-// func testClsrSetGetAttribute(t *testing.T) {
-// alsPrf := &engine.AttributeProfile{
-// Tenant: "cgrates.org",
-// ID: "ClsrTest",
-// FilterIDs: []string{"*string:~*req.Account:1001", "*string:~*opts.*context:*sessions|*cdrs"},
-// Attributes: []*engine.Attribute{
-// {
-// Path: utils.MetaReq + utils.NestingSep + utils.Subject,
-// Value: config.NewRSRParsersMustCompile("1001", utils.InfieldSep),
-// },
-// },
-// Weight: 20,
-// }
-// alsPrf.Compile()
-// var result string
-// if err := clsrRPC.Call(utils.APIerSv1SetAttributeProfile, alsPrf, &result); err != nil {
-// t.Error(err)
-// } else if result != utils.OK {
-// t.Error("Unexpected reply returned", result)
-// }
-// var reply *engine.AttributeProfile
-// if err := clsrRPC.Call(utils.APIerSv1GetAttributeProfile,
-// &utils.TenantID{Tenant: "cgrates.org", ID: "ClsrTest"}, &reply); err != nil {
-// t.Fatal(err)
-// }
-// reply.Compile()
-// if !reflect.DeepEqual(alsPrf, reply) {
-// t.Errorf("Expecting : %+v, received: %+v", alsPrf, reply)
-// }
-// }
+func testClsrSetGetAttribute(t *testing.T) {
+ alsPrf := &engine.AttributeProfile{
+ Tenant: "cgrates.org",
+ ID: "ClsrTest",
+ FilterIDs: []string{"*string:~*req.Account:1001", "*string:~*opts.*context:*sessions|*cdrs"},
+ Attributes: []*engine.Attribute{
+ {
+ Path: utils.MetaReq + utils.NestingSep + utils.Subject,
+ Value: config.NewRSRParsersMustCompile("1001", utils.InfieldSep),
+ },
+ },
+ Weights: utils.DynamicWeights{
+ {
+ Weight: 20,
+ },
+ },
+ }
+ alsPrf.Compile()
+ var result string
+ if err := clsrRPC.Call(context.Background(), utils.AdminSv1SetAttributeProfile, alsPrf, &result); err != nil {
+ t.Error(err)
+ } else if result != utils.OK {
+ t.Error("Unexpected reply returned", result)
+ }
+ var reply *engine.AttributeProfile
+ if err := clsrRPC.Call(context.Background(), utils.AdminSv1GetAttributeProfile,
+ &utils.TenantID{Tenant: "cgrates.org", ID: "ClsrTest"}, &reply); err != nil {
+ t.Fatal(err)
+ }
+ reply.Compile()
+ if !reflect.DeepEqual(alsPrf, reply) {
+ t.Errorf("Expecting : %+v, received: %+v", alsPrf, reply)
+ }
+}
-// func testClsrStopMaster(t *testing.T) {
-// path := fmt.Sprintf(clsrNodeCfgPath, 3)
-// if err = clsrNodes[path].Process.Kill(); err != nil {
-// t.Fatal(err)
-// }
-// time.Sleep(time.Second)
-// }
+func testClsrStopMaster(t *testing.T) {
+ path := fmt.Sprintf(clsrNodeCfgPath, 3)
+ if err := clsrNodes[path].Process.Kill(); err != nil {
+ t.Fatal(err)
+ }
+ time.Sleep(time.Second)
+}
-// func testClsrSetGetAttribute2(t *testing.T) {
-// alsPrf := &engine.AttributeProfile{
-// Tenant: "cgrates.org",
-// ID: "ClsrTest",
-// FilterIDs: []string{"*string:~*req.Account:1001", "*string:~*opts.*context:*sessions|*cdrs"},
-// Attributes: []*engine.Attribute{
-// {
-// Path: utils.MetaReq + utils.NestingSep + utils.Subject,
-// Value: config.NewRSRParsersMustCompile("1001", utils.InfieldSep),
-// },
-// },
-// Weight: 20,
-// }
-// alsPrf.Compile()
-// var reply *engine.AttributeProfile
-// if err := clsrRPC.Call(utils.APIerSv1GetAttributeProfile,
-// &utils.TenantID{Tenant: "cgrates.org", ID: "ClsrTest"}, &reply); err != nil {
-// t.Fatal(err)
-// }
-// reply.Compile()
-// if !reflect.DeepEqual(alsPrf, reply) {
-// t.Errorf("Expecting : %+v, received: %+v", alsPrf, reply)
-// }
-// // add another attribute
-// alsPrf.ID += "2"
-// var result string
-// if err := clsrRPC.Call(utils.APIerSv1SetAttributeProfile, alsPrf, &result); err != nil {
-// t.Error(err)
-// } else if result != utils.OK {
-// t.Error("Unexpected reply returned", result)
-// }
-// }
+func testClsrSetGetAttribute2(t *testing.T) {
+ alsPrf := &engine.AttributeProfile{
+ Tenant: "cgrates.org",
+ ID: "ClsrTest",
+ FilterIDs: []string{"*string:~*req.Account:1001", "*string:~*opts.*context:*sessions|*cdrs"},
+ Attributes: []*engine.Attribute{
+ {
+ Path: utils.MetaReq + utils.NestingSep + utils.Subject,
+ Value: config.NewRSRParsersMustCompile("1001", utils.InfieldSep),
+ },
+ },
+ Weights: utils.DynamicWeights{
+ {
+ Weight: 20,
+ },
+ },
+ }
+ alsPrf.Compile()
+ var reply *engine.AttributeProfile
+ if err := clsrRPC.Call(context.Background(), utils.AdminSv1GetAttributeProfile,
+ &utils.TenantID{Tenant: "cgrates.org", ID: "ClsrTest"}, &reply); err != nil {
+ t.Fatal(err)
+ }
+ reply.Compile()
+ if !reflect.DeepEqual(alsPrf, reply) {
+ t.Errorf("Expecting : %+v, received: %+v", alsPrf, reply)
+ }
+ // add another attribute
+ alsPrf.ID += "2"
+ var result string
+ if err := clsrRPC.Call(context.Background(), utils.AdminSv1SetAttributeProfile, alsPrf, &result); err != nil {
+ t.Error(err)
+ } else if result != utils.OK {
+ t.Error("Unexpected reply returned", result)
+ }
+}
-// func testClsrReStartMaster(t *testing.T) {
-// path := fmt.Sprintf(clsrNodeCfgPath, 3)
-// clsrNodes[path] = exec.Command(clsrRedisCmd, path)
-// clsrOutput[path] = bytes.NewBuffer(nil)
-// clsrNodes[path].Stdout = clsrOutput[path]
-// if err := clsrNodes[path].Start(); err != nil {
-// t.Fatalf("Could not start node %v because %s", 3, err)
-// }
-// time.Sleep(200 * time.Millisecond)
-// }
+func testClsrReStartMaster(t *testing.T) {
+ path := fmt.Sprintf(clsrNodeCfgPath, 3)
+ clsrNodes[path] = exec.Command(clsrRedisCmd, path)
+ clsrOutput[path] = bytes.NewBuffer(nil)
+ clsrNodes[path].Stdout = clsrOutput[path]
+ if err := clsrNodes[path].Start(); err != nil {
+ t.Fatalf("Could not start node %v because %s", 3, err)
+ }
+ time.Sleep(200 * time.Millisecond)
+}
-// func testClsrGetAttribute(t *testing.T) {
-// alsPrf := &engine.AttributeProfile{
-// Tenant: "cgrates.org",
-// ID: "ClsrTest2",
-// FilterIDs: []string{"*string:~*req.Account:1001", "*string:~*opts.*context:*sessions|*cdrs"},
-// Attributes: []*engine.Attribute{
-// {
-// Path: utils.MetaReq + utils.NestingSep + utils.Subject,
-// Value: config.NewRSRParsersMustCompile("1001", utils.InfieldSep),
-// },
-// },
-// Weight: 20,
-// }
-// alsPrf.Compile()
-// var reply *engine.AttributeProfile
-// if err := clsrRPC.Call(utils.APIerSv1GetAttributeProfile,
-// &utils.TenantID{Tenant: "cgrates.org", ID: "ClsrTest2"}, &reply); err != nil {
-// t.Fatal(err)
-// }
-// reply.Compile()
-// if !reflect.DeepEqual(alsPrf, reply) {
-// t.Errorf("Expecting : %+v, received: %+v", alsPrf, reply)
-// }
-// }
+func testClsrGetAttribute(t *testing.T) {
+ alsPrf := &engine.AttributeProfile{
+ Tenant: "cgrates.org",
+ ID: "ClsrTest2",
+ FilterIDs: []string{"*string:~*req.Account:1001", "*string:~*opts.*context:*sessions|*cdrs"},
+ Attributes: []*engine.Attribute{
+ {
+ Path: utils.MetaReq + utils.NestingSep + utils.Subject,
+ Value: config.NewRSRParsersMustCompile("1001", utils.InfieldSep),
+ },
+ },
+ Weights: utils.DynamicWeights{
+ {
+ Weight: 20,
+ },
+ },
+ }
+ alsPrf.Compile()
+ var reply *engine.AttributeProfile
+ if err := clsrRPC.Call(context.Background(), utils.AdminSv1GetAttributeProfile,
+ &utils.TenantID{Tenant: "cgrates.org", ID: "ClsrTest2"}, &reply); err != nil {
+ t.Fatal(err)
+ }
+ reply.Compile()
+ if !reflect.DeepEqual(alsPrf, reply) {
+ t.Errorf("Expecting : %+v, received: %+v", alsPrf, reply)
+ }
+}
-// func testClsrStopNodes(t *testing.T) {
-// for path, node := range clsrNodes {
-// if err := node.Process.Kill(); err != nil {
-// t.Fatalf("Could not stop node with path <%s> because %s", path, err)
-// }
-// }
-// }
+func testClsrStopNodes(t *testing.T) {
+ for path, node := range clsrNodes {
+ if err := node.Process.Kill(); err != nil {
+ t.Fatalf("Could not stop node with path <%s> because %s", path, err)
+ }
+ }
+}
-// func testClsrPrintOutput(t *testing.T) {
-// for path, node := range clsrOutput {
-// t.Logf("The output of the node <%s> is:\n%s", path, node.String())
-// t.Logf("==========================================================")
-// }
-// }
+func testClsrPrintOutput(t *testing.T) {
+ for path, node := range clsrOutput {
+ t.Logf("The output of the node <%s> is:\n%s", path, node.String())
+ t.Logf("==========================================================")
+ }
+}
-// func testClsrKillEngine(t *testing.T) {
-// if err := engine.KillEngine(200); err != nil {
-// t.Error(err)
-// }
-// }
+func testClsrKillEngine(t *testing.T) {
+ if err := engine.KillEngine(200); err != nil {
+ t.Error(err)
+ }
+}
-// func testClsrDeleteFolder(t *testing.T) {
-// if err := os.RemoveAll(clsrDir); err != nil {
-// t.Fatalf("Error removing folder<%s>: %s", clsrDir, err)
-// }
-// }
+func testClsrDeleteFolder(t *testing.T) {
+ if err := os.RemoveAll(clsrDir); err != nil {
+ t.Fatalf("Error removing folder<%s>: %s", clsrDir, err)
+ }
+}
diff --git a/general_tests/redis_tls_it_test.go b/general_tests/redis_tls_it_test.go
index e5ebd6268..34c975852 100644
--- a/general_tests/redis_tls_it_test.go
+++ b/general_tests/redis_tls_it_test.go
@@ -21,102 +21,103 @@ along with this program. If not, see
package general_tests
-// import (
-// "flag"
-// "net/rpc"
-// "os/exec"
-// "path"
-// "testing"
+import (
+ "flag"
+ "os/exec"
+ "path"
+ "testing"
-// "github.com/cgrates/cgrates/utils"
+ "github.com/cgrates/birpc"
+ "github.com/cgrates/birpc/context"
+ "github.com/cgrates/cgrates/utils"
-// "github.com/cgrates/cgrates/config"
-// "github.com/cgrates/cgrates/engine"
-// )
+ "github.com/cgrates/cgrates/config"
+ "github.com/cgrates/cgrates/engine"
+)
-// var (
-// redisTLS = flag.Bool("redisTLS", false, "Run tests with redis tls")
-// redisTLSServer *exec.Cmd
-// redisTLSEngineCfg = path.Join(*dataDir, "conf", "samples", "redisTLS")
-// redisTLSCfg *config.CGRConfig
-// redisTLSRPC *rpc.Client
+var (
+ redisTLS = flag.Bool("redisTLS", false, "Run tests with redis tls")
+ redisTLSServer *exec.Cmd
+ redisTLSEngineCfg = path.Join(*dataDir, "conf", "samples", "redisTLS")
+ redisTLSCfg *config.CGRConfig
+ redisTLSRPC *birpc.Client
-// sTestsRedisTLS = []func(t *testing.T){
-// testRedisTLSStartServer,
-// testRedisTLSInitConfig,
-// testRedisTLSFlushDb,
-// testRedisTLSStartEngine,
-// testRedisTLSRPCCon,
-// testRedisTLSSetGetAttribute,
-// testRedisTLSKillEngine,
-// }
-// )
+ sTestsRedisTLS = []func(t *testing.T){
+ testRedisTLSStartServer,
+ testRedisTLSInitConfig,
+ testRedisTLSFlushDb,
+ testRedisTLSStartEngine,
+ testRedisTLSRPCCon,
+ testRedisTLSSetGetAttribute,
+ testRedisTLSKillEngine,
+ }
+)
-// // Before running these tests first you need to make sure you build the redis server with TLS support
-// // https://redis.io/topics/encryption
-// func TestRedisTLS(t *testing.T) {
-// if !*redisTLS {
-// return
-// }
-// for _, stest := range sTestsRedisTLS {
-// t.Run("TestRedisTLS", stest)
-// }
-// }
+// Before running these tests first you need to make sure you build the redis server with TLS support
+// https://redis.io/topics/encryption
+func TestRedisTLS(t *testing.T) {
+ if !*redisTLS {
+ return
+ }
+ for _, stest := range sTestsRedisTLS {
+ t.Run("TestRedisTLS", stest)
+ }
+}
-// func testRedisTLSStartServer(t *testing.T) {
-// // start the server with the server.crt server.key and ca.crt from /data/tls ( self sign certificate )
-// args := []string{
-// "--tls-port", "6400", "--port", "0", "--tls-cert-file", "/usr/share/cgrates/tls/server.crt",
-// "--tls-key-file", "/usr/share/cgrates/tls/server.key", "--tls-ca-cert-file", "/usr/share/cgrates/tls/ca.crt",
-// }
-// redisTLSServer = exec.Command("redis-server", args...)
-// if err := redisTLSServer.Start(); err != nil {
-// t.Error(err)
-// }
-// }
+func testRedisTLSStartServer(t *testing.T) {
+ // start the server with the server.crt server.key and ca.crt from /data/tls ( self sign certificate )
+ args := []string{
+ "--tls-port", "6400", "--port", "0", "--tls-cert-file", "/usr/share/cgrates/tls/server.crt",
+ "--tls-key-file", "/usr/share/cgrates/tls/server.key", "--tls-ca-cert-file", "/usr/share/cgrates/tls/ca.crt",
+ }
+ redisTLSServer = exec.Command("redis-server", args...)
+ if err := redisTLSServer.Start(); err != nil {
+ t.Error(err)
+ }
+}
-// func testRedisTLSInitConfig(t *testing.T) {
-// var err error
-// redisTLSCfg, err = config.NewCGRConfigFromPath(redisTLSEngineCfg)
-// if err != nil {
-// t.Error(err)
-// }
-// }
+func testRedisTLSInitConfig(t *testing.T) {
+ var err error
+ redisTLSCfg, err = config.NewCGRConfigFromPath(context.Background(), redisTLSEngineCfg)
+ if err != nil {
+ t.Error(err)
+ }
+}
-// func testRedisTLSFlushDb(t *testing.T) {
-// if err := engine.InitDataDB(redisTLSCfg); err != nil {
-// t.Fatal(err)
-// }
-// }
+func testRedisTLSFlushDb(t *testing.T) {
+ if err := engine.InitDataDB(redisTLSCfg); err != nil {
+ t.Fatal(err)
+ }
+}
-// func testRedisTLSStartEngine(t *testing.T) {
-// // for the engine we will use the client.crt client.key and ca.crt
-// if _, err := engine.StopStartEngine(redisTLSEngineCfg, 2000); err != nil {
-// t.Fatal(err)
-// }
-// }
+func testRedisTLSStartEngine(t *testing.T) {
+ // for the engine we will use the client.crt client.key and ca.crt
+ if _, err := engine.StopStartEngine(redisTLSEngineCfg, 2000); err != nil {
+ t.Fatal(err)
+ }
+}
-// func testRedisTLSRPCCon(t *testing.T) {
-// var err error
-// redisTLSRPC, err = newRPCClient(redisTLSCfg.ListenCfg()) // We connect over JSON so we can also troubleshoot if needed
-// if err != nil {
-// t.Fatal(err)
-// }
-// }
+func testRedisTLSRPCCon(t *testing.T) {
+ var err error
+ redisTLSRPC, err = newRPCClient(redisTLSCfg.ListenCfg()) // We connect over JSON so we can also troubleshoot if needed
+ if err != nil {
+ t.Fatal(err)
+ }
+}
-// func testRedisTLSSetGetAttribute(t *testing.T) {
-// // status command to check if the engine starts
-// var rply map[string]interface{}
-// if err := redisTLSRPC.Call(utils.CoreSv1Status, &utils.TenantWithAPIOpts{}, &rply); err != nil {
-// t.Error(err)
-// }
-// }
+func testRedisTLSSetGetAttribute(t *testing.T) {
+ // status command to check if the engine starts
+ var rply map[string]interface{}
+ if err := redisTLSRPC.Call(context.Background(), utils.CoreSv1Status, &utils.TenantWithAPIOpts{}, &rply); err != nil {
+ t.Error(err)
+ }
+}
-// func testRedisTLSKillEngine(t *testing.T) {
-// if err := engine.KillEngine(2000); err != nil {
-// t.Error(err)
-// }
-// if err := exec.Command("pkill", "redis-server").Run(); err != nil {
-// t.Error(err)
-// }
-// }
+func testRedisTLSKillEngine(t *testing.T) {
+ if err := engine.KillEngine(2000); err != nil {
+ t.Error(err)
+ }
+ if err := exec.Command("pkill", "redis-server").Run(); err != nil {
+ t.Error(err)
+ }
+}