Revise registrarc integration test

This commit is contained in:
ionutboangiu
2024-10-16 11:25:53 +03:00
committed by Dan Christian Bogos
parent 531affc8ae
commit dd95a2837a
9 changed files with 160 additions and 747 deletions

View File

@@ -1,117 +0,0 @@
{
// CGRateS Configuration file
//
"general": {
"node_id": "ALL2",
"log_level": 7
},
"listen": {
"rpc_json": ":7012",
"rpc_gob": ":7013",
"http": ":7080",
},
"data_db": {
"db_type": "mongo",
"db_name": "12",
"db_port": 27017,
},
"stor_db": {
"db_type": "mongo",
"db_name": "cgrates",
"db_port": 27017,
"db_password": "",
},
"rpc_conns": {
"conn1": {
"strategy": "*first",
"conns": [{"address": "127.0.0.1:7012", "transport":"*json"}],
},
"dispConn": {
"strategy": "*first",
"conns": [{"address": "http://127.0.0.1:2080/registrar", "transport":"*http_jsonrpc"}]
}
},
"attributes": {
"enabled": true
},
"rals": {
"enabled": true,
},
"schedulers": {
"enabled": true,
},
"chargers": {
"enabled": true,
"attributes_conns": ["*internal"],
},
"thresholds": {
"enabled": true,
},
"routes": {
"enabled": true,
"rals_conns": ["*internal"],
},
"stats": {
"enabled": true,
},
"resources": {
"enabled": true,
},
"cdrs": {
"enabled": true,
"chargers_conns":["*internal"],
"rals_conns": ["*internal"],
},
"sessions": {
"enabled": true,
"listen_bijson": ":7014",
"routes_conns": ["*internal"],
"resources_conns": ["*internal"],
"attributes_conns": ["*internal"],
"rals_conns": ["*internal"],
"cdrs_conns": ["*internal"],
"chargers_conns": ["*internal"],
},
"apiers": {
"enabled": true,
"caches_conns":["conn1"],
"scheduler_conns": ["*internal"],
},
"registrarc":{
"dispatchers":{
"enabled": true,
"registrars_conns": ["dispConn"],
"hosts": [
{
"Tenant":"*default",
"ID":"ALL2",
"transport": "*json",
"tls": false}
],
"refresh_interval": "1s",
},
}
}

View File

@@ -1,110 +0,0 @@
{
"general": {
"node_id": "ALL2",
"log_level": 7
},
"listen": {
"rpc_json": ":7012",
"rpc_gob": ":7013",
"http": ":7080"
},
"data_db": {
"db_type": "redis",
"db_port": 6379,
"db_name": "12"
},
"stor_db": {
"db_password": "CGRateS.org"
},
"rpc_conns": {
"conn1": {
"strategy": "*first",
"conns": [{"address": "127.0.0.1:7012", "transport":"*json"}]
},
"dispConn": {
"strategy": "*first",
"conns": [{"address": "http://127.0.0.1:2080/registrar", "transport":"*http_jsonrpc"}]
}
},
"attributes": {
"enabled": true
},
"rals": {
"enabled": true
},
"schedulers": {
"enabled": true
},
"chargers": {
"enabled": true,
"attributes_conns": ["*internal"]
},
"thresholds": {
"enabled": true
},
"routes": {
"enabled": true,
"rals_conns": ["*internal"]
},
"stats": {
"enabled": true
},
"resources": {
"enabled": true
},
"cdrs": {
"enabled": true,
"chargers_conns":["*internal"],
"rals_conns": ["*internal"]
},
"sessions": {
"enabled": true,
"listen_bijson": ":7014",
"routes_conns": ["*internal"],
"resources_conns": ["*internal"],
"attributes_conns": ["*internal"],
"rals_conns": ["*internal"],
"cdrs_conns": ["*internal"],
"chargers_conns": ["*internal"]
},
"apiers": {
"enabled": true,
"caches_conns":["conn1"],
"scheduler_conns": ["*internal"]
},
"registrarc":{
"dispatchers":{
"enabled": true,
"registrars_conns": ["dispConn"],
"hosts": [
{
"Tenant": "*default",
"ID":"ALL2",
"transport": "*json",
"tls": false
}
],
"refresh_interval": "1s"
}
}
}

View File

@@ -1,123 +0,0 @@
{
// CGRateS Configuration file
//
"general": {
"node_id": "ALL",
"log_level": 7
},
"listen": {
"rpc_json": ":6012",
"rpc_gob": ":6013",
"http": ":6080",
},
"data_db": {
"db_type": "mongo",
"db_name": "11",
"db_port": 27017,
},
"stor_db": {
"db_type": "mongo",
"db_name": "cgrates",
"db_port": 27017,
"db_password": "",
},
"rpc_conns": {
"conn1": {
"strategy": "*first",
"conns": [{"address": "127.0.0.1:6012", "transport":"*json"}],
},
"rplConn": {
"strategy": "*broadcast_sync",
"conns": [{"address": "127.0.0.1:7012", "transport":"*json"}],
},
"dispConn": {
"strategy": "*first",
"conns": [{"address": "http://127.0.0.1:2080/registrar", "transport":"*http_jsonrpc"}]
}
},
"attributes": {
"enabled": true
},
"rals": {
"enabled": true,
},
"schedulers": {
"enabled": true,
},
"chargers": {
"enabled": true,
"attributes_conns": ["*internal"],
},
"thresholds": {
"enabled": true,
},
"routes": {
"enabled": true,
"rals_conns": ["*internal"]
},
"stats": {
"enabled": true,
},
"resources": {
"enabled": true,
},
"cdrs": {
"enabled": true,
"chargers_conns":["*internal"],
"rals_conns": ["*internal"],
},
"sessions": {
"enabled": true,
"listen_bijson": ":6014",
"routes_conns": ["*internal"],
"resources_conns": ["*internal"],
"attributes_conns": ["*internal"],
"rals_conns": ["*internal"],
"chargers_conns": ["*internal"],
"cdrs_conns": ["*internal"],
},
"apiers": {
"enabled": true,
"caches_conns":["conn1"],
"scheduler_conns": ["*internal"],
},
"registrarc":{
"dispatchers":{
"enabled": true,
"registrars_conns": ["dispConn"],
"hosts": [
{
"Tenant": "*default",
"ID":"ALL2",
"transport": "*json",
"tls": false
},
],
"refresh_interval": "1s",
},
}
}

View File

@@ -1,121 +0,0 @@
{
// CGRateS Configuration file
//
"general": {
"node_id": "ALL",
"log_level": 7
},
"listen": {
"rpc_json": ":6012",
"rpc_gob": ":6013",
"http": ":6080",
},
"data_db": { // database used to store runtime data (eg: accounts, cdr stats)
"db_type": "redis", // data_db type: <redis|mongo>
"db_port": 6379, // data_db port to reach the database
"db_name": "11", // data_db database name to connect to
},
"stor_db": {
"db_password": "CGRateS.org",
},
"rpc_conns": {
"conn1": {
"strategy": "*first",
"conns": [{"address": "127.0.0.1:6012", "transport":"*json"}],
},
"rplConn": {
"strategy": "*broadcast_sync",
"conns": [{"address": "127.0.0.1:7012", "transport":"*json"}],
},
"dispConn": {
"strategy": "*first",
"conns": [{"address": "http://127.0.0.1:2080/registrar", "transport":"*http_jsonrpc"}]
}
},
"attributes": {
"enabled": true
},
"rals": {
"enabled": true,
},
"schedulers": {
"enabled": true,
},
"chargers": {
"enabled": true,
"attributes_conns": ["*internal"],
},
"thresholds": {
"enabled": true,
},
"routes": {
"enabled": true,
"rals_conns": ["*internal"],
},
"stats": {
"enabled": true,
},
"resources": {
"enabled": true,
},
"cdrs": {
"enabled": true,
"chargers_conns":["*internal"],
"rals_conns": ["*internal"],
},
"sessions": {
"enabled": true,
"listen_bijson": ":6014",
"routes_conns": ["*internal"],
"resources_conns": ["*internal"],
"attributes_conns": ["*internal"],
"rals_conns": ["*internal"],
"chargers_conns": ["*internal"],
"cdrs_conns": ["*internal"],
},
"apiers": {
"enabled": true,
"caches_conns":["conn1"],
"scheduler_conns": ["*internal"],
},
"registrarc":{
"dispatchers":{
"enabled": true,
"registrars_conns": ["dispConn"],
"hosts": [
{
"Tenant": "*default",
"ID":"ALL2",
"transport": "*json",
"tls": false
},
],
"refresh_interval": "1s",
},
}
}

View File

@@ -1,75 +0,0 @@
{
// Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
// Copyright (C) ITsysCOM GmbH
//
// This file contains the default configuration hardcoded into CGRateS.
// This is what you get when you load CGRateS with an empty configuration file.
"general": {
"node_id": "DispatcherS1",
"log_level": 7,
"reconnects": 1,
},
"listen": {
"rpc_json": ":2012",
"rpc_gob": ":2013",
"http": ":2080",
},
"data_db": {
"db_type": "mongo",
"db_name": "10",
"db_port": 27017,
},
"stor_db": {
"db_type": "mongo",
"db_name": "cgrates",
"db_port": 27017,
"db_password": "",
},
"caches":{
"partitions": {
"*dispatcher_routes": {"limit": -1, "ttl": "2s"},
"*dispatcher_hosts": {"limit": -1, "ttl": "2s"}
},
},
"schedulers": {
"enabled": true,
},
"rals": {
"enabled": true,
},
"chargers": {
"enabled": true,
},
"sessions": {
"enabled": true,
"rals_conns": ["*localhost"],
"resources_conns": ["*localhost"],
"chargers_conns": ["*localhost"],
"listen_bijson": ":3014",
},
"dispatchers":{
"enabled": true,
},
"apiers": {
"enabled": true,
"scheduler_conns": ["*internal"],
},
}

View File

@@ -1,63 +0,0 @@
{
// Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
// Copyright (C) ITsysCOM GmbH
//
// This file contains the default configuration hardcoded into CGRateS.
// This is what you get when you load CGRateS with an empty configuration file.
"general": {
"node_id": "DispatcherS1",
"log_level": 7,
"reconnects": 1,
},
"listen": {
"rpc_json": ":2012",
"rpc_gob": ":2013",
"http": ":2080",
},
"stor_db": {
"db_type":"*internal",
},
"caches":{
"partitions": {
"*dispatcher_routes": {"limit": -1, "ttl": "2s"},
"*dispatcher_hosts": {"limit": -1, "ttl": "2s"}
},
},
"schedulers": {
"enabled": true,
},
"rals": {
"enabled": true,
},
"chargers": {
"enabled": true,
},
"sessions": {
"enabled": true,
"rals_conns": ["*localhost"],
"resources_conns": ["*localhost"],
"chargers_conns": ["*localhost"],
"listen_bijson": ":3014",
},
"dispatchers":{
"enabled": true,
},
"apiers": {
"enabled": true,
"scheduler_conns": ["*internal"],
},
}

View File

@@ -1,3 +0,0 @@
#Tenant,ID,Subsystems,FilterIDs,ActivationInterval,Strategy,StrategyParameters,ConnID,ConnFilterIDs,ConnWeight,ConnBlocker,ConnParameters,Weight
cgrates.org,DSP1,*any,,,*weight,,ALL,,20,false,,10
cgrates.org,DSP1,,,,,,ALL2,,10,,,
1 #Tenant ID Subsystems FilterIDs ActivationInterval Strategy StrategyParameters ConnID ConnFilterIDs ConnWeight ConnBlocker ConnParameters Weight
2 cgrates.org DSP1 *any *weight ALL 20 false 10
3 cgrates.org DSP1 ALL2 10

View File

@@ -1,5 +1,4 @@
//go:build integration
// +build integration
/*
Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
@@ -22,168 +21,156 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
package registrarc
import (
"bytes"
"os/exec"
"path"
"fmt"
"testing"
"time"
"github.com/cgrates/birpc/context"
"github.com/cgrates/birpc"
"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
)
var (
dspDir string
dspCfgPath string
dspCfg *config.CGRConfig
dspCmd *exec.Cmd
dspRPC *birpc.Client
allDir string
allCfgPath string
allCmd *exec.Cmd
all2Dir string
all2CfgPath string
all2Cmd *exec.Cmd
dsphTest = []func(t *testing.T){
testDsphInitCfg,
testDsphInitDB,
testDsphStartEngine,
testDsphLoadData,
testDsphBeforeDsphStart,
testDsphStartAll2,
testDsphStartAll,
testDsphStopEngines,
testDsphStopDispatcher,
func TestRegistrarC(t *testing.T) {
dbCfg := engine.DBCfg{
StorDB: &engine.DBParams{
Type: utils.StringPointer(utils.MetaInternal),
},
}
)
func TestDspHosts(t *testing.T) {
switch *utils.DBType {
case utils.MetaMySQL:
allDir = "all_mysql"
all2Dir = "all2_mysql"
dspDir = "dispatchers_mysql"
case utils.MetaMongo:
allDir = "all_mongo"
all2Dir = "all2_mongo"
dspDir = "dispatchers_mongo"
dbCfg.DataDB = engine.MongoDBCfg.DataDB
case utils.MetaInternal, utils.MetaPostgres:
t.SkipNow()
default:
t.Fatal("Unknown Database type")
}
for _, stest := range dsphTest {
t.Run(dspDir, stest)
}
}
func testDsphInitCfg(t *testing.T) {
dspCfgPath = path.Join(*utils.DataDir, "conf", "samples", "registrarc", dspDir)
allCfgPath = path.Join(*utils.DataDir, "conf", "samples", "registrarc", allDir)
all2CfgPath = path.Join(*utils.DataDir, "conf", "samples", "registrarc", all2Dir)
var err error
if dspCfg, err = config.NewCGRConfigFromPath(dspCfgPath); err != nil {
t.Error(err)
const (
dspCfg = `{
"general": {
"node_id": "dispatcher",
"reconnects": 1
},
"caches": {
"partitions": {
"*dispatcher_hosts": {
"limit": -1,
"ttl": "150ms"
}
}
},
"dispatchers": {
"enabled": true
}
}`
workerCfg = `{
"general": {
"node_id": "%s"
},
"listen": {
"rpc_json": ":%[2]d12",
"rpc_gob": ":%[2]d13",
"http": ":%[2]d80"
},
"rpc_conns": {
"dispConn": {
"strategy": "*first",
"conns": [{
"address": "http://127.0.0.1:2080/registrar",
"transport": "*http_jsonrpc"
}]
}
},
"registrarc": {
"dispatchers": {
"enabled": true,
"registrars_conns": ["dispConn"],
"hosts": [{
"Tenant": "*default",
"ID": "hostB",
"transport": "*json",
"tls": false
}],
"refresh_interval": "1s"
}
}
}`
)
func testDsphInitDB(t *testing.T) {
if err := engine.InitDataDb(dspCfg); err != nil {
t.Fatal(err)
disp := engine.TestEngine{
ConfigJSON: dspCfg,
DBCfg: dbCfg,
}
if err := engine.InitStorDb(dspCfg); err != nil {
t.Fatal(err)
}
}
client, cfg := disp.Run(t)
func testDsphStartEngine(t *testing.T) {
var err error
if dspCmd, err = engine.StopStartEngine(dspCfgPath, *utils.WaitRater); err != nil {
t.Fatal(err)
tpFiles := map[string]string{
utils.DispatcherProfilesCsv: `#Tenant,ID,Subsystems,FilterIDs,ActivationInterval,Strategy,StrategyParameters,ConnID,ConnFilterIDs,ConnWeight,ConnBlocker,ConnParameters,Weight
cgrates.org,dsp_test,,,,*weight,,hostA,,20,,,
cgrates.org,dsp_test,,,,,,hostB,,10,,,`,
}
dspRPC, err = newRPCClient(dspCfg.ListenCfg()) // We connect over JSON so we can also troubleshoot if needed
if err != nil {
t.Fatal("Could not connect to rater: ", err.Error())
}
}
func testDsphLoadData(t *testing.T) {
loader := exec.Command("cgr-loader", "-config_path", dspCfgPath, "-path", path.Join(*utils.DataDir, "tariffplans", "registrarc"), "-caches_address=")
output := bytes.NewBuffer(nil)
outerr := bytes.NewBuffer(nil)
loader.Stdout = output
loader.Stderr = outerr
if err := loader.Run(); err != nil {
t.Log(loader.Args)
t.Log(output.String())
t.Log(outerr.String())
t.Fatal(err)
}
}
engine.LoadCSVsWithCGRLoader(t, cfg.ConfigPath, "", nil, tpFiles, "-caches_address=")
func testDsphGetNodeID() (id string, err error) {
var status map[string]any
if err = dspRPC.Call(context.Background(), utils.DispatcherSv1RemoteStatus, utils.TenantWithAPIOpts{
Tenant: "cgrates.org",
APIOpts: map[string]any{},
}, &status); err != nil {
return
checkNodeID := func(t *testing.T, expected string) {
t.Helper()
var status map[string]any
err := client.Call(context.Background(), utils.CoreSv1Status,
utils.TenantWithAPIOpts{
Tenant: "cgrates.org",
APIOpts: map[string]any{},
}, &status)
if err != nil && expected != "" {
t.Fatalf("DispatcherSv1.RemoteStatus unexpected err: %v", err)
}
nodeID := utils.IfaceAsString(status[utils.NodeID])
if expected == "" &&
(err == nil || err.Error() != utils.ErrDSPHostNotFound.Error()) {
t.Errorf("DispatcherSv1.RemoteStatus err=%q, want %q", err, utils.ErrDSPHostNotFound)
}
if nodeID != expected {
t.Errorf("DispatcherSv1.RemoteStatus nodeID=%q, want %q", nodeID, expected)
}
}
return utils.IfaceAsString(status[utils.NodeID]), nil
}
func testDsphBeforeDsphStart(t *testing.T) {
if _, err := testDsphGetNodeID(); err == nil || err.Error() != utils.ErrDSPHostNotFound.Error() {
t.Errorf("Expected error: %s received: %v", utils.ErrDSPHostNotFound, err)
}
}
/*
Currently, only a dispatcher profile can be found in dataDB.
It references 2 hosts that don't exist yet: hostA (weight=20) and hostB (weight=10).
Its sorting strategy is "*weight".
*/
func testDsphStartAll2(t *testing.T) {
var err error
if all2Cmd, err = engine.StartEngine(all2CfgPath, *utils.WaitRater); err != nil {
t.Fatal(err)
}
if nodeID, err := testDsphGetNodeID(); err != nil {
t.Fatal(err)
} else if nodeID != "ALL2" {
t.Errorf("Expected nodeID: %q ,received: %q", "ALL2", nodeID)
}
}
checkNodeID(t, "") // no hosts registered yet; will fail
func testDsphStartAll(t *testing.T) {
var err error
if allCmd, err = engine.StartEngine(allCfgPath, *utils.WaitRater); err != nil {
t.Fatal(err)
}
if nodeID, err := testDsphGetNodeID(); err != nil {
t.Fatal(err)
} else if nodeID != "ALL" {
t.Errorf("Expected nodeID: %q ,received: %q", "ALL", nodeID)
}
}
// Workers will be automatically closed at the end of the subtest.
t.Run("start workers and dispatch", func(t *testing.T) {
workerB := engine.TestEngine{
ConfigJSON: fmt.Sprintf(workerCfg, "workerB", 70),
DBCfg: dbCfg,
PreserveDataDB: true,
PreserveStorDB: true,
}
workerB.Run(t)
func testDsphStopEngines(t *testing.T) {
if err := allCmd.Process.Kill(); err != nil {
t.Fatal(err)
}
if err := all2Cmd.Process.Kill(); err != nil {
t.Fatal(err)
}
time.Sleep(2 * time.Second)
if _, err := testDsphGetNodeID(); err == nil || err.Error() != utils.ErrDSPHostNotFound.Error() {
t.Errorf("Expected error: %s received: %v", utils.ErrDSPHostNotFound, err)
}
}
// workerB is now active and has registered hostB.
// The status request will be dispatched to hostB, because
// hostA, which should have had priority, has not yet been
// registered.
checkNodeID(t, "workerB")
func testDsphStopDispatcher(t *testing.T) {
if err := engine.KillEngine(*utils.WaitRater); err != nil {
t.Error(err)
}
workerA := engine.TestEngine{
ConfigJSON: fmt.Sprintf(workerCfg, "workerA", 60),
DBCfg: dbCfg,
PreserveDataDB: true,
PreserveStorDB: true,
}
workerA.Run(t)
	// workerA is now active, but instead of adding a separate hostA
	// entry it has re-registered under hostB, overwriting hostB's
	// address with its own. The next request is therefore still
	// dispatched via the hostB profile, which now reaches workerA.
checkNodeID(t, "workerA")
})
time.Sleep(150 * time.Millisecond) // wait for cached hosts to expire
checkNodeID(t, "") // no hosts left
}

View File

@@ -21,7 +21,8 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
package services
import (
"path"
"os"
"path/filepath"
"runtime"
"sync"
"testing"
@@ -69,10 +70,47 @@ func TestDispatcherHReload(t *testing.T) {
t.Errorf("Expected service to be down")
}
cfgPath := t.TempDir()
filePath := filepath.Join(cfgPath, "cgrates.json")
if err := os.WriteFile(filePath, []byte(`{
"general": {
"node_id": "ALL"
},
"listen": {
"rpc_json": ":6012",
"rpc_gob": ":6013",
"http": ":6080"
},
"rpc_conns": {
"dispConn": {
"strategy": "*first",
"conns": [{
"address": "http://127.0.0.1:2080/registrar",
"transport": "*http_jsonrpc"
}]
}
},
"registrarc": {
"dispatchers": {
"enabled": true,
"registrars_conns": ["dispConn"],
"hosts": [{
"Tenant": "*default",
"ID": "hostB",
"transport": "*json",
"tls": false
}],
"refresh_interval": "1s"
}
}
}`), 0644); err != nil {
t.Fatal(err)
}
var reply string
if err := cfg.V1ReloadConfig(context.Background(),
&config.ReloadArgs{
Path: path.Join("/usr", "share", "cgrates", "conf", "samples", "registrarc", "all_mongo"),
Path: cfgPath,
Section: config.RegistrarCJson,
}, &reply); err != nil {
t.Fatal(err)