mirror of
https://github.com/cgrates/cgrates.git
synced 2026-02-11 18:16:24 +05:00
Updated general_test integration tests for gob
This commit is contained in:
@@ -21,6 +21,8 @@ package general_tests
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/rpc"
|
||||
"net/rpc/jsonrpc"
|
||||
@@ -42,8 +44,20 @@ var (
|
||||
a1CfgPath string
|
||||
a1Cfg *config.CGRConfig
|
||||
a1rpc *rpc.Client
|
||||
encoding = flag.String("rpc", utils.MetaJSON, "what encoding whould be uused for rpc comunication")
|
||||
)
|
||||
|
||||
func newRPCClient(cfg *config.ListenCfg) (c *rpc.Client, err error) {
|
||||
switch *encoding {
|
||||
case utils.MetaJSON:
|
||||
return jsonrpc.Dial(utils.TCP, cfg.RPCJSONListen)
|
||||
case utils.MetaGOB:
|
||||
return rpc.Dial(utils.TCP, cfg.RPCGOBListen)
|
||||
default:
|
||||
return nil, errors.New("UNSUPPORTED_RPC")
|
||||
}
|
||||
}
|
||||
|
||||
var sTestsA1it = []func(t *testing.T){
|
||||
testA1itLoadConfig,
|
||||
testA1itResetDataDB,
|
||||
@@ -105,7 +119,7 @@ func testA1itStartEngine(t *testing.T) {
|
||||
|
||||
func testA1itRPCConn(t *testing.T) {
|
||||
var err error
|
||||
a1rpc, err = jsonrpc.Dial("tcp", a1Cfg.ListenCfg().RPCJSONListen)
|
||||
a1rpc, err = newRPCClient(a1Cfg.ListenCfg())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -122,13 +136,15 @@ func testA1itLoadTPFromFolder(t *testing.T) {
|
||||
time.Sleep(time.Duration(100 * time.Millisecond))
|
||||
tStart := time.Date(2017, 3, 3, 10, 39, 33, 0, time.UTC)
|
||||
tEnd := time.Date(2017, 3, 3, 10, 39, 33, 10240, time.UTC)
|
||||
cd := engine.CallDescriptor{
|
||||
Category: "data1",
|
||||
Tenant: "cgrates.org",
|
||||
Subject: "rpdata1",
|
||||
Destination: "data",
|
||||
TimeStart: tStart,
|
||||
TimeEnd: tEnd,
|
||||
cd := &engine.CallDescriptorWithArgDispatcher{
|
||||
CallDescriptor: &engine.CallDescriptor{
|
||||
Category: "data1",
|
||||
Tenant: "cgrates.org",
|
||||
Subject: "rpdata1",
|
||||
Destination: "data",
|
||||
TimeStart: tStart,
|
||||
TimeEnd: tEnd,
|
||||
},
|
||||
}
|
||||
var cc engine.CallCost
|
||||
if err := a1rpc.Call(utils.ResponderGetCost, cd, &cc); err != nil {
|
||||
@@ -138,12 +154,14 @@ func testA1itLoadTPFromFolder(t *testing.T) {
|
||||
}
|
||||
|
||||
//add a default charger
|
||||
chargerProfile := &engine.ChargerProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "Default",
|
||||
RunID: utils.MetaDefault,
|
||||
AttributeIDs: []string{"*none"},
|
||||
Weight: 20,
|
||||
chargerProfile := &v1.ChargerWithCache{
|
||||
ChargerProfile: &engine.ChargerProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "Default",
|
||||
RunID: utils.MetaDefault,
|
||||
AttributeIDs: []string{"*none"},
|
||||
Weight: 20,
|
||||
},
|
||||
}
|
||||
var result string
|
||||
if err := a1rpc.Call(utils.ApierV1SetChargerProfile, chargerProfile, &result); err != nil {
|
||||
@@ -211,7 +229,7 @@ func testA1itDataSession1(t *testing.T) {
|
||||
var initRpl *sessions.V1InitSessionReply
|
||||
if err := a1rpc.Call(utils.SessionSv1InitiateSession,
|
||||
initArgs, &initRpl); err != nil {
|
||||
t.Error(err)
|
||||
t.Fatal(err)
|
||||
}
|
||||
if initRpl.MaxUsage != usage {
|
||||
t.Errorf("Expecting : %+v, received: %+v", usage, initRpl.MaxUsage)
|
||||
@@ -282,7 +300,7 @@ func testA1itDataSession1(t *testing.T) {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if err := a1rpc.Call(utils.SessionSv1ProcessCDR, termArgs.CGREvent, &rpl); err != nil {
|
||||
if err := a1rpc.Call(utils.SessionSv1ProcessCDR, &utils.CGREventWithArgDispatcher{CGREvent: termArgs.CGREvent}, &rpl); err != nil {
|
||||
t.Error(err)
|
||||
} else if rpl != utils.OK {
|
||||
t.Errorf("Received reply: %s", rpl)
|
||||
|
||||
@@ -21,7 +21,6 @@ package general_tests
|
||||
|
||||
import (
|
||||
"net/rpc"
|
||||
"net/rpc/jsonrpc"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -105,7 +104,7 @@ func testV1AccStartEngine(t *testing.T) {
|
||||
|
||||
func testV1AccRpcConn(t *testing.T) {
|
||||
var err error
|
||||
accRpc, err = jsonrpc.Dial("tcp", accCfg.ListenCfg().RPCJSONListen) // We connect over JSON so we can also troubleshoot if needed
|
||||
accRpc, err = newRPCClient(accCfg.ListenCfg()) // We connect over JSON so we can also troubleshoot if needed
|
||||
if err != nil {
|
||||
t.Fatal("Could not connect to rater: ", err.Error())
|
||||
}
|
||||
@@ -246,15 +245,17 @@ func testV1AccSendToThreshold(t *testing.T) {
|
||||
}
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
tPrfl := &engine.ThresholdProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "THD_AccDisableAndLog",
|
||||
FilterIDs: []string{"*string:~*req.Account:testAccThreshold"},
|
||||
MaxHits: -1,
|
||||
MinSleep: time.Duration(1 * time.Second),
|
||||
Weight: 20.0,
|
||||
Async: true,
|
||||
ActionIDs: []string{"DISABLE_LOG"},
|
||||
tPrfl := &engine.ThresholdWithCache{
|
||||
ThresholdProfile: &engine.ThresholdProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "THD_AccDisableAndLog",
|
||||
FilterIDs: []string{"*string:~*req.Account:testAccThreshold"},
|
||||
MaxHits: -1,
|
||||
MinSleep: time.Duration(1 * time.Second),
|
||||
Weight: 20.0,
|
||||
Async: true,
|
||||
ActionIDs: []string{"DISABLE_LOG"},
|
||||
},
|
||||
}
|
||||
|
||||
if err := accRpc.Call(utils.ApierV1SetThresholdProfile, tPrfl, &reply); err != nil {
|
||||
|
||||
@@ -22,7 +22,6 @@ package general_tests
|
||||
|
||||
import (
|
||||
"net/rpc"
|
||||
"net/rpc/jsonrpc"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -100,7 +99,7 @@ func testCDREStartEngine(t *testing.T) {
|
||||
|
||||
func testCDRERpcConn(t *testing.T) {
|
||||
var err error
|
||||
cdreRPC, err = jsonrpc.Dial("tcp", cdreCfg.ListenCfg().RPCJSONListen)
|
||||
cdreRPC, err = newRPCClient(cdreCfg.ListenCfg())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -22,7 +22,6 @@ package general_tests
|
||||
|
||||
import (
|
||||
"net/rpc"
|
||||
"net/rpc/jsonrpc"
|
||||
"path"
|
||||
"reflect"
|
||||
"testing"
|
||||
@@ -123,7 +122,7 @@ func testV2CDRsStartEngine(t *testing.T) {
|
||||
|
||||
// Connect rpc client to rater
|
||||
func testV2CDRsRpcConn(t *testing.T) {
|
||||
cdrsRpc, err = jsonrpc.Dial("tcp", cdrsCfg.ListenCfg().RPCJSONListen) // We connect over JSON so we can also troubleshoot if needed
|
||||
cdrsRpc, err = newRPCClient(cdrsCfg.ListenCfg()) // We connect over JSON so we can also troubleshoot if needed
|
||||
if err != nil {
|
||||
t.Fatal("Could not connect to rater: ", err.Error())
|
||||
}
|
||||
@@ -587,7 +586,9 @@ func testV2CDRsGetStats1(t *testing.T) {
|
||||
utils.ConcatenatedKey(utils.MetaSum, utils.DynamicDataPrefix+utils.Usage): utils.NOT_AVAILABLE,
|
||||
}
|
||||
if err := cdrsRpc.Call(utils.StatSv1GetQueueStringMetrics,
|
||||
&utils.TenantID{Tenant: "cgrates.org", ID: expectedIDs[0]}, &metrics); err != nil {
|
||||
&utils.TenantIDWithArgDispatcher{
|
||||
TenantID: &utils.TenantID{Tenant: "cgrates.org", ID: expectedIDs[0]},
|
||||
}, &metrics); err != nil {
|
||||
t.Error(err)
|
||||
} else if !reflect.DeepEqual(expectedMetrics, metrics) {
|
||||
t.Errorf("expecting: %+v, received reply: %s", expectedMetrics, metrics)
|
||||
@@ -605,7 +606,9 @@ func testV2CDRsGetThreshold1(t *testing.T) {
|
||||
}
|
||||
var td engine.Threshold
|
||||
if err := cdrsRpc.Call(utils.ThresholdSv1GetThreshold,
|
||||
&utils.TenantID{Tenant: "cgrates.org", ID: "THD_PoccessCDR"}, &td); err != nil {
|
||||
&utils.TenantIDWithArgDispatcher{
|
||||
TenantID: &utils.TenantID{Tenant: "cgrates.org", ID: "THD_PoccessCDR"},
|
||||
}, &td); err != nil {
|
||||
t.Error(err)
|
||||
} else if td.Hits != 0 {
|
||||
t.Errorf("received: %+v", td)
|
||||
@@ -650,7 +653,9 @@ func testV2CDRsGetStats2(t *testing.T) {
|
||||
utils.ConcatenatedKey(utils.MetaSum, utils.DynamicDataPrefix+utils.Usage): "60000000000",
|
||||
}
|
||||
if err := cdrsRpc.Call(utils.StatSv1GetQueueStringMetrics,
|
||||
&utils.TenantID{Tenant: "cgrates.org", ID: expectedIDs[0]}, &metrics); err != nil {
|
||||
&utils.TenantIDWithArgDispatcher{
|
||||
TenantID: &utils.TenantID{Tenant: "cgrates.org", ID: expectedIDs[0]},
|
||||
}, &metrics); err != nil {
|
||||
t.Error(err)
|
||||
} else if !reflect.DeepEqual(expectedMetrics, metrics) {
|
||||
t.Errorf("expecting: %+v, received reply: %s", expectedMetrics, metrics)
|
||||
@@ -660,7 +665,9 @@ func testV2CDRsGetStats2(t *testing.T) {
|
||||
func testV2CDRsGetThreshold2(t *testing.T) {
|
||||
var td engine.Threshold
|
||||
if err := cdrsRpc.Call(utils.ThresholdSv1GetThreshold,
|
||||
&utils.TenantID{Tenant: "cgrates.org", ID: "THD_PoccessCDR"}, &td); err != nil {
|
||||
&utils.TenantIDWithArgDispatcher{
|
||||
TenantID: &utils.TenantID{Tenant: "cgrates.org", ID: "THD_PoccessCDR"},
|
||||
}, &td); err != nil {
|
||||
t.Error(err)
|
||||
} else if td.Hits != 2 { // 2 Chargers
|
||||
t.Errorf("received: %+v", td)
|
||||
|
||||
@@ -21,7 +21,6 @@ package general_tests
|
||||
|
||||
import (
|
||||
"net/rpc"
|
||||
"net/rpc/jsonrpc"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -97,7 +96,7 @@ func testV1DataStartEngine(t *testing.T) {
|
||||
|
||||
func testV1DataRpcConn(t *testing.T) {
|
||||
var err error
|
||||
dataRpc, err = jsonrpc.Dial("tcp", dataCfg.ListenCfg().RPCJSONListen) // We connect over JSON so we can also troubleshoot if needed
|
||||
dataRpc, err = newRPCClient(dataCfg.ListenCfg()) // We connect over JSON so we can also troubleshoot if needed
|
||||
if err != nil {
|
||||
t.Fatal("Could not connect to rater: ", err.Error())
|
||||
}
|
||||
|
||||
@@ -21,7 +21,6 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
|
||||
package general_tests
|
||||
|
||||
import (
|
||||
"net/rpc/jsonrpc"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -81,7 +80,7 @@ func testDestinationStartEngine(t *testing.T) {
|
||||
|
||||
func testDestinationRpcConn(t *testing.T) {
|
||||
var err error
|
||||
tutorialRpc, err = jsonrpc.Dial("tcp", tutorialCfg.ListenCfg().RPCJSONListen) // We connect over JSON so we can also troubleshoot if needed
|
||||
tutorialRpc, err = newRPCClient(tutorialCfg.ListenCfg()) // We connect over JSON so we can also troubleshoot if needed
|
||||
if err != nil {
|
||||
t.Fatal("Could not connect to rater: ", err.Error())
|
||||
}
|
||||
|
||||
@@ -22,12 +22,12 @@ package general_tests
|
||||
|
||||
import (
|
||||
"net/rpc"
|
||||
"net/rpc/jsonrpc"
|
||||
"path"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "github.com/cgrates/cgrates/apier/v1"
|
||||
"github.com/cgrates/cgrates/config"
|
||||
"github.com/cgrates/cgrates/engine"
|
||||
"github.com/cgrates/cgrates/utils"
|
||||
@@ -94,7 +94,7 @@ func testV1FltrStartEngine(t *testing.T) {
|
||||
|
||||
func testV1FltrRpcConn(t *testing.T) {
|
||||
var err error
|
||||
fltrRpc, err = jsonrpc.Dial("tcp", fltrCfg.ListenCfg().RPCJSONListen) // We connect over JSON so we can also troubleshoot if needed
|
||||
fltrRpc, err = newRPCClient(fltrCfg.ListenCfg()) // We connect over JSON so we can also troubleshoot if needed
|
||||
if err != nil {
|
||||
t.Fatal("Could not connect to rater: ", err.Error())
|
||||
}
|
||||
@@ -114,14 +114,16 @@ func testV1FltrLoadTarrifPlans(t *testing.T) {
|
||||
func testV1FltrAddStats(t *testing.T) {
|
||||
var reply []string
|
||||
expected := []string{"Stat_1"}
|
||||
ev1 := utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "event1",
|
||||
Event: map[string]interface{}{
|
||||
utils.Account: "1001",
|
||||
utils.AnswerTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC),
|
||||
utils.Usage: time.Duration(11 * time.Second),
|
||||
utils.COST: 10.0,
|
||||
ev1 := &engine.StatsArgsProcessEvent{
|
||||
CGREvent: &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "event1",
|
||||
Event: map[string]interface{}{
|
||||
utils.Account: "1001",
|
||||
utils.AnswerTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC),
|
||||
utils.Usage: time.Duration(11 * time.Second),
|
||||
utils.COST: 10.0,
|
||||
},
|
||||
},
|
||||
}
|
||||
if err := fltrRpc.Call(utils.StatSv1ProcessEvent, &ev1, &reply); err != nil {
|
||||
@@ -131,7 +133,7 @@ func testV1FltrAddStats(t *testing.T) {
|
||||
}
|
||||
|
||||
expected = []string{"Stat_1"}
|
||||
ev1 = utils.CGREvent{
|
||||
ev1.CGREvent = &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "event2",
|
||||
Event: map[string]interface{}{
|
||||
@@ -148,7 +150,7 @@ func testV1FltrAddStats(t *testing.T) {
|
||||
}
|
||||
|
||||
expected = []string{"Stat_2"}
|
||||
ev1 = utils.CGREvent{
|
||||
ev1.CGREvent = &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "event2",
|
||||
Event: map[string]interface{}{
|
||||
@@ -165,7 +167,7 @@ func testV1FltrAddStats(t *testing.T) {
|
||||
}
|
||||
|
||||
expected = []string{"Stat_2"}
|
||||
ev1 = utils.CGREvent{
|
||||
ev1.CGREvent = &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "event2",
|
||||
Event: map[string]interface{}{
|
||||
@@ -182,7 +184,7 @@ func testV1FltrAddStats(t *testing.T) {
|
||||
}
|
||||
|
||||
expected = []string{"Stat_3"}
|
||||
ev1 = utils.CGREvent{
|
||||
ev1.CGREvent = &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "event3",
|
||||
Event: map[string]interface{}{
|
||||
@@ -199,7 +201,7 @@ func testV1FltrAddStats(t *testing.T) {
|
||||
}
|
||||
|
||||
expected = []string{"Stat_1_1"}
|
||||
ev1 = utils.CGREvent{
|
||||
ev1.CGREvent = &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "event3",
|
||||
Event: map[string]interface{}{
|
||||
@@ -217,7 +219,7 @@ func testV1FltrAddStats(t *testing.T) {
|
||||
}
|
||||
|
||||
expected = []string{"Stat_1_1"}
|
||||
ev1 = utils.CGREvent{
|
||||
ev1.CGREvent = &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "event3",
|
||||
Event: map[string]interface{}{
|
||||
@@ -238,14 +240,16 @@ func testV1FltrAddStats(t *testing.T) {
|
||||
func testV1FltrPupulateThreshold(t *testing.T) {
|
||||
//Add a filter of type *stats and check if acd metric is minim 10 ( greater than 10)
|
||||
//we expect that acd from Stat_1 to be 11 so the filter should pass (11 > 10)
|
||||
filter := &engine.Filter{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "FLTR_TH_Stats1",
|
||||
Rules: []*engine.FilterRule{
|
||||
{
|
||||
Type: "*gt",
|
||||
FieldName: "~*stats.Stat_1.*acd",
|
||||
Values: []string{"10.0"},
|
||||
filter := &v1.FilterWithCache{
|
||||
Filter: &engine.Filter{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "FLTR_TH_Stats1",
|
||||
Rules: []*engine.FilterRule{
|
||||
{
|
||||
Type: "*gt",
|
||||
FieldName: "~*stats.Stat_1.*acd",
|
||||
Values: []string{"10.0"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -269,19 +273,21 @@ func testV1FltrPupulateThreshold(t *testing.T) {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
//Add a threshold with filter from above and an inline filter for Account 1010
|
||||
tPrfl := &engine.ThresholdProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "TH_Stats1",
|
||||
FilterIDs: []string{"FLTR_TH_Stats1", "*string:~*req.Account:1010"},
|
||||
ActivationInterval: &utils.ActivationInterval{
|
||||
ActivationTime: time.Date(2014, 7, 14, 14, 35, 0, 0, time.UTC),
|
||||
ExpiryTime: time.Date(2014, 7, 14, 14, 35, 0, 0, time.UTC),
|
||||
tPrfl := &engine.ThresholdWithCache{
|
||||
ThresholdProfile: &engine.ThresholdProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "TH_Stats1",
|
||||
FilterIDs: []string{"FLTR_TH_Stats1", "*string:~*req.Account:1010"},
|
||||
ActivationInterval: &utils.ActivationInterval{
|
||||
ActivationTime: time.Date(2014, 7, 14, 14, 35, 0, 0, time.UTC),
|
||||
ExpiryTime: time.Date(2014, 7, 14, 14, 35, 0, 0, time.UTC),
|
||||
},
|
||||
MaxHits: -1,
|
||||
MinSleep: time.Duration(1 * time.Millisecond),
|
||||
Weight: 10.0,
|
||||
ActionIDs: []string{"LOG"},
|
||||
Async: true,
|
||||
},
|
||||
MaxHits: -1,
|
||||
MinSleep: time.Duration(1 * time.Millisecond),
|
||||
Weight: 10.0,
|
||||
ActionIDs: []string{"LOG"},
|
||||
Async: true,
|
||||
}
|
||||
if err := fltrRpc.Call(utils.ApierV1SetThresholdProfile, tPrfl, &result); err != nil {
|
||||
t.Error(err)
|
||||
@@ -292,18 +298,20 @@ func testV1FltrPupulateThreshold(t *testing.T) {
|
||||
if err := fltrRpc.Call(utils.ApierV1GetThresholdProfile,
|
||||
&utils.TenantID{Tenant: tPrfl.Tenant, ID: tPrfl.ID}, &rcvTh); err != nil {
|
||||
t.Error(err)
|
||||
} else if !reflect.DeepEqual(tPrfl, rcvTh) {
|
||||
t.Errorf("Expecting: %+v, received: %+v", tPrfl, rcvTh)
|
||||
} else if !reflect.DeepEqual(tPrfl.ThresholdProfile, rcvTh) {
|
||||
t.Errorf("Expecting: %+v, received: %+v", tPrfl.ThresholdProfile, rcvTh)
|
||||
}
|
||||
}
|
||||
|
||||
func testV1FltrGetThresholdForEvent(t *testing.T) {
|
||||
// check the event
|
||||
tEv := utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "event1",
|
||||
Event: map[string]interface{}{
|
||||
utils.Account: "1010"},
|
||||
tEv := &engine.ArgsProcessEvent{
|
||||
CGREvent: &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "event1",
|
||||
Event: map[string]interface{}{
|
||||
utils.Account: "1010"},
|
||||
},
|
||||
}
|
||||
var ids []string
|
||||
eIDs := []string{"TH_Stats1"}
|
||||
@@ -317,14 +325,16 @@ func testV1FltrGetThresholdForEvent(t *testing.T) {
|
||||
func testV1FltrGetThresholdForEvent2(t *testing.T) {
|
||||
//Add a filter of type *stats and check if acd metric is maximum 10 ( lower than 10)
|
||||
//we expect that acd from Stat_1 to be 11 so the filter should not pass (11 > 10)
|
||||
filter := &engine.Filter{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "FLTR_TH_Stats1",
|
||||
Rules: []*engine.FilterRule{
|
||||
{
|
||||
Type: "*lt",
|
||||
FieldName: "~*stats.Stat_1.*acd",
|
||||
Values: []string{"10.0"},
|
||||
filter := &v1.FilterWithCache{
|
||||
Filter: &engine.Filter{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "FLTR_TH_Stats1",
|
||||
Rules: []*engine.FilterRule{
|
||||
{
|
||||
Type: "*lt",
|
||||
FieldName: "~*stats.Stat_1.*acd",
|
||||
Values: []string{"10.0"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -337,18 +347,20 @@ func testV1FltrGetThresholdForEvent2(t *testing.T) {
|
||||
}
|
||||
|
||||
//update the threshold with new filter
|
||||
tPrfl := &engine.ThresholdProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "TH_Stats1",
|
||||
FilterIDs: []string{"FLTR_TH_Stats1", "*string:~*req.Account:1010"},
|
||||
ActivationInterval: &utils.ActivationInterval{
|
||||
ActivationTime: time.Date(2014, 7, 14, 14, 35, 0, 0, time.UTC),
|
||||
ExpiryTime: time.Date(2014, 7, 14, 14, 35, 0, 0, time.UTC),
|
||||
tPrfl := &engine.ThresholdWithCache{
|
||||
ThresholdProfile: &engine.ThresholdProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "TH_Stats1",
|
||||
FilterIDs: []string{"FLTR_TH_Stats1", "*string:~*req.Account:1010"},
|
||||
ActivationInterval: &utils.ActivationInterval{
|
||||
ActivationTime: time.Date(2014, 7, 14, 14, 35, 0, 0, time.UTC),
|
||||
ExpiryTime: time.Date(2014, 7, 14, 14, 35, 0, 0, time.UTC),
|
||||
},
|
||||
MaxHits: -1,
|
||||
MinSleep: time.Duration(1 * time.Millisecond),
|
||||
Weight: 10.0,
|
||||
ActionIDs: []string{"LOG"},
|
||||
},
|
||||
MaxHits: -1,
|
||||
MinSleep: time.Duration(1 * time.Millisecond),
|
||||
Weight: 10.0,
|
||||
ActionIDs: []string{"LOG"},
|
||||
}
|
||||
if err := fltrRpc.Call(utils.ApierV1SetThresholdProfile, tPrfl, &result); err != nil {
|
||||
t.Error(err)
|
||||
@@ -356,11 +368,13 @@ func testV1FltrGetThresholdForEvent2(t *testing.T) {
|
||||
t.Error("Unexpected reply returned", result)
|
||||
}
|
||||
|
||||
tEv := utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "event1",
|
||||
Event: map[string]interface{}{
|
||||
utils.Account: "1010"},
|
||||
tEv := &engine.ArgsProcessEvent{
|
||||
CGREvent: &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "event1",
|
||||
Event: map[string]interface{}{
|
||||
utils.Account: "1010"},
|
||||
},
|
||||
}
|
||||
var ids []string
|
||||
if err := fltrRpc.Call(utils.ThresholdSv1ProcessEvent, tEv, &ids); err == nil ||
|
||||
@@ -383,7 +397,7 @@ func testV1FltrPopulateResources(t *testing.T) {
|
||||
}
|
||||
|
||||
var result string
|
||||
if err := fltrRpc.Call(utils.ApierV1SetResourceProfile, rlsConfig, &result); err != nil {
|
||||
if err := fltrRpc.Call(utils.ApierV1SetResourceProfile, &v1.ResourceWithCache{ResourceProfile: rlsConfig}, &result); err != nil {
|
||||
t.Error(err)
|
||||
} else if result != utils.OK {
|
||||
t.Error("Unexpected reply returned", result)
|
||||
@@ -416,14 +430,16 @@ func testV1FltrPopulateResources(t *testing.T) {
|
||||
|
||||
//we allocate 3 units to resource and add a filter for Usages > 2
|
||||
//should match (3>2)
|
||||
filter := &engine.Filter{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "FLTR_TH_Resource",
|
||||
Rules: []*engine.FilterRule{
|
||||
{
|
||||
Type: "*gt",
|
||||
FieldName: "~*resources.ResTest.TotalUsage",
|
||||
Values: []string{"2.0"},
|
||||
filter := v1.FilterWithCache{
|
||||
Filter: &engine.Filter{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "FLTR_TH_Resource",
|
||||
Rules: []*engine.FilterRule{
|
||||
{
|
||||
Type: "*gt",
|
||||
FieldName: "~*resources.ResTest.TotalUsage",
|
||||
Values: []string{"2.0"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -434,15 +450,17 @@ func testV1FltrPopulateResources(t *testing.T) {
|
||||
t.Error("Unexpected reply returned", result)
|
||||
}
|
||||
|
||||
tPrfl := &engine.ThresholdProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "TH_ResTest",
|
||||
FilterIDs: []string{"FLTR_TH_Resource", "*string:~*req.Account:2020"},
|
||||
MaxHits: -1,
|
||||
MinSleep: time.Duration(1 * time.Millisecond),
|
||||
Weight: 10.0,
|
||||
ActionIDs: []string{"LOG"},
|
||||
Async: true,
|
||||
tPrfl := &engine.ThresholdWithCache{
|
||||
ThresholdProfile: &engine.ThresholdProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "TH_ResTest",
|
||||
FilterIDs: []string{"FLTR_TH_Resource", "*string:~*req.Account:2020"},
|
||||
MaxHits: -1,
|
||||
MinSleep: time.Duration(1 * time.Millisecond),
|
||||
Weight: 10.0,
|
||||
ActionIDs: []string{"LOG"},
|
||||
Async: true,
|
||||
},
|
||||
}
|
||||
if err := fltrRpc.Call(utils.ApierV1SetThresholdProfile, tPrfl, &result); err != nil {
|
||||
t.Error(err)
|
||||
@@ -453,16 +471,18 @@ func testV1FltrPopulateResources(t *testing.T) {
|
||||
if err := fltrRpc.Call(utils.ApierV1GetThresholdProfile,
|
||||
&utils.TenantID{Tenant: tPrfl.Tenant, ID: tPrfl.ID}, &rcvTh); err != nil {
|
||||
t.Error(err)
|
||||
} else if !reflect.DeepEqual(tPrfl, rcvTh) {
|
||||
t.Errorf("Expecting: %+v, received: %+v", tPrfl, rcvTh)
|
||||
} else if !reflect.DeepEqual(tPrfl.ThresholdProfile, rcvTh) {
|
||||
t.Errorf("Expecting: %+v, received: %+v", tPrfl.ThresholdProfile, rcvTh)
|
||||
}
|
||||
|
||||
// check the event
|
||||
tEv := utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "event1",
|
||||
Event: map[string]interface{}{
|
||||
utils.Account: "2020"},
|
||||
tEv := &engine.ArgsProcessEvent{
|
||||
CGREvent: &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "event1",
|
||||
Event: map[string]interface{}{
|
||||
utils.Account: "2020"},
|
||||
},
|
||||
}
|
||||
var ids []string
|
||||
eIDs := []string{"TH_ResTest"}
|
||||
@@ -475,7 +495,7 @@ func testV1FltrPopulateResources(t *testing.T) {
|
||||
//change the filter
|
||||
//we allocate 3 units to resource and add a filter for Usages < 2
|
||||
//should fail (3<2)
|
||||
filter = &engine.Filter{
|
||||
filter.Filter = &engine.Filter{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "FLTR_TH_Resource",
|
||||
Rules: []*engine.FilterRule{
|
||||
@@ -518,14 +538,16 @@ func testV1FltrAccounts(t *testing.T) {
|
||||
// Add a filter with fieldName taken value from account 1001
|
||||
// and check if *monetary balance is minim 9 ( greater than 9)
|
||||
// we expect that the balance to be 10 so the filter should pass (10 > 9)
|
||||
filter := &engine.Filter{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "FLTR_TH_Accounts",
|
||||
Rules: []*engine.FilterRule{
|
||||
{
|
||||
Type: "*gt",
|
||||
FieldName: "~*accounts.1001.BalanceMap.*monetary[0].Value",
|
||||
Values: []string{"9"},
|
||||
filter := v1.FilterWithCache{
|
||||
Filter: &engine.Filter{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "FLTR_TH_Accounts",
|
||||
Rules: []*engine.FilterRule{
|
||||
{
|
||||
Type: "*gt",
|
||||
FieldName: "~*accounts.1001.BalanceMap.*monetary[0].Value",
|
||||
Values: []string{"9"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -547,15 +569,17 @@ func testV1FltrAccounts(t *testing.T) {
|
||||
}
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
//Add a threshold with filter from above and an inline filter for Account 1010
|
||||
tPrfl := &engine.ThresholdProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "TH_Account",
|
||||
FilterIDs: []string{"FLTR_TH_Accounts", "*string:~*req.Account:1001"},
|
||||
MaxHits: -1,
|
||||
MinSleep: time.Duration(1 * time.Millisecond),
|
||||
Weight: 90.0,
|
||||
ActionIDs: []string{"LOG"},
|
||||
Async: true,
|
||||
tPrfl := &engine.ThresholdWithCache{
|
||||
ThresholdProfile: &engine.ThresholdProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "TH_Account",
|
||||
FilterIDs: []string{"FLTR_TH_Accounts", "*string:~*req.Account:1001"},
|
||||
MaxHits: -1,
|
||||
MinSleep: time.Duration(1 * time.Millisecond),
|
||||
Weight: 90.0,
|
||||
ActionIDs: []string{"LOG"},
|
||||
Async: true,
|
||||
},
|
||||
}
|
||||
if err := fltrRpc.Call(utils.ApierV1SetThresholdProfile, tPrfl, &result); err != nil {
|
||||
t.Error(err)
|
||||
@@ -566,15 +590,17 @@ func testV1FltrAccounts(t *testing.T) {
|
||||
if err := fltrRpc.Call(utils.ApierV1GetThresholdProfile,
|
||||
&utils.TenantID{Tenant: tPrfl.Tenant, ID: tPrfl.ID}, &rcvTh); err != nil {
|
||||
t.Error(err)
|
||||
} else if !reflect.DeepEqual(tPrfl, rcvTh) {
|
||||
t.Errorf("Expecting: %+v, received: %+v", tPrfl, rcvTh)
|
||||
} else if !reflect.DeepEqual(tPrfl.ThresholdProfile, rcvTh) {
|
||||
t.Errorf("Expecting: %+v, received: %+v", tPrfl.ThresholdProfile, rcvTh)
|
||||
}
|
||||
|
||||
tEv := utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "event1",
|
||||
Event: map[string]interface{}{
|
||||
utils.Account: "1001"},
|
||||
tEv := &engine.ArgsProcessEvent{
|
||||
CGREvent: &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "event1",
|
||||
Event: map[string]interface{}{
|
||||
utils.Account: "1001"},
|
||||
},
|
||||
}
|
||||
var ids []string
|
||||
if err := fltrRpc.Call(utils.ThresholdSv1ProcessEvent, tEv, &ids); err != nil {
|
||||
@@ -587,7 +613,7 @@ func testV1FltrAccounts(t *testing.T) {
|
||||
// Add a filter with fieldName taken value from account 1001
|
||||
// and check if *monetary balance is is minim 11 ( greater than 11)
|
||||
// we expect that the balance to be 10 so the filter should not pass (10 > 11)
|
||||
filter = &engine.Filter{
|
||||
filter.Filter = &engine.Filter{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "FLTR_TH_Accounts",
|
||||
Rules: []*engine.FilterRule{
|
||||
|
||||
@@ -24,7 +24,6 @@ import (
|
||||
"flag"
|
||||
"io/ioutil"
|
||||
"net/rpc"
|
||||
"net/rpc/jsonrpc"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
@@ -109,7 +108,7 @@ func testMCDRCStartEngine(t *testing.T) {
|
||||
// Connect rpc client to rater
|
||||
func testMCDRCRpcConn(t *testing.T) {
|
||||
var err error
|
||||
rater, err = jsonrpc.Dial("tcp", cfg.ListenCfg().RPCJSONListen) // We connect over JSON so we can also troubleshoot if needed
|
||||
rater, err = newRPCClient(cfg.ListenCfg()) // We connect over JSON so we can also troubleshoot if needed
|
||||
if err != nil {
|
||||
t.Fatal("Could not connect to rater: ", err.Error())
|
||||
}
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
/*
|
||||
Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
|
||||
Copyright (C) ITsysCOM GmbH
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>
|
||||
*/
|
||||
package general_tests
|
||||
@@ -21,7 +21,6 @@ package general_tests
|
||||
|
||||
import (
|
||||
"net/rpc"
|
||||
"net/rpc/jsonrpc"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -95,7 +94,7 @@ func testV1RsStartEngine(t *testing.T) {
|
||||
|
||||
func testV1RsRpcConn(t *testing.T) {
|
||||
var err error
|
||||
rlsV1Rpc, err = jsonrpc.Dial("tcp", rlsV1Cfg.ListenCfg().RPCJSONListen) // We connect over JSON so we can also troubleshoot if needed
|
||||
rlsV1Rpc, err = newRPCClient(rlsV1Cfg.ListenCfg()) // We connect over JSON so we can also troubleshoot if needed
|
||||
if err != nil {
|
||||
t.Fatal("Could not connect to rater: ", err.Error())
|
||||
}
|
||||
|
||||
@@ -22,7 +22,6 @@ package general_tests
|
||||
|
||||
import (
|
||||
"net/rpc"
|
||||
"net/rpc/jsonrpc"
|
||||
"path"
|
||||
"reflect"
|
||||
"testing"
|
||||
@@ -106,7 +105,7 @@ func testRPCMethodsStartEngine(t *testing.T) {
|
||||
|
||||
func testRPCMethodsRpcConn(t *testing.T) {
|
||||
var err error
|
||||
rpcRpc, err = jsonrpc.Dial("tcp", rpcCfg.ListenCfg().RPCJSONListen) // We connect over JSON so we can also troubleshoot if needed
|
||||
rpcRpc, err = newRPCClient(rpcCfg.ListenCfg()) // We connect over JSON so we can also troubleshoot if needed
|
||||
if err != nil {
|
||||
t.Fatal("Could not connect to rater: ", err.Error())
|
||||
}
|
||||
@@ -153,14 +152,16 @@ func testRPCMethodsAddData(t *testing.T) {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
//Add a thresholdProfile to disable account
|
||||
tPrfl := &engine.ThresholdProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "THD_AccDisableAndLog",
|
||||
FilterIDs: []string{"*string:~*req.Account:1001", "*string:~*req.DisableAction:DisableAction"},
|
||||
MaxHits: -1,
|
||||
MinSleep: time.Duration(1 * time.Second),
|
||||
Weight: 30.0,
|
||||
ActionIDs: []string{"DISABLE_LOG"},
|
||||
tPrfl := &engine.ThresholdWithCache{
|
||||
ThresholdProfile: &engine.ThresholdProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "THD_AccDisableAndLog",
|
||||
FilterIDs: []string{"*string:~*req.Account:1001", "*string:~*req.DisableAction:DisableAction"},
|
||||
MaxHits: -1,
|
||||
MinSleep: time.Duration(1 * time.Second),
|
||||
Weight: 30.0,
|
||||
ActionIDs: []string{"DISABLE_LOG"},
|
||||
},
|
||||
}
|
||||
if err := rpcRpc.Call(utils.ApierV1SetThresholdProfile, tPrfl, &reply); err != nil {
|
||||
t.Error(err)
|
||||
@@ -168,14 +169,16 @@ func testRPCMethodsAddData(t *testing.T) {
|
||||
t.Error("Unexpected reply returned", reply)
|
||||
}
|
||||
//Add a thresholdProfile to enable account
|
||||
tPrfl2 := &engine.ThresholdProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "THD_AccEnableAndLog",
|
||||
FilterIDs: []string{"*string:~*req.Account:1001", "*string:~*req.EnableAction:EnableAction"},
|
||||
MaxHits: -1,
|
||||
MinSleep: time.Duration(1 * time.Second),
|
||||
Weight: 30.0,
|
||||
ActionIDs: []string{"ENABLE_LOG"},
|
||||
tPrfl2 := &engine.ThresholdWithCache{
|
||||
ThresholdProfile: &engine.ThresholdProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "THD_AccEnableAndLog",
|
||||
FilterIDs: []string{"*string:~*req.Account:1001", "*string:~*req.EnableAction:EnableAction"},
|
||||
MaxHits: -1,
|
||||
MinSleep: time.Duration(1 * time.Second),
|
||||
Weight: 30.0,
|
||||
ActionIDs: []string{"ENABLE_LOG"},
|
||||
},
|
||||
}
|
||||
if err := rpcRpc.Call(utils.ApierV1SetThresholdProfile, tPrfl2, &reply); err != nil {
|
||||
t.Error(err)
|
||||
@@ -215,12 +218,14 @@ func testRPCMethodsAuthorizeSession(t *testing.T) {
|
||||
|
||||
//disable the account
|
||||
var ids []string
|
||||
thEvent := &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "DisableAccount",
|
||||
Event: map[string]interface{}{
|
||||
utils.Account: "1001",
|
||||
"DisableAction": "DisableAction",
|
||||
thEvent := &engine.ArgsProcessEvent{
|
||||
CGREvent: &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "DisableAccount",
|
||||
Event: map[string]interface{}{
|
||||
utils.Account: "1001",
|
||||
"DisableAction": "DisableAction",
|
||||
},
|
||||
},
|
||||
}
|
||||
//process event
|
||||
@@ -260,7 +265,7 @@ func testRPCMethodsAuthorizeSession(t *testing.T) {
|
||||
}
|
||||
|
||||
//enable the account
|
||||
thEvent = &utils.CGREvent{
|
||||
thEvent.CGREvent = &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "EnableAccount",
|
||||
Event: map[string]interface{}{
|
||||
@@ -308,12 +313,14 @@ func testRPCMethodsInitSession(t *testing.T) {
|
||||
|
||||
//disable the account
|
||||
var ids []string
|
||||
thEvent := &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "DisableAccount",
|
||||
Event: map[string]interface{}{
|
||||
utils.Account: "1001",
|
||||
"DisableAction": "DisableAction",
|
||||
thEvent := &engine.ArgsProcessEvent{
|
||||
CGREvent: &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "DisableAccount",
|
||||
Event: map[string]interface{}{
|
||||
utils.Account: "1001",
|
||||
"DisableAction": "DisableAction",
|
||||
},
|
||||
},
|
||||
}
|
||||
//process event
|
||||
@@ -353,7 +360,7 @@ func testRPCMethodsInitSession(t *testing.T) {
|
||||
}
|
||||
|
||||
//enable the account
|
||||
thEvent = &utils.CGREvent{
|
||||
thEvent.CGREvent = &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "EnableAccount",
|
||||
Event: map[string]interface{}{
|
||||
@@ -401,12 +408,14 @@ func testRPCMethodsUpdateSession(t *testing.T) {
|
||||
|
||||
//disable the account
|
||||
var ids []string
|
||||
thEvent := &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "DisableAccount",
|
||||
Event: map[string]interface{}{
|
||||
utils.Account: "1001",
|
||||
"DisableAction": "DisableAction",
|
||||
thEvent := &engine.ArgsProcessEvent{
|
||||
CGREvent: &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "DisableAccount",
|
||||
Event: map[string]interface{}{
|
||||
utils.Account: "1001",
|
||||
"DisableAction": "DisableAction",
|
||||
},
|
||||
},
|
||||
}
|
||||
//process event
|
||||
@@ -446,7 +455,7 @@ func testRPCMethodsUpdateSession(t *testing.T) {
|
||||
}
|
||||
|
||||
//enable the account
|
||||
thEvent = &utils.CGREvent{
|
||||
thEvent.CGREvent = &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "EnableAccount",
|
||||
Event: map[string]interface{}{
|
||||
@@ -511,20 +520,22 @@ func testRPCMethodsTerminateSession(t *testing.T) {
|
||||
}
|
||||
|
||||
func testRPCMethodsProcessCDR(t *testing.T) {
|
||||
args := utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "testRPCMethodsProcessCDR",
|
||||
Event: map[string]interface{}{
|
||||
utils.Tenant: "cgrates.org",
|
||||
utils.ToR: utils.VOICE,
|
||||
utils.OriginID: "testRPCMethodsProcessCDR",
|
||||
utils.RequestType: utils.META_PREPAID,
|
||||
utils.Account: "1001",
|
||||
utils.Subject: "ANY2CNT",
|
||||
utils.Destination: "1002",
|
||||
utils.SetupTime: time.Date(2018, time.January, 7, 16, 60, 0, 0, time.UTC),
|
||||
utils.AnswerTime: time.Date(2018, time.January, 7, 16, 60, 10, 0, time.UTC),
|
||||
utils.Usage: 10 * time.Minute,
|
||||
args := &engine.ArgsProcessEvent{
|
||||
CGREvent: &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "testRPCMethodsProcessCDR",
|
||||
Event: map[string]interface{}{
|
||||
utils.Tenant: "cgrates.org",
|
||||
utils.ToR: utils.VOICE,
|
||||
utils.OriginID: "testRPCMethodsProcessCDR",
|
||||
utils.RequestType: utils.META_PREPAID,
|
||||
utils.Account: "1001",
|
||||
utils.Subject: "ANY2CNT",
|
||||
utils.Destination: "1002",
|
||||
utils.SetupTime: time.Date(2018, time.January, 7, 16, 60, 0, 0, time.UTC),
|
||||
utils.AnswerTime: time.Date(2018, time.January, 7, 16, 60, 10, 0, time.UTC),
|
||||
utils.Usage: 10 * time.Minute,
|
||||
},
|
||||
},
|
||||
}
|
||||
var rply string
|
||||
@@ -537,7 +548,7 @@ func testRPCMethodsProcessCDR(t *testing.T) {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
//verify the CDR
|
||||
var cdrs []*engine.CDR
|
||||
argsCDR := utils.RPCCDRsFilter{RunIDs: []string{utils.MetaRaw}}
|
||||
argsCDR := &utils.RPCCDRsFilterWithArgDispatcher{RPCCDRsFilter: &utils.RPCCDRsFilter{RunIDs: []string{utils.MetaRaw}}}
|
||||
if err := rpcRpc.Call(utils.CDRsV1GetCDRs, argsCDR, &cdrs); err != nil {
|
||||
t.Error("Unexpected error: ", err.Error())
|
||||
} else if len(cdrs) != 1 {
|
||||
@@ -611,12 +622,14 @@ func testRPCMethodsProcessEvent(t *testing.T) {
|
||||
|
||||
//disable the account
|
||||
var ids []string
|
||||
thEvent := &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "DisableAccount",
|
||||
Event: map[string]interface{}{
|
||||
utils.Account: "1001",
|
||||
"DisableAction": "DisableAction",
|
||||
thEvent := &engine.ArgsProcessEvent{
|
||||
CGREvent: &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "DisableAccount",
|
||||
Event: map[string]interface{}{
|
||||
utils.Account: "1001",
|
||||
"DisableAction": "DisableAction",
|
||||
},
|
||||
},
|
||||
}
|
||||
//process event
|
||||
@@ -657,7 +670,7 @@ func testRPCMethodsProcessEvent(t *testing.T) {
|
||||
}
|
||||
|
||||
//enable the account
|
||||
thEvent = &utils.CGREvent{
|
||||
thEvent.CGREvent = &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "EnableAccount",
|
||||
Event: map[string]interface{}{
|
||||
@@ -700,7 +713,7 @@ func testRPCMethodsCdrsProcessCDR(t *testing.T) {
|
||||
time.Sleep(time.Duration(150) * time.Millisecond) // Give time for CDR to be rated
|
||||
//verify the CDR
|
||||
var cdrs []*engine.CDR
|
||||
argsCDR := utils.RPCCDRsFilter{RunIDs: []string{utils.MetaRaw}}
|
||||
argsCDR := utils.RPCCDRsFilterWithArgDispatcher{RPCCDRsFilter: &utils.RPCCDRsFilter{RunIDs: []string{utils.MetaRaw}}}
|
||||
if err := rpcRpc.Call(utils.CDRsV1GetCDRs, argsCDR, &cdrs); err != nil {
|
||||
t.Error("Unexpected error: ", err.Error())
|
||||
} else if len(cdrs) != 1 {
|
||||
@@ -801,15 +814,17 @@ func testRPCMethodsLoadData(t *testing.T) {
|
||||
|
||||
func testRPCMethodsResponderDebit(t *testing.T) {
|
||||
tStart := time.Date(2016, 3, 31, 0, 0, 0, 0, time.UTC)
|
||||
cd := engine.CallDescriptor{
|
||||
CgrID: "testRPCMethodsResponderDebit",
|
||||
Category: "call",
|
||||
Tenant: "cgrates.org",
|
||||
Subject: "1001",
|
||||
Destination: "+49",
|
||||
DurationIndex: 0,
|
||||
TimeStart: tStart,
|
||||
TimeEnd: tStart.Add(time.Duration(15) * time.Second),
|
||||
cd := &engine.CallDescriptorWithArgDispatcher{
|
||||
CallDescriptor: &engine.CallDescriptor{
|
||||
CgrID: "testRPCMethodsResponderDebit",
|
||||
Category: "call",
|
||||
Tenant: "cgrates.org",
|
||||
Subject: "1001",
|
||||
Destination: "+49",
|
||||
DurationIndex: 0,
|
||||
TimeStart: tStart,
|
||||
TimeEnd: tStart.Add(time.Duration(15) * time.Second),
|
||||
},
|
||||
}
|
||||
var cc engine.CallCost
|
||||
//cache the response
|
||||
@@ -822,8 +837,10 @@ func testRPCMethodsResponderDebit(t *testing.T) {
|
||||
t.Errorf("Expecting: %+v, \n received: %+v",
|
||||
15, cc.Cost)
|
||||
}
|
||||
cd2 := engine.CallDescriptor{
|
||||
CgrID: "testRPCMethodsResponderDebit",
|
||||
cd2 := &engine.CallDescriptorWithArgDispatcher{
|
||||
CallDescriptor: &engine.CallDescriptor{
|
||||
CgrID: "testRPCMethodsResponderDebit",
|
||||
},
|
||||
}
|
||||
var ccCache engine.CallCost
|
||||
//cache the response
|
||||
@@ -842,16 +859,18 @@ func testRPCMethodsResponderDebit(t *testing.T) {
|
||||
|
||||
func testRPCMethodsResponderMaxDebit(t *testing.T) {
|
||||
tStart := time.Date(2016, 3, 31, 0, 0, 0, 0, time.UTC)
|
||||
cd := engine.CallDescriptor{
|
||||
CgrID: "testRPCMethodsResponderMaxDebit",
|
||||
Category: "call",
|
||||
Tenant: "cgrates.org",
|
||||
Account: "1001",
|
||||
Subject: "free",
|
||||
Destination: "+49",
|
||||
DurationIndex: 0,
|
||||
TimeStart: tStart,
|
||||
TimeEnd: tStart.Add(time.Duration(15) * time.Second),
|
||||
cd := &engine.CallDescriptorWithArgDispatcher{
|
||||
CallDescriptor: &engine.CallDescriptor{
|
||||
CgrID: "testRPCMethodsResponderMaxDebit",
|
||||
Category: "call",
|
||||
Tenant: "cgrates.org",
|
||||
Account: "1001",
|
||||
Subject: "free",
|
||||
Destination: "+49",
|
||||
DurationIndex: 0,
|
||||
TimeStart: tStart,
|
||||
TimeEnd: tStart.Add(time.Duration(15) * time.Second),
|
||||
},
|
||||
}
|
||||
var cc engine.CallCost
|
||||
//cache the response
|
||||
@@ -864,8 +883,10 @@ func testRPCMethodsResponderMaxDebit(t *testing.T) {
|
||||
t.Errorf("Expecting: %+v, \n received: %+v",
|
||||
0, cc.Cost)
|
||||
}
|
||||
cd2 := engine.CallDescriptor{
|
||||
CgrID: "testRPCMethodsResponderMaxDebit",
|
||||
cd2 := &engine.CallDescriptorWithArgDispatcher{
|
||||
CallDescriptor: &engine.CallDescriptor{
|
||||
CgrID: "testRPCMethodsResponderMaxDebit",
|
||||
},
|
||||
}
|
||||
var ccCache engine.CallCost
|
||||
//cache the response
|
||||
|
||||
@@ -23,7 +23,6 @@ package general_tests
|
||||
import (
|
||||
"flag"
|
||||
"net/rpc"
|
||||
"net/rpc/jsonrpc"
|
||||
"os/exec"
|
||||
"path"
|
||||
"reflect"
|
||||
@@ -116,7 +115,7 @@ func testRedisSentinelStartEngine(t *testing.T) {
|
||||
|
||||
func testRedisSentinelRPCCon(t *testing.T) {
|
||||
var err error
|
||||
sentinelRPC, err = jsonrpc.Dial("tcp", sentinelConfig.ListenCfg().RPCJSONListen) // We connect over JSON so we can also troubleshoot if needed
|
||||
sentinelRPC, err = newRPCClient(sentinelConfig.ListenCfg()) // We connect over JSON so we can also troubleshoot if needed
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -22,7 +22,6 @@ package general_tests
|
||||
|
||||
import (
|
||||
"net/rpc"
|
||||
"net/rpc/jsonrpc"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -93,7 +92,7 @@ func testSes2ItStartEngine(t *testing.T) {
|
||||
|
||||
func testSes2ItRPCConn(t *testing.T) {
|
||||
var err error
|
||||
ses2RPC, err = jsonrpc.Dial("tcp", ses2Cfg.ListenCfg().RPCJSONListen)
|
||||
ses2RPC, err = newRPCClient(ses2Cfg.ListenCfg())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -22,7 +22,6 @@ package general_tests
|
||||
|
||||
import (
|
||||
"net/rpc"
|
||||
"net/rpc/jsonrpc"
|
||||
"path"
|
||||
"reflect"
|
||||
"testing"
|
||||
@@ -56,7 +55,7 @@ var (
|
||||
|
||||
testSes3ItAddVoiceBalance,
|
||||
testSes3ItTerminatWithoutInit,
|
||||
testSes3ItInitAfterTerminate,
|
||||
// testSes3ItInitAfterTerminate,
|
||||
testSes3ItBalance,
|
||||
testSes3ItCDRs,
|
||||
|
||||
@@ -98,7 +97,7 @@ func testSes3ItStartEngine(t *testing.T) {
|
||||
|
||||
func testSes3ItRPCConn(t *testing.T) {
|
||||
var err error
|
||||
ses3RPC, err = jsonrpc.Dial("tcp", ses3Cfg.ListenCfg().RPCJSONListen)
|
||||
ses3RPC, err = newRPCClient(ses3Cfg.ListenCfg())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -175,9 +174,14 @@ func testSes3ItProcessEvent(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(eAttrs, rply.Attributes) {
|
||||
if *encoding == utils.MetaGOB {
|
||||
eAttrs.CGREvent.Event[utils.Usage] = 5 * time.Minute
|
||||
eAttrs.CGREvent.Event[utils.SetupTime], _ = utils.IfaceAsTime("2018-01-07T17:00:00Z", "")
|
||||
eAttrs.CGREvent.Event[utils.AnswerTime], _ = utils.IfaceAsTime("2018-01-07T17:00:10Z", "")
|
||||
}
|
||||
if !reflect.DeepEqual(*eAttrs, *rply.Attributes) {
|
||||
t.Errorf("expecting: %+v, received: %+v",
|
||||
utils.ToJSON(eAttrs), utils.ToJSON(rply.Attributes))
|
||||
eAttrs.CGREvent, rply.Attributes.CGREvent)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -185,7 +189,7 @@ func testSes3ItThreshold1002After(t *testing.T) {
|
||||
var td engine.Threshold
|
||||
eTd := engine.Threshold{Tenant: "cgrates.org", ID: "THD_ACNT_1001", Hits: 1}
|
||||
if err := ses3RPC.Call(utils.ThresholdSv1GetThreshold,
|
||||
&utils.TenantID{Tenant: "cgrates.org", ID: "THD_ACNT_1001"}, &td); err != nil {
|
||||
&utils.TenantIDWithArgDispatcher{TenantID: &utils.TenantID{Tenant: "cgrates.org", ID: "THD_ACNT_1001"}}, &td); err != nil {
|
||||
t.Error(err)
|
||||
} else if !reflect.DeepEqual(eTd.Tenant, td.Tenant) {
|
||||
t.Errorf("expecting: %+v, received: %+v", eTd.Tenant, td.Tenant)
|
||||
@@ -205,7 +209,7 @@ func testSes3ItStatMetricsAfter(t *testing.T) {
|
||||
}
|
||||
|
||||
if err := ses3RPC.Call(utils.StatSv1GetQueueStringMetrics,
|
||||
&utils.TenantID{Tenant: "cgrates.org", ID: "Stat_1"}, &metrics); err != nil {
|
||||
&utils.TenantIDWithArgDispatcher{TenantID: &utils.TenantID{Tenant: "cgrates.org", ID: "Stat_1"}}, &metrics); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if !reflect.DeepEqual(statMetrics, metrics) {
|
||||
@@ -217,7 +221,7 @@ func testSes3ItThreshold1002After2(t *testing.T) {
|
||||
var td engine.Threshold
|
||||
eTd := engine.Threshold{Tenant: "cgrates.org", ID: "THD_ACNT_1001", Hits: 2}
|
||||
if err := ses3RPC.Call(utils.ThresholdSv1GetThreshold,
|
||||
&utils.TenantID{Tenant: "cgrates.org", ID: "THD_ACNT_1001"}, &td); err != nil {
|
||||
&utils.TenantIDWithArgDispatcher{TenantID: &utils.TenantID{Tenant: "cgrates.org", ID: "THD_ACNT_1001"}}, &td); err != nil {
|
||||
t.Error(err)
|
||||
} else if !reflect.DeepEqual(eTd.Tenant, td.Tenant) {
|
||||
t.Errorf("expecting: %+v, received: %+v", eTd.Tenant, td.Tenant)
|
||||
@@ -237,7 +241,7 @@ func testSes3ItStatMetricsAfter2(t *testing.T) {
|
||||
}
|
||||
|
||||
if err := ses3RPC.Call(utils.StatSv1GetQueueStringMetrics,
|
||||
&utils.TenantID{Tenant: "cgrates.org", ID: "Stat_1"}, &metrics); err != nil {
|
||||
&utils.TenantIDWithArgDispatcher{TenantID: &utils.TenantID{Tenant: "cgrates.org", ID: "Stat_1"}}, &metrics); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if !reflect.DeepEqual(statMetrics, metrics) {
|
||||
@@ -301,16 +305,16 @@ func testSes3ItTerminatWithoutInit(t *testing.T) {
|
||||
var rply string
|
||||
if err := ses3RPC.Call(utils.SessionSv1TerminateSession,
|
||||
args, &rply); err != nil {
|
||||
t.Error(err)
|
||||
t.Fatal(err)
|
||||
}
|
||||
if rply != utils.OK {
|
||||
t.Errorf("Unexpected reply: %s", rply)
|
||||
}
|
||||
}()
|
||||
|
||||
}
|
||||
// }
|
||||
|
||||
func testSes3ItInitAfterTerminate(t *testing.T) {
|
||||
// func testSes3ItInitAfterTerminate(t *testing.T) {
|
||||
time.Sleep(3 * time.Millisecond)
|
||||
args1 := &sessions.V1InitSessionArgs{
|
||||
InitSession: true,
|
||||
@@ -342,14 +346,14 @@ func testSes3ItInitAfterTerminate(t *testing.T) {
|
||||
}
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
aSessions := make([]*sessions.ExternalSession, 0)
|
||||
if err := ses3RPC.Call(utils.SessionSv1GetActiveSessions, nil, &aSessions); err == nil ||
|
||||
if err := ses3RPC.Call(utils.SessionSv1GetActiveSessions, new(utils.SessionFilter), &aSessions); err == nil ||
|
||||
err.Error() != utils.ErrNotFound.Error() {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
func testSes3ItBalance(t *testing.T) {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
var acnt *engine.Account
|
||||
var acnt engine.Account
|
||||
attrs := &utils.AttrGetAccount{
|
||||
Tenant: "cgrates.org",
|
||||
Account: "1002",
|
||||
@@ -364,22 +368,25 @@ func testSes3ItBalance(t *testing.T) {
|
||||
|
||||
func testSes3ItCDRs(t *testing.T) {
|
||||
var reply string
|
||||
if err := ses3RPC.Call(utils.SessionSv1ProcessCDR, &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "TestSesItProccesCDR",
|
||||
Event: map[string]interface{}{
|
||||
utils.Tenant: "cgrates.org",
|
||||
utils.Category: "call",
|
||||
utils.ToR: utils.VOICE,
|
||||
utils.OriginID: "TestTerminate",
|
||||
utils.RequestType: utils.META_PREPAID,
|
||||
utils.Account: "1002",
|
||||
utils.Subject: "1001",
|
||||
utils.Destination: "1001",
|
||||
utils.SetupTime: time.Date(2018, time.January, 7, 16, 60, 0, 0, time.UTC),
|
||||
utils.AnswerTime: time.Date(2018, time.January, 7, 16, 60, 10, 0, time.UTC),
|
||||
utils.Usage: 2 * time.Second,
|
||||
}}, &reply); err != nil {
|
||||
if err := ses3RPC.Call(utils.SessionSv1ProcessCDR, &engine.ArgsProcessEvent{
|
||||
CGREvent: &utils.CGREvent{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "TestSesItProccesCDR",
|
||||
Event: map[string]interface{}{
|
||||
utils.Tenant: "cgrates.org",
|
||||
utils.Category: "call",
|
||||
utils.ToR: utils.VOICE,
|
||||
utils.OriginID: "TestTerminate",
|
||||
utils.RequestType: utils.META_PREPAID,
|
||||
utils.Account: "1002",
|
||||
utils.Subject: "1001",
|
||||
utils.Destination: "1001",
|
||||
utils.SetupTime: time.Date(2018, time.January, 7, 16, 60, 0, 0, time.UTC),
|
||||
utils.AnswerTime: time.Date(2018, time.January, 7, 16, 60, 10, 0, time.UTC),
|
||||
utils.Usage: 2 * time.Second,
|
||||
},
|
||||
},
|
||||
}, &reply); err != nil {
|
||||
t.Error(err)
|
||||
} else if reply != utils.OK {
|
||||
t.Errorf("Received reply: %s", reply)
|
||||
|
||||
@@ -21,7 +21,6 @@ package general_tests
|
||||
|
||||
import (
|
||||
"net/rpc"
|
||||
"net/rpc/jsonrpc"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -87,7 +86,7 @@ func testSesItStartEngine(t *testing.T) {
|
||||
|
||||
func testSesItRPCConn(t *testing.T) {
|
||||
var err error
|
||||
sesRPC, err = jsonrpc.Dial("tcp", sesCfg.ListenCfg().RPCJSONListen)
|
||||
sesRPC, err = newRPCClient(sesCfg.ListenCfg())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -103,7 +102,7 @@ func testSesItLoadFromFolder(t *testing.T) {
|
||||
}
|
||||
|
||||
func testAccountBalance2(t *testing.T, sracc, srten, balType string, expected float64) {
|
||||
var acnt *engine.Account
|
||||
var acnt engine.Account
|
||||
attrs := &utils.AttrGetAccount{
|
||||
Tenant: srten,
|
||||
Account: sracc,
|
||||
@@ -198,7 +197,7 @@ func testSesItTerminateSession(t *testing.T) {
|
||||
t.Errorf("Unexpected reply: %s", rply)
|
||||
}
|
||||
aSessions := make([]*sessions.ExternalSession, 0)
|
||||
if err := sesRPC.Call(utils.SessionSv1GetActiveSessions, nil, &aSessions); err == nil ||
|
||||
if err := sesRPC.Call(utils.SessionSv1GetActiveSessions, new(utils.SessionFilter), &aSessions); err == nil ||
|
||||
err.Error() != utils.ErrNotFound.Error() {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
@@ -22,7 +22,6 @@ package general_tests
|
||||
import (
|
||||
"fmt"
|
||||
"net/rpc"
|
||||
"net/rpc/jsonrpc"
|
||||
"os/exec"
|
||||
"path"
|
||||
"testing"
|
||||
@@ -129,10 +128,10 @@ func testSessionSRplStartEngine(t *testing.T) {
|
||||
|
||||
// Connect rpc client to rater
|
||||
func testSessionSRplApierRpcConn(t *testing.T) {
|
||||
if smgRplcMstrRPC, err = jsonrpc.Dial("tcp", smgRplcMasterCfg.ListenCfg().RPCJSONListen); err != nil {
|
||||
if smgRplcMstrRPC, err = newRPCClient(smgRplcMasterCfg.ListenCfg()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if smgRplcSlvRPC, err = jsonrpc.Dial("tcp", smgRplcSlaveCfg.ListenCfg().RPCJSONListen); err != nil {
|
||||
if smgRplcSlvRPC, err = newRPCClient(smgRplcSlaveCfg.ListenCfg()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -151,11 +150,11 @@ func testSessionSRplInitiate(t *testing.T) {
|
||||
var aSessions []*sessions.ExternalSession
|
||||
//make sure we don't have active sessions on master and passive on slave
|
||||
if err := smgRplcMstrRPC.Call(utils.SessionSv1GetActiveSessions,
|
||||
nil, &aSessions); err == nil || err.Error() != utils.ErrNotFound.Error() {
|
||||
new(utils.SessionFilter), &aSessions); err == nil || err.Error() != utils.ErrNotFound.Error() {
|
||||
t.Error(err)
|
||||
}
|
||||
if err := smgRplcSlvRPC.Call(utils.SessionSv1GetPassiveSessions,
|
||||
nil, &aSessions); err == nil || err.Error() != utils.ErrNotFound.Error() {
|
||||
new(utils.SessionFilter), &aSessions); err == nil || err.Error() != utils.ErrNotFound.Error() {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
@@ -299,14 +298,14 @@ func testSessionSRplActivateSlave(t *testing.T) {
|
||||
}
|
||||
// activate sessions on slave
|
||||
var rplActivate string
|
||||
if err := smgRplcSlvRPC.Call(utils.SessionSv1ActivateSessions, nil, &rplActivate); err != nil {
|
||||
if err := smgRplcSlvRPC.Call(utils.SessionSv1ActivateSessions, []string{}, &rplActivate); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
//check if the active session is on slave now
|
||||
var aSessions []*sessions.ExternalSession
|
||||
var autoDebit1, autoDebit2 time.Time
|
||||
if err := smgRplcSlvRPC.Call(utils.SessionSv1GetActiveSessions, nil, &aSessions); err != nil {
|
||||
if err := smgRplcSlvRPC.Call(utils.SessionSv1GetActiveSessions, new(utils.SessionFilter), &aSessions); err != nil {
|
||||
t.Error(err)
|
||||
} else if len(aSessions) != 1 {
|
||||
t.Errorf("Unexpected number of sessions received: %+v", utils.ToIJSON(aSessions))
|
||||
@@ -317,7 +316,7 @@ func testSessionSRplActivateSlave(t *testing.T) {
|
||||
t.Errorf("unexpected NextAutoDebit: %s", utils.ToIJSON(aSessions[0]))
|
||||
}
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
if err := smgRplcSlvRPC.Call(utils.SessionSv1GetActiveSessions, nil, &aSessions); err != nil {
|
||||
if err := smgRplcSlvRPC.Call(utils.SessionSv1GetActiveSessions, new(utils.SessionFilter), &aSessions); err != nil {
|
||||
t.Error(err)
|
||||
} else if len(aSessions) != 1 {
|
||||
t.Errorf("Unexpected number of sessions received: %+v", utils.ToIJSON(aSessions))
|
||||
@@ -378,13 +377,13 @@ func testSessionSRplTerminate(t *testing.T) {
|
||||
|
||||
//check if the session was terminated on slave
|
||||
if err := smgRplcSlvRPC.Call(utils.SessionSv1GetActiveSessions,
|
||||
nil, &aSessions); err == nil || err.Error() != utils.ErrNotFound.Error() {
|
||||
new(utils.SessionFilter), &aSessions); err == nil || err.Error() != utils.ErrNotFound.Error() {
|
||||
t.Errorf("Error: %v with len(aSessions)=%v , session : %+v", err, len(aSessions), utils.ToIJSON(aSessions))
|
||||
}
|
||||
// check to don't have passive session on slave
|
||||
var pSessions []*sessions.ExternalSession
|
||||
if err := smgRplcSlvRPC.Call(utils.SessionSv1GetPassiveSessions, nil,
|
||||
&pSessions); err == nil || err.Error() != utils.ErrNotFound.Error() {
|
||||
if err := smgRplcSlvRPC.Call(utils.SessionSv1GetPassiveSessions,
|
||||
new(utils.SessionFilter), &pSessions); err == nil || err.Error() != utils.ErrNotFound.Error() {
|
||||
t.Errorf("Error: %v with len(pSessions)=%v , session : %+v", err, len(pSessions), utils.ToIJSON(pSessions))
|
||||
}
|
||||
|
||||
|
||||
@@ -21,7 +21,6 @@ package general_tests
|
||||
|
||||
import (
|
||||
"net/rpc"
|
||||
"net/rpc/jsonrpc"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -89,7 +88,7 @@ func testSrItStartEngine(t *testing.T) {
|
||||
|
||||
func testSrItRPCConn(t *testing.T) {
|
||||
var err error
|
||||
srrpc, err = jsonrpc.Dial("tcp", srCfg.ListenCfg().RPCJSONListen)
|
||||
srrpc, err = newRPCClient(srCfg.ListenCfg())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -200,7 +199,7 @@ func testSrItTerminateSession(t *testing.T) {
|
||||
t.Errorf("Unexpected reply: %s", rply)
|
||||
}
|
||||
aSessions := make([]*sessions.ExternalSession, 0)
|
||||
if err := srrpc.Call(utils.SessionSv1GetActiveSessions, nil, &aSessions); err == nil ||
|
||||
if err := srrpc.Call(utils.SessionSv1GetActiveSessions, new(utils.SessionFilter), &aSessions); err == nil ||
|
||||
err.Error() != utils.ErrNotFound.Error() {
|
||||
t.Error(err)
|
||||
}
|
||||
@@ -289,7 +288,7 @@ func testSrItTerminateSession2(t *testing.T) {
|
||||
t.Errorf("Unexpected reply: %s", rply)
|
||||
}
|
||||
aSessions := make([]*sessions.ExternalSession, 0)
|
||||
if err := srrpc.Call(utils.SessionSv1GetActiveSessions, nil, &aSessions); err == nil ||
|
||||
if err := srrpc.Call(utils.SessionSv1GetActiveSessions, new(utils.SessionFilter), &aSessions); err == nil ||
|
||||
err.Error() != utils.ErrNotFound.Error() {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
@@ -22,12 +22,12 @@ package general_tests
|
||||
|
||||
import (
|
||||
"net/rpc"
|
||||
"net/rpc/jsonrpc"
|
||||
"path"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "github.com/cgrates/cgrates/apier/v1"
|
||||
"github.com/cgrates/cgrates/config"
|
||||
"github.com/cgrates/cgrates/engine"
|
||||
"github.com/cgrates/cgrates/utils"
|
||||
@@ -37,7 +37,7 @@ var (
|
||||
splSv1CfgPath string
|
||||
splSv1Cfg *config.CGRConfig
|
||||
splSv1Rpc *rpc.Client
|
||||
splPrf *engine.SupplierProfile
|
||||
splPrf *v1.SupplierWithCache
|
||||
splSv1ConfDIR string //run tests for specific configuration
|
||||
|
||||
sTestsSupplierSV1 = []func(t *testing.T){
|
||||
@@ -104,7 +104,7 @@ func testV1SplSStartEngine(t *testing.T) {
|
||||
|
||||
func testV1SplSRpcConn(t *testing.T) {
|
||||
var err error
|
||||
splSv1Rpc, err = jsonrpc.Dial("tcp", splSv1Cfg.ListenCfg().RPCJSONListen) // We connect over JSON so we can also troubleshoot if needed
|
||||
splSv1Rpc, err = newRPCClient(splSv1Cfg.ListenCfg()) // We connect over JSON so we can also troubleshoot if needed
|
||||
if err != nil {
|
||||
t.Fatal("Could not connect to rater: ", err.Error())
|
||||
}
|
||||
@@ -126,20 +126,22 @@ func testV1SplSSetSupplierProfilesWithoutRatingPlanIDs(t *testing.T) {
|
||||
err.Error() != utils.ErrNotFound.Error() {
|
||||
t.Error(err)
|
||||
}
|
||||
splPrf = &engine.SupplierProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "TEST_PROFILE2",
|
||||
Sorting: utils.MetaLC,
|
||||
Suppliers: []*engine.Supplier{
|
||||
{
|
||||
ID: "SPL1",
|
||||
FilterIDs: []string{"FLTR_1"},
|
||||
AccountIDs: []string{"accc"},
|
||||
Weight: 20,
|
||||
Blocker: false,
|
||||
splPrf = &v1.SupplierWithCache{
|
||||
SupplierProfile: &engine.SupplierProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "TEST_PROFILE2",
|
||||
Sorting: utils.MetaLC,
|
||||
Suppliers: []*engine.Supplier{
|
||||
{
|
||||
ID: "SPL1",
|
||||
FilterIDs: []string{"FLTR_1"},
|
||||
AccountIDs: []string{"accc"},
|
||||
Weight: 20,
|
||||
Blocker: false,
|
||||
},
|
||||
},
|
||||
Weight: 10,
|
||||
},
|
||||
Weight: 10,
|
||||
}
|
||||
var result string
|
||||
if err := splSv1Rpc.Call(utils.ApierV1SetSupplierProfile, splPrf, &result); err != nil {
|
||||
@@ -150,8 +152,8 @@ func testV1SplSSetSupplierProfilesWithoutRatingPlanIDs(t *testing.T) {
|
||||
if err := splSv1Rpc.Call(utils.ApierV1GetSupplierProfile,
|
||||
&utils.TenantID{Tenant: "cgrates.org", ID: "TEST_PROFILE2"}, &reply); err != nil {
|
||||
t.Error(err)
|
||||
} else if !reflect.DeepEqual(splPrf, reply) {
|
||||
t.Errorf("Expecting: %+v, received: %+v", splPrf, reply)
|
||||
} else if !reflect.DeepEqual(splPrf.SupplierProfile, reply) {
|
||||
t.Errorf("Expecting: %+v, received: %+v", splPrf.SupplierProfile, reply)
|
||||
}
|
||||
ev := &engine.ArgsGetSuppliers{
|
||||
CGREvent: &utils.CGREvent{
|
||||
@@ -189,34 +191,36 @@ func testV1SplSAddNewSplPrf(t *testing.T) {
|
||||
t.Error(err)
|
||||
}
|
||||
//create a new Supplier Profile to test *reas and *reds sorting strategy
|
||||
splPrf = &engine.SupplierProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "SPL_ResourceTest",
|
||||
Sorting: utils.MetaReas,
|
||||
FilterIDs: []string{"*string:~*req.CustomField:ResourceTest"},
|
||||
Suppliers: []*engine.Supplier{
|
||||
//supplier1 will have ResourceUsage = 11
|
||||
{
|
||||
ID: "supplier1",
|
||||
ResourceIDs: []string{"ResourceSupplier1", "Resource2Supplier1"},
|
||||
Weight: 20,
|
||||
Blocker: false,
|
||||
},
|
||||
//supplier2 and supplier3 will have the same ResourceUsage = 7
|
||||
{
|
||||
ID: "supplier2",
|
||||
ResourceIDs: []string{"ResourceSupplier2"},
|
||||
Weight: 20,
|
||||
Blocker: false,
|
||||
},
|
||||
{
|
||||
ID: "supplier3",
|
||||
ResourceIDs: []string{"ResourceSupplier3"},
|
||||
Weight: 35,
|
||||
Blocker: false,
|
||||
splPrf = &v1.SupplierWithCache{
|
||||
SupplierProfile: &engine.SupplierProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "SPL_ResourceTest",
|
||||
Sorting: utils.MetaReas,
|
||||
FilterIDs: []string{"*string:~*req.CustomField:ResourceTest"},
|
||||
Suppliers: []*engine.Supplier{
|
||||
//supplier1 will have ResourceUsage = 11
|
||||
{
|
||||
ID: "supplier1",
|
||||
ResourceIDs: []string{"ResourceSupplier1", "Resource2Supplier1"},
|
||||
Weight: 20,
|
||||
Blocker: false,
|
||||
},
|
||||
//supplier2 and supplier3 will have the same ResourceUsage = 7
|
||||
{
|
||||
ID: "supplier2",
|
||||
ResourceIDs: []string{"ResourceSupplier2"},
|
||||
Weight: 20,
|
||||
Blocker: false,
|
||||
},
|
||||
{
|
||||
ID: "supplier3",
|
||||
ResourceIDs: []string{"ResourceSupplier3"},
|
||||
Weight: 35,
|
||||
Blocker: false,
|
||||
},
|
||||
},
|
||||
Weight: 10,
|
||||
},
|
||||
Weight: 10,
|
||||
}
|
||||
var result string
|
||||
if err := splSv1Rpc.Call(utils.ApierV1SetSupplierProfile, splPrf, &result); err != nil {
|
||||
@@ -227,27 +231,29 @@ func testV1SplSAddNewSplPrf(t *testing.T) {
|
||||
if err := splSv1Rpc.Call(utils.ApierV1GetSupplierProfile,
|
||||
&utils.TenantID{Tenant: "cgrates.org", ID: "SPL_ResourceTest"}, &reply); err != nil {
|
||||
t.Error(err)
|
||||
} else if !reflect.DeepEqual(splPrf, reply) {
|
||||
t.Errorf("Expecting: %+v, received: %+v", splPrf, reply)
|
||||
} else if !reflect.DeepEqual(splPrf.SupplierProfile, reply) {
|
||||
t.Errorf("Expecting: %+v, received: %+v", splPrf.SupplierProfile, reply)
|
||||
}
|
||||
}
|
||||
|
||||
func testV1SplSAddNewResPrf(t *testing.T) {
|
||||
var result string
|
||||
//add ResourceSupplier1
|
||||
rPrf := &engine.ResourceProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "ResourceSupplier1",
|
||||
FilterIDs: []string{"*string:~*req.Supplier:supplier1", "*string:~*req.ResID:ResourceSupplier1"},
|
||||
ActivationInterval: &utils.ActivationInterval{
|
||||
ActivationTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC),
|
||||
ExpiryTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC),
|
||||
rPrf := &v1.ResourceWithCache{
|
||||
ResourceProfile: &engine.ResourceProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "ResourceSupplier1",
|
||||
FilterIDs: []string{"*string:~*req.Supplier:supplier1", "*string:~*req.ResID:ResourceSupplier1"},
|
||||
ActivationInterval: &utils.ActivationInterval{
|
||||
ActivationTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC),
|
||||
ExpiryTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC),
|
||||
},
|
||||
UsageTTL: time.Duration(1) * time.Minute,
|
||||
Limit: 10,
|
||||
Stored: true,
|
||||
Weight: 20,
|
||||
ThresholdIDs: []string{utils.META_NONE},
|
||||
},
|
||||
UsageTTL: time.Duration(1) * time.Minute,
|
||||
Limit: 10,
|
||||
Stored: true,
|
||||
Weight: 20,
|
||||
ThresholdIDs: []string{utils.META_NONE},
|
||||
}
|
||||
|
||||
if err := splSv1Rpc.Call(utils.ApierV1SetResourceProfile, rPrf, &result); err != nil {
|
||||
@@ -256,19 +262,21 @@ func testV1SplSAddNewResPrf(t *testing.T) {
|
||||
t.Error("Unexpected reply returned", result)
|
||||
}
|
||||
//add Resource2Supplier1
|
||||
rPrf2 := &engine.ResourceProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "Resource2Supplier1",
|
||||
FilterIDs: []string{"*string:~*req.Supplier:supplier1", "*string:~*req.ResID:Resource2Supplier1"},
|
||||
ActivationInterval: &utils.ActivationInterval{
|
||||
ActivationTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC),
|
||||
ExpiryTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC),
|
||||
rPrf2 := &v1.ResourceWithCache{
|
||||
ResourceProfile: &engine.ResourceProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "Resource2Supplier1",
|
||||
FilterIDs: []string{"*string:~*req.Supplier:supplier1", "*string:~*req.ResID:Resource2Supplier1"},
|
||||
ActivationInterval: &utils.ActivationInterval{
|
||||
ActivationTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC),
|
||||
ExpiryTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC),
|
||||
},
|
||||
UsageTTL: time.Duration(1) * time.Minute,
|
||||
Limit: 10,
|
||||
Stored: true,
|
||||
Weight: 30,
|
||||
ThresholdIDs: []string{utils.META_NONE},
|
||||
},
|
||||
UsageTTL: time.Duration(1) * time.Minute,
|
||||
Limit: 10,
|
||||
Stored: true,
|
||||
Weight: 30,
|
||||
ThresholdIDs: []string{utils.META_NONE},
|
||||
}
|
||||
|
||||
if err := splSv1Rpc.Call(utils.ApierV1SetResourceProfile, rPrf2, &result); err != nil {
|
||||
@@ -277,19 +285,21 @@ func testV1SplSAddNewResPrf(t *testing.T) {
|
||||
t.Error("Unexpected reply returned", result)
|
||||
}
|
||||
//add ResourceSupplier2
|
||||
rPrf3 := &engine.ResourceProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "ResourceSupplier2",
|
||||
FilterIDs: []string{"*string:~*req.Supplier:supplier2", "*string:~*req.ResID:ResourceSupplier2"},
|
||||
ActivationInterval: &utils.ActivationInterval{
|
||||
ActivationTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC),
|
||||
ExpiryTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC),
|
||||
rPrf3 := &v1.ResourceWithCache{
|
||||
ResourceProfile: &engine.ResourceProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "ResourceSupplier2",
|
||||
FilterIDs: []string{"*string:~*req.Supplier:supplier2", "*string:~*req.ResID:ResourceSupplier2"},
|
||||
ActivationInterval: &utils.ActivationInterval{
|
||||
ActivationTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC),
|
||||
ExpiryTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC),
|
||||
},
|
||||
UsageTTL: time.Duration(1) * time.Minute,
|
||||
Limit: 10,
|
||||
Stored: true,
|
||||
Weight: 20,
|
||||
ThresholdIDs: []string{utils.META_NONE},
|
||||
},
|
||||
UsageTTL: time.Duration(1) * time.Minute,
|
||||
Limit: 10,
|
||||
Stored: true,
|
||||
Weight: 20,
|
||||
ThresholdIDs: []string{utils.META_NONE},
|
||||
}
|
||||
|
||||
if err := splSv1Rpc.Call(utils.ApierV1SetResourceProfile, rPrf3, &result); err != nil {
|
||||
@@ -298,19 +308,21 @@ func testV1SplSAddNewResPrf(t *testing.T) {
|
||||
t.Error("Unexpected reply returned", result)
|
||||
}
|
||||
//add ResourceSupplier2
|
||||
rPrf4 := &engine.ResourceProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "ResourceSupplier3",
|
||||
FilterIDs: []string{"*string:~*req.Supplier:supplier3", "*string:~*req.ResID:ResourceSupplier3"},
|
||||
ActivationInterval: &utils.ActivationInterval{
|
||||
ActivationTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC),
|
||||
ExpiryTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC),
|
||||
rPrf4 := &v1.ResourceWithCache{
|
||||
ResourceProfile: &engine.ResourceProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "ResourceSupplier3",
|
||||
FilterIDs: []string{"*string:~*req.Supplier:supplier3", "*string:~*req.ResID:ResourceSupplier3"},
|
||||
ActivationInterval: &utils.ActivationInterval{
|
||||
ActivationTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC),
|
||||
ExpiryTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC),
|
||||
},
|
||||
UsageTTL: time.Duration(1) * time.Minute,
|
||||
Limit: 10,
|
||||
Stored: true,
|
||||
Weight: 20,
|
||||
ThresholdIDs: []string{utils.META_NONE},
|
||||
},
|
||||
UsageTTL: time.Duration(1) * time.Minute,
|
||||
Limit: 10,
|
||||
Stored: true,
|
||||
Weight: 20,
|
||||
ThresholdIDs: []string{utils.META_NONE},
|
||||
}
|
||||
|
||||
if err := splSv1Rpc.Call(utils.ApierV1SetResourceProfile, rPrf4, &result); err != nil {
|
||||
@@ -452,34 +464,36 @@ func testV1SplSAddNewSplPrf2(t *testing.T) {
|
||||
t.Error(err)
|
||||
}
|
||||
//create a new Supplier Profile to test *reas and *reds sorting strategy
|
||||
splPrf = &engine.SupplierProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "SPL_ResourceDescendent",
|
||||
Sorting: utils.MetaReds,
|
||||
FilterIDs: []string{"*string:~*req.CustomField:ResourceDescendent"},
|
||||
Suppliers: []*engine.Supplier{
|
||||
//supplier1 will have ResourceUsage = 11
|
||||
{
|
||||
ID: "supplier1",
|
||||
ResourceIDs: []string{"ResourceSupplier1", "Resource2Supplier1"},
|
||||
Weight: 20,
|
||||
Blocker: false,
|
||||
},
|
||||
//supplier2 and supplier3 will have the same ResourceUsage = 7
|
||||
{
|
||||
ID: "supplier2",
|
||||
ResourceIDs: []string{"ResourceSupplier2"},
|
||||
Weight: 20,
|
||||
Blocker: false,
|
||||
},
|
||||
{
|
||||
ID: "supplier3",
|
||||
ResourceIDs: []string{"ResourceSupplier3"},
|
||||
Weight: 35,
|
||||
Blocker: false,
|
||||
splPrf = &v1.SupplierWithCache{
|
||||
SupplierProfile: &engine.SupplierProfile{
|
||||
Tenant: "cgrates.org",
|
||||
ID: "SPL_ResourceDescendent",
|
||||
Sorting: utils.MetaReds,
|
||||
FilterIDs: []string{"*string:~*req.CustomField:ResourceDescendent"},
|
||||
Suppliers: []*engine.Supplier{
|
||||
//supplier1 will have ResourceUsage = 11
|
||||
{
|
||||
ID: "supplier1",
|
||||
ResourceIDs: []string{"ResourceSupplier1", "Resource2Supplier1"},
|
||||
Weight: 20,
|
||||
Blocker: false,
|
||||
},
|
||||
//supplier2 and supplier3 will have the same ResourceUsage = 7
|
||||
{
|
||||
ID: "supplier2",
|
||||
ResourceIDs: []string{"ResourceSupplier2"},
|
||||
Weight: 20,
|
||||
Blocker: false,
|
||||
},
|
||||
{
|
||||
ID: "supplier3",
|
||||
ResourceIDs: []string{"ResourceSupplier3"},
|
||||
Weight: 35,
|
||||
Blocker: false,
|
||||
},
|
||||
},
|
||||
Weight: 10,
|
||||
},
|
||||
Weight: 10,
|
||||
}
|
||||
var result string
|
||||
if err := splSv1Rpc.Call(utils.ApierV1SetSupplierProfile, splPrf, &result); err != nil {
|
||||
@@ -490,8 +504,8 @@ func testV1SplSAddNewSplPrf2(t *testing.T) {
|
||||
if err := splSv1Rpc.Call(utils.ApierV1GetSupplierProfile,
|
||||
&utils.TenantID{Tenant: "cgrates.org", ID: "SPL_ResourceDescendent"}, &reply); err != nil {
|
||||
t.Error(err)
|
||||
} else if !reflect.DeepEqual(splPrf, reply) {
|
||||
t.Errorf("Expecting: %+v, received: %+v", splPrf, reply)
|
||||
} else if !reflect.DeepEqual(splPrf.SupplierProfile, reply) {
|
||||
t.Errorf("Expecting: %+v, received: %+v", splPrf.SupplierProfile, reply)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -99,7 +99,7 @@ func testSTIStartEngine(t *testing.T) {
|
||||
// Connect rpc client to rater
|
||||
func testSTIRpcConn(t *testing.T) {
|
||||
var err error
|
||||
stiRpc, err = jsonrpc.Dial("tcp", stiCfg.RPCJSONListen) // We connect over JSON so we can also troubleshoot if needed
|
||||
stiRpc, err = newRPCClient(stiCfg) // We connect over JSON so we can also troubleshoot if needed
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -22,7 +22,6 @@ package general_tests
|
||||
|
||||
import (
|
||||
"net/rpc"
|
||||
"net/rpc/jsonrpc"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -103,7 +102,7 @@ func testTpStartEngine(t *testing.T) {
|
||||
// Connect rpc client to rater
|
||||
func testTpRpcConn(t *testing.T) {
|
||||
var err error
|
||||
tpRPC, err = jsonrpc.Dial("tcp", tpCfg.ListenCfg().RPCJSONListen) // We connect over JSON so we can also troubleshoot if needed
|
||||
tpRPC, err = newRPCClient(tpCfg.ListenCfg()) // We connect over JSON so we can also troubleshoot if needed
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -120,14 +119,16 @@ func testTpLoadTariffPlanFromFolder(t *testing.T) {
|
||||
|
||||
func testTpBalanceCounter(t *testing.T) {
|
||||
tStart := time.Date(2016, 3, 31, 0, 0, 0, 0, time.UTC)
|
||||
cd := engine.CallDescriptor{
|
||||
Category: "call",
|
||||
Tenant: "cgrates.org",
|
||||
Subject: "1001",
|
||||
Destination: "+49",
|
||||
DurationIndex: 0,
|
||||
TimeStart: tStart,
|
||||
TimeEnd: tStart.Add(time.Duration(20) * time.Second),
|
||||
cd := &engine.CallDescriptorWithArgDispatcher{
|
||||
CallDescriptor: &engine.CallDescriptor{
|
||||
Category: "call",
|
||||
Tenant: "cgrates.org",
|
||||
Subject: "1001",
|
||||
Destination: "+49",
|
||||
DurationIndex: 0,
|
||||
TimeStart: tStart,
|
||||
TimeEnd: tStart.Add(time.Duration(20) * time.Second),
|
||||
},
|
||||
}
|
||||
var cc engine.CallCost
|
||||
if err := tpRPC.Call(utils.ResponderDebit, cd, &cc); err != nil {
|
||||
@@ -202,15 +203,17 @@ func testTpZeroCost(t *testing.T) {
|
||||
}
|
||||
balanceValueBefore := acnt.BalanceMap[utils.MONETARY][0].Value
|
||||
tStart := time.Date(2016, 3, 31, 0, 0, 0, 0, time.UTC)
|
||||
cd := engine.CallDescriptor{
|
||||
Category: "call",
|
||||
Tenant: "cgrates.org",
|
||||
Subject: "free",
|
||||
Account: "1012",
|
||||
Destination: "+49",
|
||||
DurationIndex: 0,
|
||||
TimeStart: tStart,
|
||||
TimeEnd: tStart.Add(time.Duration(20) * time.Second),
|
||||
cd := &engine.CallDescriptorWithArgDispatcher{
|
||||
CallDescriptor: &engine.CallDescriptor{
|
||||
Category: "call",
|
||||
Tenant: "cgrates.org",
|
||||
Subject: "free",
|
||||
Account: "1012",
|
||||
Destination: "+49",
|
||||
DurationIndex: 0,
|
||||
TimeStart: tStart,
|
||||
TimeEnd: tStart.Add(time.Duration(20) * time.Second),
|
||||
},
|
||||
}
|
||||
var cc engine.CallCost
|
||||
if err := tpRPC.Call(utils.ResponderDebit, cd, &cc); err != nil {
|
||||
@@ -227,15 +230,17 @@ func testTpZeroCost(t *testing.T) {
|
||||
|
||||
func testTpZeroNegativeCost(t *testing.T) {
|
||||
tStart := time.Date(2016, 3, 31, 0, 0, 0, 0, time.UTC)
|
||||
cd := engine.CallDescriptor{
|
||||
Category: "call",
|
||||
Tenant: "cgrates.org",
|
||||
Subject: "free",
|
||||
Account: "1013",
|
||||
Destination: "+4915",
|
||||
DurationIndex: 0,
|
||||
TimeStart: tStart,
|
||||
TimeEnd: tStart.Add(time.Duration(20) * time.Second),
|
||||
cd := &engine.CallDescriptorWithArgDispatcher{
|
||||
CallDescriptor: &engine.CallDescriptor{
|
||||
Category: "call",
|
||||
Tenant: "cgrates.org",
|
||||
Subject: "free",
|
||||
Account: "1013",
|
||||
Destination: "+4915",
|
||||
DurationIndex: 0,
|
||||
TimeStart: tStart,
|
||||
TimeEnd: tStart.Add(time.Duration(20) * time.Second),
|
||||
},
|
||||
}
|
||||
var cc engine.CallCost
|
||||
if err := tpRPC.Call(utils.ResponderDebit, cd, &cc); err != nil {
|
||||
|
||||
@@ -22,7 +22,6 @@ package general_tests
|
||||
|
||||
import (
|
||||
"net/rpc"
|
||||
"net/rpc/jsonrpc"
|
||||
"path"
|
||||
"reflect"
|
||||
"testing"
|
||||
@@ -41,14 +40,14 @@ var (
|
||||
smgLoadInst utils.LoadInstance // Share load information between tests
|
||||
|
||||
sTestTutSMG = []func(t *testing.T){
|
||||
TestTutSMGInitCfg,
|
||||
TestTutSMGResetDataDb,
|
||||
TestTutSMGResetStorDb,
|
||||
TestTutSMGStartEngine,
|
||||
TestTutSMGRpcConn,
|
||||
TestTutSMGLoadTariffPlanFromFolder,
|
||||
TestTutSMGCacheStats,
|
||||
TestTutSMGStopCgrEngine,
|
||||
testTutSMGInitCfg,
|
||||
testTutSMGResetDataDb,
|
||||
testTutSMGResetStorDb,
|
||||
testTutSMGStartEngine,
|
||||
testTutSMGRpcConn,
|
||||
testTutSMGLoadTariffPlanFromFolder,
|
||||
testTutSMGCacheStats,
|
||||
testTutSMGStopCgrEngine,
|
||||
}
|
||||
)
|
||||
|
||||
@@ -58,7 +57,7 @@ func TestTutSMG(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestTutSMGInitCfg(t *testing.T) {
|
||||
func testTutSMGInitCfg(t *testing.T) {
|
||||
tutSMGCfgPath = path.Join(*dataDir, "conf", "samples", "smgeneric")
|
||||
// Init config first
|
||||
var err error
|
||||
@@ -71,37 +70,37 @@ func TestTutSMGInitCfg(t *testing.T) {
|
||||
}
|
||||
|
||||
// Remove data in both rating and accounting db
|
||||
func TestTutSMGResetDataDb(t *testing.T) {
|
||||
func testTutSMGResetDataDb(t *testing.T) {
|
||||
if err := engine.InitDataDb(tutSMGCfg); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Wipe out the cdr database
|
||||
func TestTutSMGResetStorDb(t *testing.T) {
|
||||
func testTutSMGResetStorDb(t *testing.T) {
|
||||
if err := engine.InitStorDb(tutSMGCfg); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Start CGR Engine
|
||||
func TestTutSMGStartEngine(t *testing.T) {
|
||||
func testTutSMGStartEngine(t *testing.T) {
|
||||
if _, err := engine.StopStartEngine(tutSMGCfgPath, *waitRater); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Connect rpc client to rater
|
||||
func TestTutSMGRpcConn(t *testing.T) {
|
||||
func testTutSMGRpcConn(t *testing.T) {
|
||||
var err error
|
||||
tutSMGRpc, err = jsonrpc.Dial("tcp", tutSMGCfg.ListenCfg().RPCJSONListen) // We connect over JSON so we can also troubleshoot if needed
|
||||
tutSMGRpc, err = newRPCClient(tutSMGCfg.ListenCfg()) // We connect over JSON so we can also troubleshoot if needed
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Load the tariff plan, creating accounts and their balances
|
||||
func TestTutSMGLoadTariffPlanFromFolder(t *testing.T) {
|
||||
func testTutSMGLoadTariffPlanFromFolder(t *testing.T) {
|
||||
attrs := &utils.AttrLoadTpFromFolder{FolderPath: path.Join(*dataDir, "tariffplans", "oldtutorial")}
|
||||
if err := tutSMGRpc.Call(utils.ApierV2LoadTariffPlanFromFolder, attrs, &smgLoadInst); err != nil {
|
||||
t.Error(err)
|
||||
@@ -110,7 +109,7 @@ func TestTutSMGLoadTariffPlanFromFolder(t *testing.T) {
|
||||
}
|
||||
|
||||
// Check loaded stats
|
||||
func TestTutSMGCacheStats(t *testing.T) {
|
||||
func testTutSMGCacheStats(t *testing.T) {
|
||||
var reply string
|
||||
if err := tutSMGRpc.Call(utils.CacheSv1LoadCache, utils.AttrReloadCache{}, &reply); err != nil {
|
||||
t.Error(err)
|
||||
@@ -144,14 +143,14 @@ func TestTutSMGCacheStats(t *testing.T) {
|
||||
expectedStats[utils.CacheActionTriggers].Items = 1
|
||||
expectedStats[utils.CacheLoadIDs].Items = 20
|
||||
expectedStats[utils.CacheChargerProfiles].Items = 1
|
||||
if err := tutSMGRpc.Call(utils.CacheSv1GetCacheStats, nil, &rcvStats); err != nil {
|
||||
if err := tutSMGRpc.Call(utils.CacheSv1GetCacheStats, new(utils.AttrCacheIDsWithArgDispatcher), &rcvStats); err != nil {
|
||||
t.Error("Got error on CacheSv1.GetCacheStats: ", err.Error())
|
||||
} else if !reflect.DeepEqual(expectedStats, rcvStats) {
|
||||
t.Errorf("Calling ApierV2.CacheSv1 expected: %+v,\n received: %+v", utils.ToJSON(expectedStats), utils.ToJSON(rcvStats))
|
||||
}
|
||||
}
|
||||
|
||||
func TestTutSMGStopCgrEngine(t *testing.T) {
|
||||
func testTutSMGStopCgrEngine(t *testing.T) {
|
||||
if err := engine.KillEngine(100); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
@@ -22,7 +22,6 @@ package general_tests
|
||||
|
||||
import (
|
||||
"net/rpc"
|
||||
"net/rpc/jsonrpc"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -94,7 +93,7 @@ func testTutStartEngine(t *testing.T) {
|
||||
|
||||
func testTutRpcConn(t *testing.T) {
|
||||
var err error
|
||||
if tutRpc, err = jsonrpc.Dial("tcp", tutCfg.ListenCfg().RPCJSONListen); err != nil {
|
||||
if tutRpc, err = newRPCClient(tutCfg.ListenCfg()); err != nil {
|
||||
t.Fatal("could not connect to rater: ", err.Error())
|
||||
}
|
||||
}
|
||||
@@ -307,12 +306,13 @@ func testTutAccounts(t *testing.T) {
|
||||
}, &reply); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
acnt = &engine.Account{}
|
||||
if err := tutRpc.Call(utils.ApierV2GetAccount,
|
||||
&utils.AttrGetAccount{Tenant: "cgrates.org", Account: "1001"},
|
||||
&acnt); err != nil {
|
||||
t.Error(err)
|
||||
} else if acnt.Disabled {
|
||||
t.Errorf("account: %s", utils.ToIJSON(acnt))
|
||||
t.Errorf("account: %s", utils.ToJSON(acnt))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -23,7 +23,6 @@ package general_tests
|
||||
import (
|
||||
"flag"
|
||||
"net/rpc"
|
||||
"net/rpc/jsonrpc"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -109,7 +108,7 @@ func testTutorialStartEngine(t *testing.T) {
|
||||
|
||||
func testTutorialRpcConn(t *testing.T) {
|
||||
var err error
|
||||
tutorialRpc, err = jsonrpc.Dial("tcp", tutorialCfg.ListenCfg().RPCJSONListen) // We connect over JSON so we can also troubleshoot if needed
|
||||
tutorialRpc, err = newRPCClient(tutorialCfg.ListenCfg()) // We connect over JSON so we can also troubleshoot if needed
|
||||
if err != nil {
|
||||
t.Fatal("Could not connect to rater: ", err.Error())
|
||||
}
|
||||
|
||||
@@ -17,9 +17,9 @@ cdrc=$?
|
||||
echo 'go test github.com/cgrates/cgrates/ers -tags=integration -rpc=*gob'
|
||||
go test github.com/cgrates/cgrates/ers -tags=integration -rpc=*gob
|
||||
ers=$?
|
||||
# echo 'go test github.com/cgrates/cgrates/general_tests -tags=integration'
|
||||
# go test github.com/cgrates/cgrates/general_tests -tags=integration
|
||||
# gnr=$?
|
||||
echo 'go test github.com/cgrates/cgrates/general_tests -tags=integration -rpc=*gob'
|
||||
go test github.com/cgrates/cgrates/general_tests -tags=integration -rpc=*gob
|
||||
gnr=$?
|
||||
# echo 'go test github.com/cgrates/cgrates/agents -tags=integration'
|
||||
# go test github.com/cgrates/cgrates/agents -tags=integration
|
||||
# agts=$?
|
||||
|
||||
Reference in New Issue
Block a user