From d437ae08d7e6338e0a04a2942828f7001e37fd2e Mon Sep 17 00:00:00 2001
From: Trial97
Date: Fri, 27 Mar 2020 12:57:15 +0200
Subject: [PATCH] Added tests for CDRsV2ProcessEvent

---
 .../samples/cdrsv1processevent/cgrates.json  |  4 +
 .../cdrsv1processeventmongo/cgrates.json     |  4 +
 .../cdrsv1processeventmysql/cgrates.json     |  4 +
 engine/responder.go                          | 30 +-----
 general_tests/cdrs_processevent_it_test.go   | 95 +++++++++++++++++++
 utils/consts.go                              |  1 +
 6 files changed, 113 insertions(+), 25 deletions(-)

diff --git a/data/conf/samples/cdrsv1processevent/cgrates.json b/data/conf/samples/cdrsv1processevent/cgrates.json
index 3dd7a43ab..149d88e97 100644
--- a/data/conf/samples/cdrsv1processevent/cgrates.json
+++ b/data/conf/samples/cdrsv1processevent/cgrates.json
@@ -75,4 +75,8 @@
 	"scheduler_conns": ["*internal"],
 },
 
+"caches":{
+	"*rpc_responses": {"limit": -1, "ttl": "2s", "static_ttl": false},
+},
+
 }
diff --git a/data/conf/samples/cdrsv1processeventmongo/cgrates.json b/data/conf/samples/cdrsv1processeventmongo/cgrates.json
index 35dc0f955..63f33d5d7 100644
--- a/data/conf/samples/cdrsv1processeventmongo/cgrates.json
+++ b/data/conf/samples/cdrsv1processeventmongo/cgrates.json
@@ -79,4 +79,8 @@
 	"scheduler_conns": ["*internal"],
 },
 
+"caches":{
+	"*rpc_responses": {"limit": -1, "ttl": "2s", "static_ttl": false},
+},
+
 }
diff --git a/data/conf/samples/cdrsv1processeventmysql/cgrates.json b/data/conf/samples/cdrsv1processeventmysql/cgrates.json
index 86df4df7c..4501189d2 100644
--- a/data/conf/samples/cdrsv1processeventmysql/cgrates.json
+++ b/data/conf/samples/cdrsv1processeventmysql/cgrates.json
@@ -76,4 +76,8 @@
 	"scheduler_conns": ["*internal"],
 },
 
+"caches":{
+	"*rpc_responses": {"limit": -1, "ttl": "2s", "static_ttl": false},
+},
+
 }
diff --git a/engine/responder.go b/engine/responder.go
index ec73cfda1..22874df14 100644
--- a/engine/responder.go
+++ b/engine/responder.go
@@ -67,7 +67,7 @@ RPC method that provides the external RPC interface for getting the rating infor
 */
 func (rs *Responder) GetCost(arg *CallDescriptorWithArgDispatcher, reply *CallCost) (err error) {
 	// RPC caching
-	if config.CgrConfig().CacheCfg()[utils.CacheRPCResponses].Limit != 0 {
+	if arg.CgrID != utils.EmptyString && config.CgrConfig().CacheCfg()[utils.CacheRPCResponses].Limit != 0 {
 		cacheKey := utils.ConcatenatedKey(utils.ResponderGetCost, arg.CgrID)
 		refID := guardian.Guardian.GuardIDs("",
 			config.CgrConfig().GeneralCfg().LockingTimeout, cacheKey) // RPC caching needs to be atomic
@@ -112,26 +112,6 @@ func (rs *Responder) GetCost(arg *CallDescriptorWithArgDispatcher, reply *CallCo
 //GetCostOnRatingPlans is used by SupplierS to calculate the cost
 // Receive a list of RatingPlans and pick the first without error
 func (rs *Responder) GetCostOnRatingPlans(arg *utils.GetCostOnRatingPlansArgs, reply *map[string]interface{}) (err error) {
-	// RPC caching
-	if config.CgrConfig().CacheCfg()[utils.CacheRPCResponses].Limit != 0 {
-		cacheKey := utils.ConcatenatedKey(utils.ResponderGetCostOnRatingPlans, utils.UUIDSha1Prefix())
-		refID := guardian.Guardian.GuardIDs("",
-			config.CgrConfig().GeneralCfg().LockingTimeout, cacheKey) // RPC caching needs to be atomic
-		defer guardian.Guardian.UnguardIDs(refID)
-
-		if itm, has := Cache.Get(utils.CacheRPCResponses, cacheKey); has {
-			cachedResp := itm.(*utils.CachedRPCResponse)
-			if cachedResp.Error == nil {
-				*reply = *cachedResp.Result.(*map[string]interface{})
-			}
-			return cachedResp.Error
-		}
-		defer Cache.Set(utils.CacheRPCResponses, cacheKey,
-			&utils.CachedRPCResponse{Result: reply, Error: err},
-			nil, true, utils.NonTransactional)
-	}
-	// end of RPC caching
-
 	for _, rp := range arg.RatingPlanIDs { // loop through RatingPlans until we find one without errors
 		rPrfl := &RatingProfile{
 			Id: utils.ConcatenatedKey(utils.META_OUT,
@@ -176,7 +156,7 @@ func (rs *Responder) GetCostOnRatingPlans(arg *utils.GetCostOnRatingPlansArgs, r
 
 func (rs *Responder) Debit(arg *CallDescriptorWithArgDispatcher, reply *CallCost) (err error) {
 	// RPC caching
-	if config.CgrConfig().CacheCfg()[utils.CacheRPCResponses].Limit != 0 {
+	if arg.CgrID != utils.EmptyString && config.CgrConfig().CacheCfg()[utils.CacheRPCResponses].Limit != 0 {
 		cacheKey := utils.ConcatenatedKey(utils.ResponderDebit, arg.CgrID)
 		refID := guardian.Guardian.GuardIDs("",
 			config.CgrConfig().GeneralCfg().LockingTimeout, cacheKey) // RPC caching needs to be atomic
@@ -214,7 +194,7 @@
 
 func (rs *Responder) MaxDebit(arg *CallDescriptorWithArgDispatcher, reply *CallCost) (err error) {
 	// RPC caching
-	if config.CgrConfig().CacheCfg()[utils.CacheRPCResponses].Limit != 0 {
+	if arg.CgrID != utils.EmptyString && config.CgrConfig().CacheCfg()[utils.CacheRPCResponses].Limit != 0 {
 		cacheKey := utils.ConcatenatedKey(utils.ResponderMaxDebit, arg.CgrID)
 		refID := guardian.Guardian.GuardIDs("",
 			config.CgrConfig().GeneralCfg().LockingTimeout, cacheKey) // RPC caching needs to be atomic
@@ -251,7 +231,7 @@
 
 func (rs *Responder) RefundIncrements(arg *CallDescriptorWithArgDispatcher, reply *Account) (err error) {
 	// RPC caching
-	if config.CgrConfig().CacheCfg()[utils.CacheRPCResponses].Limit != 0 {
+	if arg.CgrID != utils.EmptyString && config.CgrConfig().CacheCfg()[utils.CacheRPCResponses].Limit != 0 {
 		cacheKey := utils.ConcatenatedKey(utils.ResponderRefundIncrements, arg.CgrID)
 		refID := guardian.Guardian.GuardIDs("",
 			config.CgrConfig().GeneralCfg().LockingTimeout, cacheKey) // RPC caching needs to be atomic
@@ -289,7 +269,7 @@
 
 func (rs *Responder) RefundRounding(arg *CallDescriptorWithArgDispatcher, reply *float64) (err error) {
 	// RPC caching
-	if config.CgrConfig().CacheCfg()[utils.CacheRPCResponses].Limit != 0 {
+	if arg.CgrID != utils.EmptyString && config.CgrConfig().CacheCfg()[utils.CacheRPCResponses].Limit != 0 {
 		cacheKey := utils.ConcatenatedKey(utils.ResponderRefundRounding, arg.CgrID)
 		refID := guardian.Guardian.GuardIDs("",
 			config.CgrConfig().GeneralCfg().LockingTimeout, cacheKey) // RPC caching needs to be atomic
diff --git a/general_tests/cdrs_processevent_it_test.go b/general_tests/cdrs_processevent_it_test.go
index 0c4ed05fd..c696274a7 100644
--- a/general_tests/cdrs_processevent_it_test.go
+++ b/general_tests/cdrs_processevent_it_test.go
@@ -58,6 +58,9 @@ var (
 		testV1CDRsProcessEventStore,
 		testV1CDRsProcessEventThreshold,
 		testV1CDRsProcessEventExportCheck,
+
+		testV1CDRsV2ProcessEventRalS,
+
 		testV1CDRsKillEngine,
 	}
 )
@@ -597,6 +600,98 @@ func testV1CDRsProcessEventExportCheck(t *testing.T) {
 		t.Fatal("Could not find the file in folder")
 	}
 }
+
+func testV1CDRsV2ProcessEventRalS(t *testing.T) {
+	argsEv := &engine.ArgV1ProcessEvent{
+		Flags: []string{utils.MetaRALs, "*attributes:false", "*chargers:false", "*export:false"},
+		CGREvent: utils.CGREvent{
+			Tenant: "cgrates.org",
+			ID:     "test101",
+			Event: map[string]interface{}{
+				utils.RunID:       "testv1",
+				utils.OriginID:    "test3_v2processEvent",
+				utils.OriginHost:  "OriginHost101",
+				utils.RequestType: utils.META_PSEUDOPREPAID,
+				utils.Account:     "1001",
+				utils.Destination: "+4986517174963",
+				utils.AnswerTime:  time.Date(2019, 11, 27, 12, 21, 26, 0, time.UTC),
+				utils.Usage:       2 * time.Minute,
+			},
+		},
+	}
+	expRply := []*utils.EventWithFlags{
+		{
+			Flags: []string{},
+			Event: map[string]interface{}{
+				"Account":     "1001",
+				"AnswerTime":  "2019-11-27T12:21:26Z",
+				"CGRID":       "d13c705aa38164aaf297fb77d7700565a3cea04b",
+				"Category":    "call",
+				"Cost":        0.0204,
+				"CostDetails": nil,
+				"CostSource":  "*cdrs",
+				"Destination": "+4986517174963",
+				"ExtraInfo":   "",
+				"OrderID":     0.,
+				"OriginHost":  "OriginHost101",
+				"OriginID":    "test3_v2processEvent",
+				"Partial":     false,
+				"PreRated":    false,
+				"RequestType": "*pseudoprepaid",
+				"RunID":       "testv1",
+				"SetupTime":   "0001-01-01T00:00:00Z",
+				"Source":      "",
+				"Subject":     "1001",
+				"Tenant":      "cgrates.org",
+				"ToR":         "*voice",
+				"Usage":       120000000000.,
+			},
+		},
+	}
+	var reply []*utils.EventWithFlags
+	if err := pecdrsRpc.Call(utils.CDRsV2ProcessEvent, argsEv, &reply); err != nil {
+		t.Error(err)
+	}
+	reply[0].Event["CostDetails"] = nil
+	expRply[0].Event["CGRID"] = reply[0].Event["CGRID"]
+	if !reflect.DeepEqual(reply[0], expRply[0]) {
+		t.Errorf("Expected %s, received: %s ", utils.ToJSON(expRply), utils.ToJSON(reply))
+	}
+	var cdrs []*engine.CDR
+	if err := pecdrsRpc.Call(utils.CDRsV1GetCDRs, &utils.RPCCDRsFilterWithArgDispatcher{
+		RPCCDRsFilter: &utils.RPCCDRsFilter{OriginHosts: []string{"OriginHost101"}}}, &cdrs); err != nil {
+		t.Fatal("Unexpected error: ", err.Error())
+	} else if len(cdrs) != 1 {
+		t.Errorf("Expecting: 1, received: %+v", len(cdrs))
+	} else if !reflect.DeepEqual(cdrs[0].Cost, 0.0204) {
+		t.Errorf("\nExpected: %+v,\nreceived: %+v", 0.0204, utils.ToJSON(cdrs[0]))
+	}
+
+	argsEv.Flags = append(argsEv.Flags, utils.MetaRerate)
+	argsEv.CGREvent.ID = "test1002"
+	argsEv.CGREvent.Event[utils.Usage] = time.Minute
+
+	if err := pecdrsRpc.Call(utils.CDRsV2ProcessEvent, argsEv, &reply); err != nil {
+		t.Error(err)
+	}
+	expRply[0].Flags = []string{utils.MetaRefund}
+	expRply[0].Event["Usage"] = 60000000000.
+	expRply[0].Event["Cost"] = 0.0102
+	reply[0].Event["CostDetails"] = nil
+	if !reflect.DeepEqual(reply[0], expRply[0]) {
+		t.Errorf("Expected %s, received: %s ", utils.ToJSON(expRply), utils.ToJSON(reply))
+	}
+
+	argsEv.CGREvent.Event[utils.Usage] = 30 * time.Second
+	if err := pecdrsRpc.Call(utils.CDRsV2ProcessEvent, argsEv, &reply); err != nil {
+		t.Error(err)
+	}
+	reply[0].Event["CostDetails"] = nil
+	if !reflect.DeepEqual(reply[0], expRply[0]) {
+		t.Errorf("Expected %s, received: %s ", utils.ToJSON(expRply), utils.ToJSON(reply))
+	}
+}
+
 func testV1CDRsKillEngine(t *testing.T) {
 	if err := engine.KillEngine(*waitRater); err != nil {
 		t.Error(err)
diff --git a/utils/consts.go b/utils/consts.go
index 6bdd827e5..d309c6f6b 100755
--- a/utils/consts.go
+++ b/utils/consts.go
@@ -1446,6 +1446,7 @@ const (
 	CDRsV2StoreSessionCost   = "CDRsV2.StoreSessionCost"
 	CdrsV2ProcessExternalCdr = "CdrsV2.ProcessExternalCdr"
 	CdrsV2ProcessCdr         = "CdrsV2.ProcessCdr"
+	CDRsV2ProcessEvent       = "CDRsV2.ProcessEvent"
 )
 
 // Scheduler
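
Note, as an illustration of the endpoint exercised above (not part of the commit): CDRsV2.ProcessEvent returns the processed event(s) together with the action flags applied by CDRs (for example *refund once *rerate lowers the previously billed usage). The *rpc_responses cache enabled in the sample configs (TTL "2s") answers a repeated call carrying the same event ID from cache, which appears to be why the test's third call, sent with 30s of usage, still expects the reply of the rerate call. A minimal standalone client sketch follows; the dial address, the literal flag strings, and the locally re-declared argument and reply types are assumptions made to keep the example self-contained, where a real client would import the cgrates engine and utils packages instead:

package main

import (
	"fmt"
	"net/rpc/jsonrpc"
	"time"
)

// Local stand-ins for engine.ArgV1ProcessEvent and utils.EventWithFlags,
// declared here only so the sketch compiles on its own.
type CGREvent struct {
	Tenant string
	ID     string
	Event  map[string]interface{}
}

type ArgV1ProcessEvent struct {
	Flags []string
	CGREvent
}

type EventWithFlags struct {
	Flags []string
	Event map[string]interface{}
}

func main() {
	// Assumed address: a cgrates engine with its JSON-RPC listener on the default port.
	client, err := jsonrpc.Dial("tcp", "127.0.0.1:2012")
	if err != nil {
		panic(err)
	}
	defer client.Close()
	args := &ArgV1ProcessEvent{
		// Same flag set as the test: rate through RALs, skip the other subsystems.
		Flags: []string{"*rals", "*attributes:false", "*chargers:false", "*export:false"},
		CGREvent: CGREvent{
			Tenant: "cgrates.org",
			ID:     "example1",
			Event: map[string]interface{}{
				"RunID":       "run1",
				"OriginID":    "example_origin",
				"OriginHost":  "OriginHost101",
				"RequestType": "*pseudoprepaid",
				"Account":     "1001",
				"Destination": "+4986517174963",
				"AnswerTime":  time.Date(2019, 11, 27, 12, 21, 26, 0, time.UTC),
				"Usage":       2 * time.Minute,
			},
		},
	}
	var reply []*EventWithFlags
	if err := client.Call("CDRsV2.ProcessEvent", args, &reply); err != nil {
		panic(err)
	}
	for _, ev := range reply {
		// Flags carries the actions CDRs took, e.g. *refund after a rerate.
		fmt.Printf("flags=%v cost=%v usage=%v\n", ev.Flags, ev.Event["Cost"], ev.Event["Usage"])
	}
}

A side note on the responder.go change: guarding each cached method with arg.CgrID != utils.EmptyString means requests lacking a CGRID are simply not cached, so they can no longer all collide on one cache entry keyed by the empty ID.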