diff --git a/.travis.yml b/.travis.yml index 899b7c359..9b4f37383 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,7 +7,7 @@ install: - go get github.com/Masterminds/glide - glide install -script: $TRAVIS_BUILD_DIR/test.sh +script: go test -v $(glide novendor) branches: only: master diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index b2a3821bc..6c3e9a5e2 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -42,6 +42,7 @@ information, please see the [`CONTRIBUTING.md`](CONTRIBUTING.md) file. | @bhepp | Brice Heppner | | @noahmehl | Noah Mehl | | @elfranne | Tom Braarup Cuykens | +| @rbarrabe | Régis Barrabé | diff --git a/agents/dmtagent.go b/agents/dmtagent.go index 2b5466bac..eead8072d 100644 --- a/agents/dmtagent.go +++ b/agents/dmtagent.go @@ -32,7 +32,7 @@ import ( "github.com/fiorix/go-diameter/diam/sm" ) -func NewDiameterAgent(cgrCfg *config.CGRConfig, smg *rpcclient.RpcClient, pubsubs *rpcclient.RpcClient) (*DiameterAgent, error) { +func NewDiameterAgent(cgrCfg *config.CGRConfig, smg rpcclient.RpcClientConnection, pubsubs rpcclient.RpcClientConnection) (*DiameterAgent, error) { da := &DiameterAgent{cgrCfg: cgrCfg, smg: smg, pubsubs: pubsubs} dictsDir := cgrCfg.DiameterAgentCfg().DictionariesDir if len(dictsDir) != 0 { @@ -45,8 +45,8 @@ func NewDiameterAgent(cgrCfg *config.CGRConfig, smg *rpcclient.RpcClient, pubsub type DiameterAgent struct { cgrCfg *config.CGRConfig - smg *rpcclient.RpcClient // Connection towards CGR-SMG component - pubsubs *rpcclient.RpcClient // Connection towards CGR-PubSub component + smg rpcclient.RpcClientConnection // Connection towards CGR-SMG component + pubsubs rpcclient.RpcClientConnection // Connection towards CGR-PubSub component } // Creates the message handlers @@ -69,7 +69,7 @@ func (self *DiameterAgent) handlers() diam.Handler { return dSM } -func (self DiameterAgent) processCCR(ccr *CCR, reqProcessor *config.DARequestProcessor, cca *CCA) (bool, error) { +func (self DiameterAgent) processCCR(ccr *CCR, reqProcessor 
*config.DARequestProcessor, processorVars map[string]string, cca *CCA) (bool, error) { passesAllFilters := true for _, fldFilter := range reqProcessor.RequestFilter { if passes, _ := passesFieldFilter(ccr.diamMessage, fldFilter, nil); !passes { @@ -122,7 +122,6 @@ func (self DiameterAgent) processCCR(ccr *CCR, reqProcessor *config.DARequestPro } } var maxUsage float64 - processorVars := make(map[string]string) processorVars[CGRResultCode] = strconv.Itoa(diam.Success) processorVars[CGRError] = "" if reqProcessor.DryRun { // DryRun does not send over network @@ -166,6 +165,15 @@ func (self DiameterAgent) processCCR(ccr *CCR, reqProcessor *config.DARequestPro processorVars[CGRResultCode] = strconv.Itoa(DiameterRatingFailed) } } + if maxUsage < 0 { + maxUsage = 0 + } + if prevMaxUsageStr, hasKey := processorVars[CGRMaxUsage]; hasKey { + prevMaxUsage, _ := strconv.ParseFloat(prevMaxUsageStr, 64) + if prevMaxUsage < maxUsage { + maxUsage = prevMaxUsage + } + } processorVars[CGRMaxUsage] = strconv.FormatFloat(maxUsage, 'f', -1, 64) } if err := messageSetAVPsWithPath(cca.diamMessage, []interface{}{"Result-Code"}, processorVars[CGRResultCode], @@ -190,10 +198,14 @@ func (self *DiameterAgent) handleCCR(c diam.Conn, m *diam.Message) { return } cca := NewBareCCAFromCCR(ccr, self.cgrCfg.DiameterAgentCfg().OriginHost, self.cgrCfg.DiameterAgentCfg().OriginRealm) - var processed bool + var processed, lclProcessed bool + processorVars := make(map[string]string) // Shared between processors for _, reqProcessor := range self.cgrCfg.DiameterAgentCfg().RequestProcessors { - processed, err = self.processCCR(ccr, reqProcessor, cca) - if err != nil || (processed && !reqProcessor.ContinueOnSuccess) { + lclProcessed, err = self.processCCR(ccr, reqProcessor, processorVars, cca) + if lclProcessed { // Process local so we don't overwrite globally + processed = lclProcessed + } + if err != nil || (lclProcessed && !reqProcessor.ContinueOnSuccess) { break } } diff --git 
a/agents/dmtagent_it_test.go b/agents/dmtagent_it_test.go index eff5e81ee..b91891107 100644 --- a/agents/dmtagent_it_test.go +++ b/agents/dmtagent_it_test.go @@ -208,7 +208,7 @@ func TestDmtAgentTPFromFolder(t *testing.T) { if err := apierRpc.Call("ApierV2.LoadTariffPlanFromFolder", attrs, &loadInst); err != nil { t.Error(err) } - time.Sleep(time.Duration(*waitRater) * time.Millisecond) // Give time for scheduler to execute topups + time.Sleep(time.Duration(1000) * time.Millisecond) // Give time for scheduler to execute topups } func TestConnectDiameterClient(t *testing.T) { @@ -242,7 +242,7 @@ func TestDmtAgentSendCCRInit(t *testing.T) { if err := dmtClient.SendMessage(m); err != nil { t.Error(err) } - time.Sleep(time.Duration(100) * time.Millisecond) + time.Sleep(time.Duration(*waitRater) * time.Millisecond) msg := dmtClient.ReceivedMessage() if avps, err := msg.FindAVPsWithPath([]interface{}{"Granted-Service-Unit", "CC-Time"}, dict.UndefinedVendorID); err != nil { t.Error(err) @@ -286,7 +286,7 @@ func TestDmtAgentSendCCRUpdate(t *testing.T) { if err := dmtClient.SendMessage(m); err != nil { t.Error(err) } - time.Sleep(time.Duration(100) * time.Millisecond) + time.Sleep(time.Duration(*waitRater) * time.Millisecond) msg := dmtClient.ReceivedMessage() if avps, err := msg.FindAVPsWithPath([]interface{}{"Granted-Service-Unit", "CC-Time"}, dict.UndefinedVendorID); err != nil { t.Error(err) @@ -325,7 +325,7 @@ func TestDmtAgentSendCCRUpdate2(t *testing.T) { if err := dmtClient.SendMessage(m); err != nil { t.Error(err) } - time.Sleep(time.Duration(100) * time.Millisecond) + time.Sleep(time.Duration(*waitRater) * time.Millisecond) msg := dmtClient.ReceivedMessage() if avps, err := msg.FindAVPsWithPath([]interface{}{"Granted-Service-Unit", "CC-Time"}, dict.UndefinedVendorID); err != nil { t.Error(err) @@ -363,7 +363,7 @@ func TestDmtAgentSendCCRTerminate(t *testing.T) { if err := dmtClient.SendMessage(m); err != nil { t.Error(err) } - time.Sleep(time.Duration(150) * 
time.Millisecond) + time.Sleep(time.Duration(*waitRater) * time.Millisecond) msg := dmtClient.ReceivedMessage() if msg == nil { t.Fatal("No answer to CCR terminate received") @@ -626,7 +626,7 @@ func TestDmtAgentSendCCRSimpaEvent(t *testing.T) { }), diam.NewAVP(29000, avp.Mbit, 2011, &diam.GroupedAVP{ // MC-Information AVP: []*diam.AVP{ - diam.NewAVP(29001, avp.Mbit, 2011, datatype.OctetString("0x38924012914528")), // HighLayerCharacteristics + diam.NewAVP(20938, avp.Mbit, 2011, datatype.OctetString("0x38924012914528")), // HighLayerCharacteristics diam.NewAVP(29002, avp.Mbit, 2011, datatype.UTF8String("12928471313847173")), // MC-Service-Id diam.NewAVP(29003, avp.Mbit, 2011, datatype.UTF8String("SPV123456012123")), // TransparentData diam.NewAVP(1201, avp.Mbit, 10415, &diam.GroupedAVP{ // MC-Information @@ -641,7 +641,7 @@ func TestDmtAgentSendCCRSimpaEvent(t *testing.T) { if err := dmtClient.SendMessage(ccr); err != nil { t.Error(err) } - time.Sleep(time.Duration(100) * time.Millisecond) + time.Sleep(time.Duration(*waitRater) * time.Millisecond) msg := dmtClient.ReceivedMessage() // Discard the received message so we can test next one if msg == nil { t.Fatal("No message returned") @@ -675,6 +675,261 @@ func TestDmtAgentCdrs(t *testing.T) { } } +func TestDmtAgentSendDataGrpInit(t *testing.T) { + if !*testIntegration { + return + } + ccr := diam.NewRequest(diam.CreditControl, 4, nil) + ccr.NewAVP(avp.SessionID, avp.Mbit, 0, datatype.UTF8String("testdatagrp")) + ccr.NewAVP(avp.OriginHost, avp.Mbit, 0, datatype.DiameterIdentity("CGR-DA")) + ccr.NewAVP(avp.OriginRealm, avp.Mbit, 0, datatype.DiameterIdentity("cgrates.org")) + ccr.NewAVP(avp.AuthApplicationID, avp.Mbit, 0, datatype.Unsigned32(4)) + ccr.NewAVP(avp.ServiceContextID, avp.Mbit, 0, datatype.UTF8String("gprs@huawei.com")) + ccr.NewAVP(avp.CCRequestType, avp.Mbit, 0, datatype.Enumerated(1)) + ccr.NewAVP(avp.CCRequestNumber, avp.Mbit, 0, datatype.Unsigned32(1)) + ccr.NewAVP(avp.EventTimestamp, avp.Mbit, 0, 
datatype.Time(time.Date(2016, 1, 5, 11, 30, 10, 0, time.UTC))) + ccr.NewAVP(avp.SubscriptionID, avp.Mbit, 0, &diam.GroupedAVP{ + AVP: []*diam.AVP{ + diam.NewAVP(avp.SubscriptionIDType, avp.Mbit, 0, datatype.Enumerated(0)), + diam.NewAVP(avp.SubscriptionIDData, avp.Mbit, 0, datatype.UTF8String("1001")), // Subscription-Id-Data + }}) + ccr.NewAVP(avp.SubscriptionID, avp.Mbit, 0, &diam.GroupedAVP{ + AVP: []*diam.AVP{ + diam.NewAVP(avp.SubscriptionIDType, avp.Mbit, 0, datatype.Enumerated(1)), + diam.NewAVP(avp.SubscriptionIDData, avp.Mbit, 0, datatype.UTF8String("104502200011")), // Subscription-Id-Data + }}) + ccr.NewAVP(avp.ServiceIdentifier, avp.Mbit, 0, datatype.Unsigned32(0)) + ccr.NewAVP(avp.RequestedAction, avp.Mbit, 0, datatype.Enumerated(0)) + ccr.NewAVP(avp.RequestedServiceUnit, avp.Mbit, 0, &diam.GroupedAVP{ + AVP: []*diam.AVP{ + diam.NewAVP(avp.CCTime, avp.Mbit, 0, datatype.Unsigned32(1))}}) + ccr.NewAVP(873, avp.Mbit, 10415, &diam.GroupedAVP{ // + AVP: []*diam.AVP{ + diam.NewAVP(20300, avp.Mbit, 2011, &diam.GroupedAVP{ // IN-Information + AVP: []*diam.AVP{ + diam.NewAVP(20302, avp.Mbit, 2011, datatype.UTF8String("22509")), // Calling-Vlr-Number + diam.NewAVP(20385, avp.Mbit, 2011, datatype.UTF8String("4002")), // Called-Party-NP + }, + }), + diam.NewAVP(2000, avp.Mbit, 10415, &diam.GroupedAVP{ // SMS-Information + AVP: []*diam.AVP{ + diam.NewAVP(886, avp.Mbit, 10415, &diam.GroupedAVP{ // Originator-Address + AVP: []*diam.AVP{ + diam.NewAVP(899, avp.Mbit, 10415, datatype.Enumerated(1)), // Address-Type + diam.NewAVP(897, avp.Mbit, 10415, datatype.UTF8String("1003")), // Address-Data + }}), + diam.NewAVP(1201, avp.Mbit, 10415, &diam.GroupedAVP{ // Recipient-Address + AVP: []*diam.AVP{ + diam.NewAVP(899, avp.Mbit, 10415, datatype.Enumerated(1)), // Address-Type + diam.NewAVP(897, avp.Mbit, 10415, datatype.UTF8String("1002")), // Address-Data + }}), + }, + }), + }}) + if err := dmtClient.SendMessage(ccr); err != nil { + t.Error(err) + } + 
time.Sleep(time.Duration(*waitRater) * time.Millisecond) + msg := dmtClient.ReceivedMessage() + if msg == nil { + t.Fatal("No message returned") + } + if avps, err := msg.FindAVPsWithPath([]interface{}{"Result-Code"}, dict.UndefinedVendorID); err != nil { + t.Error(err) + } else if len(avps) == 0 { + t.Error("Result-Code not found") + } else if resCode := avpValAsString(avps[0]); resCode != "2001" { + t.Errorf("Expecting 2001, received: %s", resCode) + } +} + +func TestDmtAgentSendDataGrpUpdate(t *testing.T) { + if !*testIntegration { + return + } + ccr := diam.NewRequest(diam.CreditControl, 4, nil) + ccr.NewAVP(avp.SessionID, avp.Mbit, 0, datatype.UTF8String("testdatagrp")) + ccr.NewAVP(avp.OriginHost, avp.Mbit, 0, datatype.DiameterIdentity("CGR-DA")) + ccr.NewAVP(avp.OriginRealm, avp.Mbit, 0, datatype.DiameterIdentity("cgrates.org")) + ccr.NewAVP(avp.AuthApplicationID, avp.Mbit, 0, datatype.Unsigned32(4)) + ccr.NewAVP(avp.ServiceContextID, avp.Mbit, 0, datatype.UTF8String("gprs@huawei.com")) + ccr.NewAVP(avp.CCRequestType, avp.Mbit, 0, datatype.Enumerated(2)) + ccr.NewAVP(avp.CCRequestNumber, avp.Mbit, 0, datatype.Unsigned32(1)) + ccr.NewAVP(avp.EventTimestamp, avp.Mbit, 0, datatype.Time(time.Date(2016, 1, 5, 11, 30, 10, 0, time.UTC))) + ccr.NewAVP(avp.SubscriptionID, avp.Mbit, 0, &diam.GroupedAVP{ + AVP: []*diam.AVP{ + diam.NewAVP(avp.SubscriptionIDType, avp.Mbit, 0, datatype.Enumerated(0)), + diam.NewAVP(avp.SubscriptionIDData, avp.Mbit, 0, datatype.UTF8String("1001")), // Subscription-Id-Data + }}) + ccr.NewAVP(avp.SubscriptionID, avp.Mbit, 0, &diam.GroupedAVP{ + AVP: []*diam.AVP{ + diam.NewAVP(avp.SubscriptionIDType, avp.Mbit, 0, datatype.Enumerated(1)), + diam.NewAVP(avp.SubscriptionIDData, avp.Mbit, 0, datatype.UTF8String("104502200011")), // Subscription-Id-Data + }}) + ccr.NewAVP(avp.ServiceIdentifier, avp.Mbit, 0, datatype.Unsigned32(0)) + ccr.NewAVP(avp.RequestedAction, avp.Mbit, 0, datatype.Enumerated(0)) + ccr.NewAVP(avp.RequestedServiceUnit, 
avp.Mbit, 0, &diam.GroupedAVP{ + AVP: []*diam.AVP{ + diam.NewAVP(avp.CCTime, avp.Mbit, 0, datatype.Unsigned32(1))}}) + ccr.NewAVP(873, avp.Mbit, 10415, &diam.GroupedAVP{ // + AVP: []*diam.AVP{ + diam.NewAVP(20300, avp.Mbit, 2011, &diam.GroupedAVP{ // IN-Information + AVP: []*diam.AVP{ + diam.NewAVP(20302, avp.Mbit, 2011, datatype.UTF8String("22509")), // Calling-Vlr-Number + diam.NewAVP(20385, avp.Mbit, 2011, datatype.UTF8String("4002")), // Called-Party-NP + }, + }), + diam.NewAVP(2000, avp.Mbit, 10415, &diam.GroupedAVP{ // SMS-Information + AVP: []*diam.AVP{ + diam.NewAVP(886, avp.Mbit, 10415, &diam.GroupedAVP{ // Originator-Address + AVP: []*diam.AVP{ + diam.NewAVP(899, avp.Mbit, 10415, datatype.Enumerated(1)), // Address-Type + diam.NewAVP(897, avp.Mbit, 10415, datatype.UTF8String("1003")), // Address-Data + }}), + diam.NewAVP(1201, avp.Mbit, 10415, &diam.GroupedAVP{ // Recipient-Address + AVP: []*diam.AVP{ + diam.NewAVP(899, avp.Mbit, 10415, datatype.Enumerated(1)), // Address-Type + diam.NewAVP(897, avp.Mbit, 10415, datatype.UTF8String("1002")), // Address-Data + }}), + }, + }), + }}) + ccr.NewAVP("Multiple-Services-Credit-Control", avp.Mbit, 0, &diam.GroupedAVP{ + AVP: []*diam.AVP{ + diam.NewAVP(446, avp.Mbit, 0, &diam.GroupedAVP{ // Used-Service-Unit + AVP: []*diam.AVP{ + diam.NewAVP(452, avp.Mbit, 0, datatype.Enumerated(0)), // Tariff-Change-Usage + diam.NewAVP(420, avp.Mbit, 0, datatype.Unsigned32(20)), // CC-Time + diam.NewAVP(412, avp.Mbit, 0, datatype.Unsigned64(1000)), // CC-Input-Octets + diam.NewAVP(414, avp.Mbit, 0, datatype.Unsigned64(24)), // CC-Output-Octets + }, + }), + diam.NewAVP(432, avp.Mbit, 0, datatype.Unsigned32(1)), // Data session for group 1 + }, + }) + ccr.NewAVP("Multiple-Services-Credit-Control", avp.Mbit, 0, &diam.GroupedAVP{ + AVP: []*diam.AVP{ + diam.NewAVP(446, avp.Mbit, 0, &diam.GroupedAVP{ // Used-Service-Unit + AVP: []*diam.AVP{ + diam.NewAVP(452, avp.Mbit, 0, datatype.Enumerated(0)), // Tariff-Change-Usage + 
diam.NewAVP(420, avp.Mbit, 0, datatype.Unsigned32(20)), // CC-Time + diam.NewAVP(412, avp.Mbit, 0, datatype.Unsigned64(1024)), // CC-Input-Octets + diam.NewAVP(414, avp.Mbit, 0, datatype.Unsigned64(512)), // CC-Output-Octets + }, + }), + diam.NewAVP(432, avp.Mbit, 0, datatype.Unsigned32(2)), // Data session for group 2 + }, + }) + if err := dmtClient.SendMessage(ccr); err != nil { + t.Error(err) + } + time.Sleep(time.Duration(*waitRater) * time.Millisecond) + msg := dmtClient.ReceivedMessage() + if msg == nil { + t.Fatal("No message returned") + } + if avps, err := msg.FindAVPsWithPath([]interface{}{"Result-Code"}, dict.UndefinedVendorID); err != nil { + t.Error(err) + } else if len(avps) == 0 { + t.Error("Result-Code not found") + } else if resCode := avpValAsString(avps[0]); resCode != "2001" { + t.Errorf("Expecting 2001, received: %s", resCode) + } +} + +func TestDmtAgentSendDataGrpTerminate(t *testing.T) { + if !*testIntegration { + return + } + ccr := diam.NewRequest(diam.CreditControl, 4, nil) + ccr.NewAVP(avp.SessionID, avp.Mbit, 0, datatype.UTF8String("testdatagrp")) + ccr.NewAVP(avp.OriginHost, avp.Mbit, 0, datatype.DiameterIdentity("CGR-DA")) + ccr.NewAVP(avp.OriginRealm, avp.Mbit, 0, datatype.DiameterIdentity("cgrates.org")) + ccr.NewAVP(avp.AuthApplicationID, avp.Mbit, 0, datatype.Unsigned32(4)) + ccr.NewAVP(avp.ServiceContextID, avp.Mbit, 0, datatype.UTF8String("gprs@huawei.com")) + ccr.NewAVP(avp.CCRequestType, avp.Mbit, 0, datatype.Enumerated(3)) + ccr.NewAVP(avp.CCRequestNumber, avp.Mbit, 0, datatype.Unsigned32(1)) + ccr.NewAVP(avp.EventTimestamp, avp.Mbit, 0, datatype.Time(time.Date(2016, 1, 5, 11, 30, 10, 0, time.UTC))) + ccr.NewAVP(avp.SubscriptionID, avp.Mbit, 0, &diam.GroupedAVP{ + AVP: []*diam.AVP{ + diam.NewAVP(avp.SubscriptionIDType, avp.Mbit, 0, datatype.Enumerated(0)), + diam.NewAVP(avp.SubscriptionIDData, avp.Mbit, 0, datatype.UTF8String("1001")), // Subscription-Id-Data + }}) + ccr.NewAVP(avp.SubscriptionID, avp.Mbit, 0, 
&diam.GroupedAVP{ + AVP: []*diam.AVP{ + diam.NewAVP(avp.SubscriptionIDType, avp.Mbit, 0, datatype.Enumerated(1)), + diam.NewAVP(avp.SubscriptionIDData, avp.Mbit, 0, datatype.UTF8String("104502200011")), // Subscription-Id-Data + }}) + ccr.NewAVP(avp.ServiceIdentifier, avp.Mbit, 0, datatype.Unsigned32(0)) + ccr.NewAVP(avp.RequestedAction, avp.Mbit, 0, datatype.Enumerated(0)) + ccr.NewAVP(avp.RequestedServiceUnit, avp.Mbit, 0, &diam.GroupedAVP{ + AVP: []*diam.AVP{ + diam.NewAVP(avp.CCTime, avp.Mbit, 0, datatype.Unsigned32(1))}}) + ccr.NewAVP(873, avp.Mbit, 10415, &diam.GroupedAVP{ // + AVP: []*diam.AVP{ + diam.NewAVP(20300, avp.Mbit, 2011, &diam.GroupedAVP{ // IN-Information + AVP: []*diam.AVP{ + diam.NewAVP(20302, avp.Mbit, 2011, datatype.UTF8String("22509")), // Calling-Vlr-Number + diam.NewAVP(20385, avp.Mbit, 2011, datatype.UTF8String("4002")), // Called-Party-NP + }, + }), + diam.NewAVP(2000, avp.Mbit, 10415, &diam.GroupedAVP{ // SMS-Information + AVP: []*diam.AVP{ + diam.NewAVP(886, avp.Mbit, 10415, &diam.GroupedAVP{ // Originator-Address + AVP: []*diam.AVP{ + diam.NewAVP(899, avp.Mbit, 10415, datatype.Enumerated(1)), // Address-Type + diam.NewAVP(897, avp.Mbit, 10415, datatype.UTF8String("1003")), // Address-Data + }}), + diam.NewAVP(1201, avp.Mbit, 10415, &diam.GroupedAVP{ // Recipient-Address + AVP: []*diam.AVP{ + diam.NewAVP(899, avp.Mbit, 10415, datatype.Enumerated(1)), // Address-Type + diam.NewAVP(897, avp.Mbit, 10415, datatype.UTF8String("1002")), // Address-Data + }}), + }, + }), + }}) + ccr.NewAVP("Multiple-Services-Credit-Control", avp.Mbit, 0, &diam.GroupedAVP{ + AVP: []*diam.AVP{ + diam.NewAVP(446, avp.Mbit, 0, &diam.GroupedAVP{ // Used-Service-Unit + AVP: []*diam.AVP{ + diam.NewAVP(452, avp.Mbit, 0, datatype.Enumerated(0)), // Tariff-Change-Usage + diam.NewAVP(420, avp.Mbit, 0, datatype.Unsigned32(20)), // CC-Time + diam.NewAVP(412, avp.Mbit, 0, datatype.Unsigned64(512)), // CC-Input-Octets + diam.NewAVP(414, avp.Mbit, 0, datatype.Unsigned64(0)), 
// CC-Output-Octets + }, + }), + }, + }) + if err := dmtClient.SendMessage(ccr); err != nil { + t.Error(err) + } + time.Sleep(time.Duration(3000) * time.Millisecond) + msg := dmtClient.ReceivedMessage() + if msg == nil { + t.Fatal("No message returned") + } + if avps, err := msg.FindAVPsWithPath([]interface{}{"Result-Code"}, dict.UndefinedVendorID); err != nil { + t.Error(err) + } else if len(avps) == 0 { + t.Error("Result-Code not found") + } else if resCode := avpValAsString(avps[0]); resCode != "2001" { + t.Errorf("Expecting 2001, received: %s", resCode) + } +} + +func TestDmtAgentSendDataGrpCDRs(t *testing.T) { + if !*testIntegration { + return + } + var cdrs []*engine.ExternalCDR + req := utils.RPCCDRsFilter{CGRIDs: []string{utils.Sha1("testdatagrp")}} + if err := apierRpc.Call("ApierV2.GetCdrs", req, &cdrs); err != nil { + t.Error("Unexpected error: ", err.Error()) + } else if len(cdrs) != 3 { + t.Error("Unexpected number of CDRs returned: ", len(cdrs)) + } +} + func TestDmtAgentDryRun1(t *testing.T) { if !*testIntegration { return diff --git a/agents/hapool_it_test.go b/agents/hapool_it_test.go new file mode 100644 index 000000000..aa4fac2e5 --- /dev/null +++ b/agents/hapool_it_test.go @@ -0,0 +1,124 @@ +/* +Real-time Charging System for Telecom & ISP environments +Copyright (C) ITsysCOM GmbH + +This program is free software: you can Storagetribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITH*out ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. 
If not, see +*/ + +package agents + +/* +import ( + "os/exec" + "path" + "testing" + + "github.com/cgrates/cgrates/config" + "github.com/cgrates/cgrates/engine" +) + +var cgrRater1Cmd, cgrSmg1Cmd *exec.Cmd + +func TestHaPoolInitCfg(t *testing.T) { + if !*testIntegration { + return + } + daCfgPath = path.Join(*dataDir, "conf", "samples", "hapool", "cgrrater1") + // Init config first + var err error + daCfg, err = config.NewCGRConfigFromFolder(daCfgPath) + if err != nil { + t.Error(err) + } + daCfg.DataFolderPath = *dataDir // Share DataFolderPath through config towards StoreDb for Flush() + config.SetCgrConfig(daCfg) +} + +// Remove data in both rating and accounting db +func TestHaPoolResetDataDb(t *testing.T) { + TestDmtAgentResetDataDb(t) +} + +// Wipe out the cdr database +func TestHaPoolResetStorDb(t *testing.T) { + TestDmtAgentResetStorDb(t) +} + +// Start CGR Engine +func TestHaPoolStartEngine(t *testing.T) { + if !*testIntegration { + return + } + engine.KillEngine(*waitRater) // just to make sure + var err error + cgrRater1 := path.Join(*dataDir, "conf", "samples", "hapool", "cgrrater1") + if cgrRater1Cmd, err = engine.StartEngine(cgrRater1, *waitRater); err != nil { + t.Fatal("cgrRater1: ", err) + } + cgrRater2 := path.Join(*dataDir, "conf", "samples", "hapool", "cgrrater2") + if _, err = engine.StartEngine(cgrRater2, *waitRater); err != nil { + t.Fatal("cgrRater2: ", err) + } + cgrSmg1 := path.Join(*dataDir, "conf", "samples", "hapool", "cgrsmg1") + if cgrSmg1Cmd, err = engine.StartEngine(cgrSmg1, *waitRater); err != nil { + t.Fatal("cgrSmg1: ", err) + } + cgrSmg2 := path.Join(*dataDir, "conf", "samples", "hapool", "cgrsmg2") + if _, err = engine.StartEngine(cgrSmg2, *waitRater); err != nil { + t.Fatal("cgrSmg2: ", err) + } + cgrDa := path.Join(*dataDir, "conf", "samples", "hapool", "dagent") + if _, err = engine.StartEngine(cgrDa, *waitRater); err != nil { + t.Fatal("cgrDa: ", err) + } + +} + +// Connect rpc client to rater +func TestHaPoolApierRpcConn(t 
*testing.T) { + TestDmtAgentApierRpcConn(t) +} + +// Load the tariff plan, creating accounts and their balances +func TestHaPoolTPFromFolder(t *testing.T) { + TestDmtAgentTPFromFolder(t) +} + +// cgr-console 'cost Category="call" Tenant="cgrates.org" Subject="1001" Destination="1004" TimeStart="2015-11-07T08:42:26Z" TimeEnd="2015-11-07T08:47:26Z"' +func TestHaPoolSendCCRInit(t *testing.T) { + TestDmtAgentSendCCRInit(t) +} + +// cgr-console 'cost Category="call" Tenant="cgrates.org" Subject="1001" Destination="1004" TimeStart="2015-11-07T08:42:26Z" TimeEnd="2015-11-07T08:52:26Z"' +func TestHaPoolSendCCRUpdate(t *testing.T) { + TestDmtAgentSendCCRUpdate(t) +} + +// cgr-console 'cost Category="call" Tenant="cgrates.org" Subject="1001" Destination="1004" TimeStart="2015-11-07T08:42:26Z" TimeEnd="2015-11-07T08:57:26Z"' +func TestHaPoolSendCCRUpdate2(t *testing.T) { + TestDmtAgentSendCCRUpdate2(t) +} + +func TestHaPoolSendCCRTerminate(t *testing.T) { + TestDmtAgentSendCCRTerminate(t) +} + +func TestHaPoolCdrs(t *testing.T) { + TestDmtAgentCdrs(t) +} + +func TestHaPoolStopEngine(t *testing.T) { + TestDmtAgentStopEngine(t) +} +*/ diff --git a/agents/libdmt.go b/agents/libdmt.go index bb60b4f75..3467b980c 100644 --- a/agents/libdmt.go +++ b/agents/libdmt.go @@ -244,8 +244,8 @@ func metaValueExponent(m *diam.Message, argsTpl utils.RSRFields, roundingDecimal return strconv.FormatFloat(utils.Round(res, roundingDecimals, utils.ROUNDING_MIDDLE), 'f', -1, 64), nil } -func metaSum(m *diam.Message, argsTpl utils.RSRFields, roundingDecimals int) (string, error) { - valStr := composedFieldvalue(m, argsTpl, 0, nil) +func metaSum(m *diam.Message, argsTpl utils.RSRFields, passAtIndex, roundingDecimals int) (string, error) { + valStr := composedFieldvalue(m, argsTpl, passAtIndex, nil) handlerArgs := strings.Split(valStr, utils.HandlerArgSep) var summed float64 for _, arg := range handlerArgs { @@ -287,6 +287,11 @@ func passesFieldFilter(m *diam.Message, fieldFilter *utils.RSRField, 
processorVa if err != nil { return false, 0 } + if len(avps) == 0 { // No AVP found in request, treat it same as empty + if fieldFilter.FilterPasses("") { + return true, -1 + } + } for avpIdx, avpVal := range avps { // First match wins due to index if fieldFilter.FilterPasses(avpValAsString(avpVal)) { return true, avpIdx @@ -398,7 +403,7 @@ func fieldOutVal(m *diam.Message, cfgFld *config.CfgCdrField, extraParam interfa case META_VALUE_EXPONENT: outVal, err = metaValueExponent(m, cfgFld.Value, 10) // FixMe: add here configured number of decimals case META_SUM: - outVal, err = metaSum(m, cfgFld.Value, 10) + outVal, err = metaSum(m, cfgFld.Value, passAtIndex, 10) default: outVal, err = metaHandler(m, cfgFld.HandlerId, cfgFld.Layout, extraParam.(time.Duration)) if err != nil { diff --git a/agents/libdmt_test.go b/agents/libdmt_test.go index 7d6924c42..fcddb1ce1 100644 --- a/agents/libdmt_test.go +++ b/agents/libdmt_test.go @@ -27,6 +27,7 @@ import ( "time" "github.com/cgrates/cgrates/config" + "github.com/cgrates/cgrates/sessionmanager" "github.com/cgrates/cgrates/utils" "github.com/fiorix/go-diameter/diam" "github.com/fiorix/go-diameter/diam/avp" @@ -133,12 +134,12 @@ func TestMetaSum(t *testing.T) { }), }, }) - if val, err := metaSum(m, utils.ParseRSRFieldsMustCompile("Requested-Service-Unit>CC-Money>Unit-Value>Value-Digits;^|;Requested-Service-Unit>CC-Money>Unit-Value>Exponent", utils.INFIELD_SEP), 10); err != nil { + if val, err := metaSum(m, utils.ParseRSRFieldsMustCompile("Requested-Service-Unit>CC-Money>Unit-Value>Value-Digits;^|;Requested-Service-Unit>CC-Money>Unit-Value>Exponent", utils.INFIELD_SEP), 0, 10); err != nil { t.Error(err) } else if val != "9995" { t.Error("Received: ", val) } - if _, err = metaSum(m, utils.ParseRSRFieldsMustCompile("Requested-Service-Unit>CC-Money>Unit-Value>Value-Digits;Requested-Service-Unit>CC-Money>Unit-Value>Exponent", utils.INFIELD_SEP), 10); err == nil { + if _, err = metaSum(m, 
utils.ParseRSRFieldsMustCompile("Requested-Service-Unit>CC-Money>Unit-Value>Value-Digits;Requested-Service-Unit>CC-Money>Unit-Value>Exponent", utils.INFIELD_SEP), 0, 10); err == nil { t.Error("Should have received error") // Insufficient number arguments } } @@ -396,3 +397,86 @@ func TestCCASetProcessorAVPs(t *testing.T) { t.Errorf("Expecting: %+v, received: %+v", eMessage, ccaMsg) } } + +func TestCCRAsSMGenericEvent(t *testing.T) { + ccr := &CCR{ // Bare information, just the one needed for answer + SessionId: "ccrasgen1", + AuthApplicationId: 4, + CCRequestType: 3, + } + ccr.diamMessage = ccr.AsBareDiameterMessage() + ccr.diamMessage.NewAVP("Multiple-Services-Credit-Control", avp.Mbit, 0, &diam.GroupedAVP{ + AVP: []*diam.AVP{ + diam.NewAVP(446, avp.Mbit, 0, &diam.GroupedAVP{ // Used-Service-Unit + AVP: []*diam.AVP{ + diam.NewAVP(420, avp.Mbit, 0, datatype.Unsigned32(17)), // CC-Time + diam.NewAVP(412, avp.Mbit, 0, datatype.Unsigned64(1341)), // CC-Input-Octets + diam.NewAVP(414, avp.Mbit, 0, datatype.Unsigned64(3079)), // CC-Output-Octets + }, + }), + diam.NewAVP(432, avp.Mbit, 0, datatype.Unsigned32(99)), + }, + }) + ccr.diamMessage.NewAVP("Multiple-Services-Credit-Control", avp.Mbit, 0, &diam.GroupedAVP{ + AVP: []*diam.AVP{ + diam.NewAVP(446, avp.Mbit, 0, &diam.GroupedAVP{ // Used-Service-Unit + AVP: []*diam.AVP{ + diam.NewAVP(452, avp.Mbit, 0, datatype.Enumerated(0)), // Tariff-Change-Usage + diam.NewAVP(420, avp.Mbit, 0, datatype.Unsigned32(20)), // CC-Time + diam.NewAVP(412, avp.Mbit, 0, datatype.Unsigned64(8046)), // CC-Input-Octets + diam.NewAVP(414, avp.Mbit, 0, datatype.Unsigned64(46193)), // CC-Output-Octets + }, + }), + diam.NewAVP(432, avp.Mbit, 0, datatype.Unsigned32(1)), + }, + }) + ccr.diamMessage.NewAVP("FramedIPAddress", avp.Mbit, 0, datatype.OctetString("0AE40041")) + cfgFlds := make([]*config.CfgCdrField, 0) + eSMGEv := sessionmanager.SMGenericEvent{"EventName": "DIAMETER_CCR"} + if rSMGEv, err := ccr.AsSMGenericEvent(cfgFlds); err != nil { + 
t.Error(err) + } else if !reflect.DeepEqual(eSMGEv, rSMGEv) { + t.Errorf("Expecting: %+v, received: %+v", eSMGEv, rSMGEv) + } + cfgFlds = []*config.CfgCdrField{ + &config.CfgCdrField{ + Tag: "LastUsed", + FieldFilter: utils.ParseRSRFieldsMustCompile("~Multiple-Services-Credit-Control>Used-Service-Unit>CC-Input-Octets:s/^(.*)$/test/(test);Multiple-Services-Credit-Control>Rating-Group(1)", utils.INFIELD_SEP), + FieldId: "LastUsed", + Type: "*handler", + HandlerId: "*sum", + Value: utils.ParseRSRFieldsMustCompile("Multiple-Services-Credit-Control>Used-Service-Unit>CC-Input-Octets;^|;Multiple-Services-Credit-Control>Used-Service-Unit>CC-Output-Octets", utils.INFIELD_SEP), + Mandatory: true, + }, + } + eSMGEv = sessionmanager.SMGenericEvent{"EventName": "DIAMETER_CCR", "LastUsed": "54239"} + if rSMGEv, err := ccr.AsSMGenericEvent(cfgFlds); err != nil { + t.Error(err) + } else if !reflect.DeepEqual(eSMGEv, rSMGEv) { + t.Errorf("Expecting: %+v, received: %+v", eSMGEv, rSMGEv) + } + cfgFlds = []*config.CfgCdrField{ + &config.CfgCdrField{ + Tag: "LastUsed", + FieldFilter: utils.ParseRSRFieldsMustCompile("~Multiple-Services-Credit-Control>Used-Service-Unit>CC-Input-Octets:s/^(.*)$/test/(test);Multiple-Services-Credit-Control>Rating-Group(99)", utils.INFIELD_SEP), + FieldId: "LastUsed", + Type: "*handler", + HandlerId: "*sum", + Value: utils.ParseRSRFieldsMustCompile("Multiple-Services-Credit-Control>Used-Service-Unit>CC-Input-Octets;^|;Multiple-Services-Credit-Control>Used-Service-Unit>CC-Output-Octets", utils.INFIELD_SEP), + Mandatory: true, + }, + } + eSMGEv = sessionmanager.SMGenericEvent{"EventName": "DIAMETER_CCR", "LastUsed": "4420"} + if rSMGEv, err := ccr.AsSMGenericEvent(cfgFlds); err != nil { + t.Error(err) + } else if !reflect.DeepEqual(eSMGEv, rSMGEv) { + t.Errorf("Expecting: %+v, received: %+v", eSMGEv, rSMGEv) + } +} + +func TestPassesFieldFilter(t *testing.T) { + m := diam.NewRequest(diam.CreditControl, 4, nil) // Multiple-Services-Credit-Control>Rating-Group 
+ if pass, _ := passesFieldFilter(m, utils.ParseRSRFieldsMustCompile("Multiple-Services-Credit-Control>Rating-Group(^$)", utils.INFIELD_SEP)[0], nil); !pass { + t.Error("Does not pass") + } +} diff --git a/apier/v1/accounts.go b/apier/v1/accounts.go index 6d006da25..620b64c48 100644 --- a/apier/v1/accounts.go +++ b/apier/v1/accounts.go @@ -318,7 +318,7 @@ func (self *ApierV1) GetAccounts(attr utils.AttrGetAccounts, reply *[]interface{ var accountKeys []string var err error if len(attr.AccountIds) == 0 { - if accountKeys, err = self.AccountDb.GetKeysForPrefix(utils.ACCOUNT_PREFIX + attr.Tenant); err != nil { + if accountKeys, err = self.AccountDb.GetKeysForPrefix(utils.ACCOUNT_PREFIX+attr.Tenant, true); err != nil { return err } } else { @@ -426,7 +426,7 @@ func (self *ApierV1) modifyBalance(aType string, attr *AttrAddBalance, reply *st Uuid: attr.BalanceUuid, ID: attr.BalanceId, Type: utils.StringPointer(attr.BalanceType), - Value: utils.Float64Pointer(attr.Value), + Value: &utils.ValueFormula{Static: attr.Value}, ExpirationDate: expTime, RatingSubject: attr.RatingSubject, Weight: attr.Weight, @@ -514,7 +514,6 @@ func (self *ApierV1) SetBalance(attr *AttrSetBalance, reply *string) error { Uuid: attr.BalanceUUID, ID: attr.BalanceID, Type: utils.StringPointer(attr.BalanceType), - Value: attr.Value, ExpirationDate: expTime, RatingSubject: attr.RatingSubject, Weight: attr.Weight, @@ -522,6 +521,9 @@ func (self *ApierV1) SetBalance(attr *AttrSetBalance, reply *string) error { Disabled: attr.Disabled, }, } + if attr.Value != nil { + a.Balance.Value = &utils.ValueFormula{Static: *attr.Value} + } if attr.Directions != nil { a.Balance.Directions = utils.StringMapPointer(utils.ParseStringMap(*attr.Directions)) } @@ -567,12 +569,11 @@ func (self *ApierV1) RemoveBalances(attr *AttrSetBalance, reply *string) error { at := &engine.ActionTiming{} at.SetAccountIDs(utils.StringMap{accID: true}) a := &engine.Action{ - ActionType: engine.SET_BALANCE, + ActionType: 
engine.REMOVE_BALANCE, Balance: &engine.BalanceFilter{ Uuid: attr.BalanceUUID, ID: attr.BalanceID, Type: utils.StringPointer(attr.BalanceType), - Value: attr.Value, ExpirationDate: expTime, RatingSubject: attr.RatingSubject, Weight: attr.Weight, @@ -580,6 +581,9 @@ func (self *ApierV1) RemoveBalances(attr *AttrSetBalance, reply *string) error { Disabled: attr.Disabled, }, } + if attr.Value != nil { + a.Balance.Value = &utils.ValueFormula{Static: *attr.Value} + } if attr.Directions != nil { a.Balance.Directions = utils.StringMapPointer(utils.ParseStringMap(*attr.Directions)) } diff --git a/apier/v1/aliases.go b/apier/v1/aliases.go index d736cd270..7800b6d72 100644 --- a/apier/v1/aliases.go +++ b/apier/v1/aliases.go @@ -20,6 +20,7 @@ package v1 import ( "errors" + "github.com/cgrates/cgrates/engine" "github.com/cgrates/cgrates/utils" ) @@ -51,7 +52,7 @@ func (self *ApierV1) AddRatingSubjectAliases(attrs AttrAddRatingSubjectAliases, als := engine.Alias{Direction: utils.META_OUT, Tenant: attrs.Tenant, Category: attrs.Category, Account: alias, Subject: alias, Context: utils.ALIAS_CONTEXT_RATING, Values: engine.AliasValues{&engine.AliasValue{DestinationId: utils.META_ANY, Pairs: engine.AliasPairs{"Account": map[string]string{alias: attrs.Subject}, "Subject": map[string]string{alias: attrs.Subject}}, Weight: 10.0}}} - if err := aliases.SetAlias(als, &ignr); err != nil { + if err := aliases.Call("AliasesV1.SetAlias", &engine.AttrAddAlias{Alias: &als}, &ignr); err != nil { return utils.NewErrServerError(err) } } @@ -69,7 +70,7 @@ func (self *ApierV1) RemRatingSubjectAliases(tenantRatingSubject engine.TenantRa return errors.New("ALIASES_NOT_ENABLED") } var reverseAliases map[string][]*engine.Alias - if err := aliases.GetReverseAlias(engine.AttrReverseAlias{Target: "Subject", Alias: tenantRatingSubject.Subject, Context: utils.ALIAS_CONTEXT_RATING}, &reverseAliases); err != nil { + if err := aliases.Call("AliasesV1.GetReverseAlias", &engine.AttrReverseAlias{Target: "Subject", 
Alias: tenantRatingSubject.Subject, Context: utils.ALIAS_CONTEXT_RATING}, &reverseAliases); err != nil { return utils.NewErrServerError(err) } var ignr string @@ -78,7 +79,7 @@ func (self *ApierV1) RemRatingSubjectAliases(tenantRatingSubject engine.TenantRa if alias.Tenant != tenantRatingSubject.Tenant { continue // From another tenant } - if err := aliases.RemoveAlias(*alias, &ignr); err != nil { + if err := aliases.Call("AliasesV1.RemoveAlias", alias, &ignr); err != nil { return utils.NewErrServerError(err) } } @@ -103,7 +104,7 @@ func (self *ApierV1) AddAccountAliases(attrs AttrAddAccountAliases, reply *strin als := engine.Alias{Direction: utils.META_OUT, Tenant: attrs.Tenant, Category: attrs.Category, Account: alias, Subject: alias, Context: utils.ALIAS_CONTEXT_RATING, Values: engine.AliasValues{&engine.AliasValue{DestinationId: utils.META_ANY, Pairs: engine.AliasPairs{"Account": map[string]string{alias: attrs.Account}, "Subject": map[string]string{alias: attrs.Account}}, Weight: 10.0}}} - if err := aliases.SetAlias(als, &ignr); err != nil { + if err := aliases.Call("AliasesV1.SetAlias", &engine.AttrAddAlias{Alias: &als}, &ignr); err != nil { return utils.NewErrServerError(err) } } @@ -121,7 +122,7 @@ func (self *ApierV1) RemAccountAliases(tenantAccount engine.TenantAccount, reply return errors.New("ALIASES_NOT_ENABLED") } var reverseAliases map[string][]*engine.Alias - if err := aliases.GetReverseAlias(engine.AttrReverseAlias{Target: "Account", Alias: tenantAccount.Account, Context: utils.ALIAS_CONTEXT_RATING}, &reverseAliases); err != nil { + if err := aliases.Call("AliasesV1.GetReverseAlias", &engine.AttrReverseAlias{Target: "Account", Alias: tenantAccount.Account, Context: utils.ALIAS_CONTEXT_RATING}, &reverseAliases); err != nil { return utils.NewErrServerError(err) } var ignr string @@ -130,7 +131,7 @@ func (self *ApierV1) RemAccountAliases(tenantAccount engine.TenantAccount, reply if alias.Tenant != tenantAccount.Tenant { continue // From another tenant 
} - if err := aliases.RemoveAlias(*alias, &ignr); err != nil { + if err := aliases.Call("AliasesV1.RemoveAlias", alias, &ignr); err != nil { return utils.NewErrServerError(err) } } diff --git a/apier/v1/apier.go b/apier/v1/apier.go index 9afe5cf63..b72335757 100644 --- a/apier/v1/apier.go +++ b/apier/v1/apier.go @@ -33,6 +33,7 @@ import ( "github.com/cgrates/cgrates/engine" "github.com/cgrates/cgrates/scheduler" "github.com/cgrates/cgrates/utils" + "github.com/cgrates/rpcclient" ) const ( @@ -48,8 +49,9 @@ type ApierV1 struct { Sched *scheduler.Scheduler Config *config.CGRConfig Responder *engine.Responder - CdrStatsSrv engine.StatsInterface - Users engine.UserService + CdrStatsSrv rpcclient.RpcClientConnection + Users rpcclient.RpcClientConnection + CDRs rpcclient.RpcClientConnection // FixMe: populate it from cgr-engine } func (self *ApierV1) GetDestination(dstId string, reply *engine.Destination) error { @@ -103,11 +105,12 @@ func (self *ApierV1) GetRatingPlan(rplnId string, reply *engine.RatingPlan) erro } func (self *ApierV1) ExecuteAction(attr *utils.AttrExecuteAction, reply *string) error { - accID := utils.AccountKey(attr.Tenant, attr.Account) at := &engine.ActionTiming{ ActionsID: attr.ActionsId, } - at.SetAccountIDs(utils.StringMap{accID: true}) + if attr.Tenant != "" && attr.Account != "" { + at.SetAccountIDs(utils.StringMap{utils.AccountKey(attr.Tenant, attr.Account): true}) + } if err := at.Execute(); err != nil { *reply = err.Error() return err @@ -375,13 +378,13 @@ func (self *ApierV1) LoadTariffPlanFromStorDb(attrs AttrLoadTpFromStorDb, reply } if len(cstKeys) != 0 && self.CdrStatsSrv != nil { - if err := self.CdrStatsSrv.ReloadQueues(cstKeys, nil); err != nil { + if err := self.CdrStatsSrv.Call("CDRStatsV1.ReloadQueues", cstKeys, nil); err != nil { return err } } if len(userKeys) != 0 && self.Users != nil { var r string - if err := self.Users.ReloadUsers("", &r); err != nil { + if err := self.Users.Call("AliasV1.ReloadUsers", "", &r); err != nil { 
return err } } @@ -502,10 +505,10 @@ func (self *ApierV1) SetActions(attrs utils.AttrSetActions, reply *string) error } storeActions := make(engine.Actions, len(attrs.Actions)) for idx, apiAct := range attrs.Actions { - var units *float64 + var vf *utils.ValueFormula if apiAct.Units != "" { - if x, err := strconv.ParseFloat(apiAct.Units, 64); err == nil { - units = &x + if x, err := utils.ParseBalanceFilterValue(apiAct.Units); err == nil { + vf = x } else { return err } @@ -521,17 +524,17 @@ func (self *ApierV1) SetActions(attrs utils.AttrSetActions, reply *string) error } a := &engine.Action{ - Id: utils.GenUUID(), + Id: attrs.ActionsId, ActionType: apiAct.Identifier, Weight: apiAct.Weight, ExpirationString: apiAct.ExpiryTime, ExtraParameters: apiAct.ExtraParameters, Filter: apiAct.Filter, Balance: &engine.BalanceFilter{ // TODO: update this part - Uuid: utils.StringPointer(utils.GenUUID()), + Uuid: utils.StringPointer(apiAct.BalanceUuid), ID: utils.StringPointer(apiAct.BalanceId), Type: utils.StringPointer(apiAct.BalanceType), - Value: units, + Value: vf, Weight: weight, Directions: utils.StringMapPointer(utils.ParseStringMap(apiAct.Directions)), DestinationIDs: utils.StringMapPointer(utils.ParseStringMap(apiAct.DestinationIds)), @@ -560,7 +563,8 @@ func (self *ApierV1) GetActions(actsId string, reply *[]*utils.TPAction) error { return utils.NewErrServerError(err) } for _, engAct := range engActs { - act := &utils.TPAction{Identifier: engAct.ActionType, + act := &utils.TPAction{ + Identifier: engAct.ActionType, ExpiryTime: engAct.ExpirationString, ExtraParameters: engAct.ExtraParameters, Filter: engAct.Filter, @@ -808,23 +812,23 @@ func (self *ApierV1) GetCacheStats(attrs utils.AttrCacheStats, reply *utils.Cach cs.DerivedChargers = cache2go.CountEntries(utils.DERIVEDCHARGERS_PREFIX) cs.LcrProfiles = cache2go.CountEntries(utils.LCR_PREFIX) cs.Aliases = cache2go.CountEntries(utils.ALIASES_PREFIX) - if self.CdrStatsSrv != nil && self.Config.CDRStatsEnabled { + if 
self.CdrStatsSrv != nil { var queueIds []string - if err := self.CdrStatsSrv.GetQueueIds(0, &queueIds); err != nil { + if err := self.CdrStatsSrv.Call("CDRStatsV1.GetQueueIds", 0, &queueIds); err != nil { return utils.NewErrServerError(err) } cs.CdrStats = len(queueIds) } - if self.Config.RaterUserServer == utils.INTERNAL { + if self.Users != nil { var ups engine.UserProfiles - if err := self.Users.GetUsers(engine.UserProfile{}, &ups); err != nil { + if err := self.Users.Call("UsersV1.GetUsers", &engine.UserProfile{}, &ups); err != nil { return utils.NewErrServerError(err) } cs.Users = len(ups) } if loadHistInsts, err := self.AccountDb.GetLoadHistory(1, false); err != nil || len(loadHistInsts) == 0 { if err != nil { // Not really an error here since we only count in cache - utils.Logger.Err(fmt.Sprintf("ApierV1.GetCacheStats, error on GetLoadHistory: %s", err.Error())) + utils.Logger.Warning(fmt.Sprintf("ApierV1.GetCacheStats, error on GetLoadHistory: %s", err.Error())) } cs.LastLoadId = utils.NOT_AVAILABLE cs.LastLoadTime = utils.NOT_AVAILABLE @@ -960,13 +964,14 @@ func (self *ApierV1) LoadTariffPlanFromFolder(attrs utils.AttrLoadTpFromFolder, self.Sched.Reload(true) } if len(cstKeys) != 0 && self.CdrStatsSrv != nil { - if err := self.CdrStatsSrv.ReloadQueues(cstKeys, nil); err != nil { + var out int + if err := self.CdrStatsSrv.Call("CDRStatsV1.ReloadQueues", cstKeys, &out); err != nil { return err } } if len(userKeys) != 0 && self.Users != nil { var r string - if err := self.Users.ReloadUsers("", &r); err != nil { + if err := self.Users.Call("UsersV1.ReloadUsers", "", &r); err != nil { return err } } @@ -1060,3 +1065,66 @@ func (self *ApierV1) GetLoadHistory(attrs utils.Paginator, reply *[]*engine.Load *reply = loadHist[offset:nrItems] return nil } + +type AttrRemActions struct { + ActionIDs []string +} + +func (self *ApierV1) RemActions(attr AttrRemActions, reply *string) error { + if attr.ActionIDs == nil { + err := utils.ErrNotFound + *reply = err.Error() + 
return err + } + // The check could lead to very long execution time. So we decided to leave it at the user's risck.' + /* + stringMap := utils.NewStringMap(attr.ActionIDs...) + keys, err := self.RatingDb.GetKeysForPrefix(utils.ACTION_TRIGGER_PREFIX, true) + if err != nil { + *reply = err.Error() + return err + } + for _, key := range keys { + getAttrs, err := self.RatingDb.GetActionTriggers(key[len(utils.ACTION_TRIGGER_PREFIX):]) + if err != nil { + *reply = err.Error() + return err + } + for _, atr := range getAttrs { + if _, found := stringMap[atr.ActionsID]; found { + // found action trigger referencing action; abort + err := fmt.Errorf("action %s refenced by action trigger %s", atr.ActionsID, atr.ID) + *reply = err.Error() + return err + } + } + } + allAplsMap, err := self.RatingDb.GetAllActionPlans() + if err != nil && err != utils.ErrNotFound { + *reply = err.Error() + return err + } + for _, apl := range allAplsMap { + for _, atm := range apl.ActionTimings { + if _, found := stringMap[atm.ActionsID]; found { + err := fmt.Errorf("action %s refenced by action plan %s", atm.ActionsID, apl.Id) + *reply = err.Error() + return err + } + } + + } + */ + for _, aID := range attr.ActionIDs { + if err := self.RatingDb.RemoveActions(aID); err != nil { + *reply = err.Error() + return err + } + } + if err := self.RatingDb.CacheRatingPrefixes(utils.ACTION_PREFIX); err != nil { + *reply = err.Error() + return err + } + *reply = utils.OK + return nil +} diff --git a/apier/v1/apier_local_test.go b/apier/v1/apier_local_test.go index 47da716b5..7bc966260 100644 --- a/apier/v1/apier_local_test.go +++ b/apier/v1/apier_local_test.go @@ -1274,7 +1274,7 @@ func TestApierLoadTariffPlanFromFolder(t *testing.T) { } else if reply != "OK" { t.Error("Calling ApierV1.LoadTariffPlanFromFolder got reply: ", reply) } - time.Sleep(time.Duration(*waitRater) * time.Millisecond) + time.Sleep(time.Duration(2**waitRater) * time.Millisecond) } func TestApierResetDataAfterLoadFromFolder(t 
*testing.T) { @@ -1294,11 +1294,11 @@ func TestApierResetDataAfterLoadFromFolder(t *testing.T) { if err := rater.Call("ApierV1.GetCacheStats", args, &rcvStats); err != nil { t.Error("Got error on ApierV1.GetCacheStats: ", err.Error()) } else { - if rcvStats.Destinations != 4 || - rcvStats.RatingPlans != 3 || - rcvStats.RatingProfiles != 3 || - rcvStats.Actions != 6 || - rcvStats.DerivedChargers != 2 { + if rcvStats.Destinations != 5 || + rcvStats.RatingPlans != 5 || + rcvStats.RatingProfiles != 5 || + rcvStats.Actions != 11 || + rcvStats.DerivedChargers != 3 { t.Errorf("Calling ApierV1.GetCacheStats received: %+v", rcvStats) } } @@ -1359,7 +1359,7 @@ func TestApierGetCallCostLog(t *testing.T) { } attrs.CgrId = "dummyid" attrs.RunId = "default" - if err := rater.Call("ApierV1.GetCallCostLog", attrs, &cc); err == nil || err.Error() != "SERVER_ERROR: record not found" { + if err := rater.Call("ApierV1.GetCallCostLog", attrs, &cc); err == nil || err.Error() != utils.ErrNotFound.Error() { t.Error("ApierV1.GetCallCostLog: should return NOT_FOUND, got:", err) } } diff --git a/apier/v1/cdrs.go b/apier/v1/cdrs.go index fe52b49ab..a4ce0b5af 100644 --- a/apier/v1/cdrs.go +++ b/apier/v1/cdrs.go @@ -19,6 +19,7 @@ along with this program. 
If not, see package v1 import ( + "errors" "fmt" "github.com/cgrates/cgrates/engine" @@ -26,19 +27,19 @@ import ( ) // Retrieves the callCost out of CGR logDb -func (apier *ApierV1) GetCallCostLog(attrs utils.AttrGetCallCost, reply *engine.CallCost) error { +func (apier *ApierV1) GetCallCostLog(attrs utils.AttrGetCallCost, reply *engine.SMCost) error { if attrs.CgrId == "" { return utils.NewErrMandatoryIeMissing("CgrId") } if attrs.RunId == "" { attrs.RunId = utils.META_DEFAULT } - if cc, err := apier.CdrDb.GetCallCostLog(attrs.CgrId, attrs.RunId); err != nil { + if smcs, err := apier.CdrDb.GetSMCosts(attrs.CgrId, attrs.RunId, "", ""); err != nil { return utils.NewErrServerError(err) - } else if cc == nil { + } else if len(smcs) == 0 { return utils.ErrNotFound } else { - *reply = *cc + *reply = *smcs[0] } return nil } @@ -85,3 +86,11 @@ func (apier *ApierV1) RemoveCDRs(attrs utils.RPCCDRsFilter, reply *string) error *reply = "OK" return nil } + +// New way of (re-)rating CDRs +func (apier *ApierV1) RateCDRs(attrs utils.AttrRateCDRs, reply *string) error { + if apier.CDRs == nil { + return errors.New("CDRS_NOT_ENABLED") + } + return apier.CDRs.Call("CDRsV1.RateCDRs", attrs, reply) +} diff --git a/apier/v1/cdrstatsv1.go b/apier/v1/cdrstatsv1.go index 6b6409be0..6e1f5122e 100644 --- a/apier/v1/cdrstatsv1.go +++ b/apier/v1/cdrstatsv1.go @@ -23,11 +23,12 @@ import ( "github.com/cgrates/cgrates/engine" "github.com/cgrates/cgrates/utils" + "github.com/cgrates/rpcclient" ) // Interact with Stats server type CDRStatsV1 struct { - CdrStats engine.StatsInterface + CdrStats rpcclient.RpcClientConnection } type AttrGetMetrics struct { @@ -38,31 +39,32 @@ func (sts *CDRStatsV1) GetMetrics(attr AttrGetMetrics, reply *map[string]float64 if len(attr.StatsQueueId) == 0 { return fmt.Errorf("%s:StatsQueueId", utils.ErrMandatoryIeMissing.Error()) } - return sts.CdrStats.GetValues(attr.StatsQueueId, reply) + return sts.CdrStats.Call("CDRStatsV1.GetValues", attr.StatsQueueId, reply) } 
func (sts *CDRStatsV1) GetQueueIds(empty string, reply *[]string) error { - return sts.CdrStats.GetQueueIds(0, reply) + return sts.CdrStats.Call("CDRStatsV1.GetQueueIds", 0, reply) } func (sts *CDRStatsV1) GetQueue(id string, sq *engine.StatsQueue) error { - return sts.CdrStats.GetQueue(id, sq) + return sts.CdrStats.Call("CDRStatsV1.GetQueue", id, sq) } func (sts *CDRStatsV1) AddQueue(cs *engine.CdrStats, reply *int) error { - return sts.CdrStats.AddQueue(cs, reply) + return sts.CdrStats.Call("CDRStatsV1.AddQueue", cs, reply) } func (sts *CDRStatsV1) RemoveQueue(qID string, reply *int) error { - return sts.CdrStats.RemoveQueue(qID, reply) + return sts.CdrStats.Call("CDRStatsV1.RemoveQueue", qID, reply) } func (sts *CDRStatsV1) GetQueueTriggers(id string, ats *engine.ActionTriggers) error { - return sts.CdrStats.GetQueueTriggers(id, ats) + return sts.CdrStats.Call("CDRStatsV1.GetQueueTriggers", id, ats) } func (sts *CDRStatsV1) ReloadQueues(attr utils.AttrCDRStatsReloadQueues, reply *string) error { - if err := sts.CdrStats.ReloadQueues(attr.StatsQueueIds, nil); err != nil { + var out int + if err := sts.CdrStats.Call("CDRStatsV1.ReloadQueues", attr.StatsQueueIds, &out); err != nil { return err } *reply = utils.OK @@ -70,7 +72,8 @@ func (sts *CDRStatsV1) ReloadQueues(attr utils.AttrCDRStatsReloadQueues, reply * } func (sts *CDRStatsV1) ResetQueues(attr utils.AttrCDRStatsReloadQueues, reply *string) error { - if err := sts.CdrStats.ResetQueues(attr.StatsQueueIds, nil); err != nil { + var out int + if err := sts.CdrStats.Call("CDRStatsV1.ResetQueues", attr.StatsQueueIds, &out); err != nil { return err } *reply = utils.OK diff --git a/apier/v1/cdrstatsv1_local_test.go b/apier/v1/cdrstatsv1_local_test.go index b9a2b7354..e1cd0493b 100644 --- a/apier/v1/cdrstatsv1_local_test.go +++ b/apier/v1/cdrstatsv1_local_test.go @@ -104,6 +104,13 @@ func TestCDRStatsLclGetQueueIds2(t *testing.T) { } else if len(eQueueIds) != len(queueIds) { t.Errorf("Expecting: %v, received: %v", 
eQueueIds, queueIds) } + var rcvMetrics map[string]float64 + expectedMetrics := map[string]float64{"ASR": -1, "ACD": -1} + if err := cdrstRpc.Call("CDRStatsV1.GetMetrics", AttrGetMetrics{StatsQueueId: "CDRST4"}, &rcvMetrics); err != nil { + t.Error("Calling CDRStatsV1.GetMetrics, got error: ", err.Error()) + } else if !reflect.DeepEqual(expectedMetrics, rcvMetrics) { + t.Errorf("Expecting: %v, received: %v", expectedMetrics, rcvMetrics) + } } func TestCDRStatsLclPostCdrs(t *testing.T) { @@ -112,28 +119,28 @@ func TestCDRStatsLclPostCdrs(t *testing.T) { } httpClient := new(http.Client) storedCdrs := []*engine.CDR{ - &engine.CDR{CGRID: utils.Sha1("dsafdsafa", time.Date(2013, 11, 7, 8, 42, 26, 0, time.UTC).String()), OrderID: 123, ToR: utils.VOICE, OriginID: "dsafdsaf", + &engine.CDR{CGRID: utils.Sha1("dsafdsafa", time.Date(2013, 11, 7, 8, 42, 26, 0, time.UTC).String()), OrderID: 123, ToR: utils.VOICE, OriginID: "dsafdsafa", OriginHost: "192.168.1.1", Source: "test", RequestType: utils.META_RATED, Direction: "*out", Tenant: "cgrates.org", Category: "call", Account: "1001", Subject: "1001", Destination: "+4986517174963", SetupTime: time.Now(), AnswerTime: time.Now(), RunID: utils.DEFAULT_RUNID, Usage: time.Duration(10) * time.Second, ExtraFields: map[string]string{"field_extr1": "val_extr1", "fieldextr2": "valextr2"}, Cost: 1.01, }, - &engine.CDR{CGRID: utils.Sha1("dsafdsafb", time.Date(2013, 11, 7, 8, 42, 26, 0, time.UTC).String()), OrderID: 123, ToR: utils.VOICE, OriginID: "dsafdsaf", + &engine.CDR{CGRID: utils.Sha1("dsafdsafb", time.Date(2013, 11, 7, 8, 42, 26, 0, time.UTC).String()), OrderID: 123, ToR: utils.VOICE, OriginID: "dsafdsafb", OriginHost: "192.168.1.1", Source: "test", RequestType: utils.META_RATED, Direction: "*out", Tenant: "cgrates.org", Category: "call", Account: "1001", Subject: "1001", Destination: "+4986517174963", SetupTime: time.Now(), AnswerTime: time.Now(), RunID: utils.DEFAULT_RUNID, Usage: time.Duration(5) * time.Second, ExtraFields: 
map[string]string{"field_extr1": "val_extr1", "fieldextr2": "valextr2"}, Cost: 1.01, }, - &engine.CDR{CGRID: utils.Sha1("dsafdsafc", time.Date(2013, 11, 7, 8, 42, 26, 0, time.UTC).String()), OrderID: 123, ToR: utils.VOICE, OriginID: "dsafdsaf", + &engine.CDR{CGRID: utils.Sha1("dsafdsafc", time.Date(2013, 11, 7, 8, 42, 26, 0, time.UTC).String()), OrderID: 123, ToR: utils.VOICE, OriginID: "dsafdsafc", OriginHost: "192.168.1.1", Source: "test", RequestType: utils.META_RATED, Direction: "*out", Tenant: "cgrates.org", Category: "call", Account: "1001", Subject: "1001", Destination: "+4986517174963", SetupTime: time.Now(), AnswerTime: time.Now(), RunID: utils.DEFAULT_RUNID, Usage: time.Duration(30) * time.Second, ExtraFields: map[string]string{"field_extr1": "val_extr1", "fieldextr2": "valextr2"}, Cost: 1.01, }, - &engine.CDR{CGRID: utils.Sha1("dsafdsafd", time.Date(2013, 11, 7, 8, 42, 26, 0, time.UTC).String()), OrderID: 123, ToR: utils.VOICE, OriginID: "dsafdsaf", + &engine.CDR{CGRID: utils.Sha1("dsafdsafd", time.Date(2013, 11, 7, 8, 42, 26, 0, time.UTC).String()), OrderID: 123, ToR: utils.VOICE, OriginID: "dsafdsafd", OriginHost: "192.168.1.1", Source: "test", RequestType: utils.META_RATED, Direction: "*out", Tenant: "cgrates.org", Category: "call", Account: "1001", Subject: "1001", Destination: "+4986517174963", SetupTime: time.Now(), AnswerTime: time.Time{}, @@ -147,7 +154,6 @@ func TestCDRStatsLclPostCdrs(t *testing.T) { } } time.Sleep(time.Duration(*waitRater) * time.Millisecond) - } func TestCDRStatsLclGetMetrics1(t *testing.T) { @@ -205,3 +211,12 @@ func TestCDRStatsLclResetMetrics(t *testing.T) { t.Errorf("Expecting: %v, received: %v", expectedMetrics2, rcvMetrics2) } } + +func TestCDRStatsLclKillEngine(t *testing.T) { + if !*testLocal { + return + } + if err := engine.KillEngine(*waitRater); err != nil { + t.Error(err) + } +} diff --git a/apier/v1/cdrsv1.go b/apier/v1/cdrsv1.go index db5681a03..c6b93d504 100644 --- a/apier/v1/cdrsv1.go +++ b/apier/v1/cdrsv1.go 
@@ -30,7 +30,7 @@ type CdrsV1 struct { // Designed for CGR internal usage func (self *CdrsV1) ProcessCdr(cdr *engine.CDR, reply *string) error { - if err := self.CdrSrv.ProcessCdr(cdr); err != nil { + if err := self.CdrSrv.LocalProcessCdr(cdr); err != nil { return utils.NewErrServerError(err) } *reply = utils.OK @@ -46,7 +46,7 @@ func (self *CdrsV1) ProcessExternalCdr(cdr *engine.ExternalCDR, reply *string) e return nil } -// Remotely start mediation with specific runid, runs asynchronously, it's status will be displayed in syslog +// Remotely (re)rating, deprecated func (self *CdrsV1) RateCdrs(attrs utils.AttrRateCdrs, reply *string) error { cdrsFltr, err := attrs.AsCDRsFilter(self.CdrSrv.Timezone()) if err != nil { @@ -59,10 +59,6 @@ func (self *CdrsV1) RateCdrs(attrs utils.AttrRateCdrs, reply *string) error { return nil } -func (self *CdrsV1) LogCallCost(ccl *engine.CallCostLog, reply *string) error { - if err := self.CdrSrv.LogCallCost(ccl); err != nil { - return utils.NewErrServerError(err) - } - *reply = utils.OK - return nil +func (self *CdrsV1) StoreSMCost(attr engine.AttrCDRSStoreSMCost, reply *string) error { + return self.CdrSrv.V1StoreSMCost(attr, reply) } diff --git a/apier/v1/smgenericv1.go b/apier/v1/smgenericv1.go index eed4310fb..1460bfaa2 100644 --- a/apier/v1/smgenericv1.go +++ b/apier/v1/smgenericv1.go @@ -97,6 +97,9 @@ func (self *SMGenericV1) ActiveSessions(attrs utils.AttrSMGGetActiveSessions, re if attrs.ToR != nil && *attrs.ToR != as.TOR { continue } + if attrs.OriginID != nil && *attrs.OriginID != as.OriginID { + continue + } if attrs.RunID != nil && *attrs.RunID != as.RunId { continue } diff --git a/apier/v1/smgenericv1_it_test.go b/apier/v1/smgenericv1_it_test.go index 6afd87eb4..17da136f8 100644 --- a/apier/v1/smgenericv1_it_test.go +++ b/apier/v1/smgenericv1_it_test.go @@ -116,7 +116,7 @@ func TestSMGV1CacheStats(t *testing.T) { } var rcvStats *utils.CacheStats - expectedStats := &utils.CacheStats{Destinations: 4, RatingPlans: 4, 
RatingProfiles: 9, Actions: 7, ActionPlans: 4, SharedGroups: 1, Aliases: 1, + expectedStats := &utils.CacheStats{Destinations: 4, RatingPlans: 4, RatingProfiles: 9, Actions: 8, ActionPlans: 4, SharedGroups: 1, Aliases: 1, DerivedChargers: 1, LcrProfiles: 5, CdrStats: 6, Users: 3, LastLoadId: smgV1LoadInst.LoadId, LastLoadTime: smgV1LoadInst.LoadTime.Format(time.RFC3339)} var args utils.AttrCacheStats if err := smgV1Rpc.Call("ApierV2.GetCacheStats", args, &rcvStats); err != nil { diff --git a/apier/v1/triggers.go b/apier/v1/triggers.go index 642c137c7..93d9e9ed1 100644 --- a/apier/v1/triggers.go +++ b/apier/v1/triggers.go @@ -129,7 +129,15 @@ func (self *ApierV1) RemoveAccountActionTriggers(attr AttrRemoveAccountActionTri return nil } -func (self *ApierV1) ResetAccountActionTriggers(attr AttrRemoveAccountActionTriggers, reply *string) error { +type AttrResetAccountActionTriggers struct { + Tenant string + Account string + GroupID string + UniqueID string + Executed bool +} + +func (self *ApierV1) ResetAccountActionTriggers(attr AttrResetAccountActionTriggers, reply *string) error { if missing := utils.MissingStructFields(&attr, []string{"Tenant", "Account"}); len(missing) != 0 { return utils.NewErrMandatoryIeMissing(missing...) 
@@ -146,11 +154,13 @@ func (self *ApierV1) ResetAccountActionTriggers(attr AttrRemoveAccountActionTrig if (attr.UniqueID == "" || at.UniqueID == attr.UniqueID) && (attr.GroupID == "" || at.ID == attr.GroupID) { // reset action trigger - at.Executed = false + at.Executed = attr.Executed } } - account.ExecuteActionTriggers(nil) + if attr.Executed == false { + account.ExecuteActionTriggers(nil) + } if err := self.AccountDb.SetAccount(account); err != nil { return 0, err } @@ -175,7 +185,7 @@ type AttrSetAccountActionTriggers struct { MinSleep *string ExpirationDate *string ActivationDate *string - BalanceId *string + BalanceID *string BalanceType *string BalanceDirections *[]string BalanceDestinationIds *[]string @@ -188,7 +198,7 @@ type AttrSetAccountActionTriggers struct { BalanceBlocker *bool BalanceDisabled *bool MinQueuedItems *int - ActionsId *string + ActionsID *string } func (self *ApierV1) SetAccountActionTriggers(attr AttrSetAccountActionTriggers, reply *string) error { @@ -238,8 +248,9 @@ func (self *ApierV1) SetAccountActionTriggers(attr AttrSetAccountActionTriggers, } at.ActivationDate = actTime } - if attr.BalanceId != nil { - at.Balance.ID = attr.BalanceId + at.Balance = &engine.BalanceFilter{} + if attr.BalanceID != nil { + at.Balance.ID = attr.BalanceID } if attr.BalanceType != nil { at.Balance.Type = attr.BalanceType @@ -281,8 +292,8 @@ func (self *ApierV1) SetAccountActionTriggers(attr AttrSetAccountActionTriggers, if attr.MinQueuedItems != nil { at.MinQueuedItems = *attr.MinQueuedItems } - if attr.ActionsId != nil { - at.ActionsID = *attr.ActionsId + if attr.ActionsID != nil { + at.ActionsID = *attr.ActionsID } } @@ -300,3 +311,180 @@ func (self *ApierV1) SetAccountActionTriggers(attr AttrSetAccountActionTriggers, *reply = utils.OK return nil } + +type AttrSetActionTrigger struct { + GroupID string + UniqueID string + ThresholdType *string + ThresholdValue *float64 + Recurrent *bool + MinSleep *string + ExpirationDate *string + ActivationDate 
*string + BalanceID *string + BalanceType *string + BalanceDirections *[]string + BalanceDestinationIds *[]string + BalanceWeight *float64 + BalanceExpirationDate *string + BalanceTimingTags *[]string + BalanceRatingSubject *string + BalanceCategories *[]string + BalanceSharedGroups *[]string + BalanceBlocker *bool + BalanceDisabled *bool + MinQueuedItems *int + ActionsID *string +} + +func (self *ApierV1) SetActionTrigger(attr AttrSetActionTrigger, reply *string) error { + + if missing := utils.MissingStructFields(&attr, []string{"GroupID"}); len(missing) != 0 { + return utils.NewErrMandatoryIeMissing(missing...) + } + + atrs, _ := self.RatingDb.GetActionTriggers(attr.GroupID) + var newAtr *engine.ActionTrigger + if attr.UniqueID != "" { + //search for exiting one + for _, atr := range atrs { + if atr.UniqueID == attr.UniqueID { + newAtr = atr + break + } + } + } + + if newAtr == nil { + newAtr = &engine.ActionTrigger{} + atrs = append(atrs, newAtr) + } + newAtr.ID = attr.GroupID + if attr.UniqueID != "" { + newAtr.UniqueID = attr.UniqueID + } else { + newAtr.UniqueID = utils.GenUUID() + } + + if attr.ThresholdType != nil { + newAtr.ThresholdType = *attr.ThresholdType + } + if attr.ThresholdValue != nil { + newAtr.ThresholdValue = *attr.ThresholdValue + } + if attr.Recurrent != nil { + newAtr.Recurrent = *attr.Recurrent + } + if attr.MinSleep != nil { + minSleep, err := utils.ParseDurationWithSecs(*attr.MinSleep) + if err != nil { + *reply = err.Error() + return err + } + newAtr.MinSleep = minSleep + } + if attr.ExpirationDate != nil { + expTime, err := utils.ParseTimeDetectLayout(*attr.ExpirationDate, self.Config.DefaultTimezone) + if err != nil { + *reply = err.Error() + return err + } + newAtr.ExpirationDate = expTime + } + if attr.ActivationDate != nil { + actTime, err := utils.ParseTimeDetectLayout(*attr.ActivationDate, self.Config.DefaultTimezone) + if err != nil { + *reply = err.Error() + return err + } + newAtr.ActivationDate = actTime + } + newAtr.Balance 
= &engine.BalanceFilter{} + if attr.BalanceID != nil { + newAtr.Balance.ID = attr.BalanceID + } + if attr.BalanceType != nil { + newAtr.Balance.Type = attr.BalanceType + } + if attr.BalanceDirections != nil { + newAtr.Balance.Directions = utils.StringMapPointer(utils.NewStringMap(*attr.BalanceDirections...)) + } + if attr.BalanceDestinationIds != nil { + newAtr.Balance.DestinationIDs = utils.StringMapPointer(utils.NewStringMap(*attr.BalanceDestinationIds...)) + } + if attr.BalanceWeight != nil { + newAtr.Balance.Weight = attr.BalanceWeight + } + if attr.BalanceExpirationDate != nil { + balanceExpTime, err := utils.ParseDate(*attr.BalanceExpirationDate) + if err != nil { + *reply = err.Error() + return err + } + newAtr.Balance.ExpirationDate = &balanceExpTime + } + if attr.BalanceTimingTags != nil { + newAtr.Balance.TimingIDs = utils.StringMapPointer(utils.NewStringMap(*attr.BalanceTimingTags...)) + } + if attr.BalanceRatingSubject != nil { + newAtr.Balance.RatingSubject = attr.BalanceRatingSubject + } + if attr.BalanceCategories != nil { + newAtr.Balance.Categories = utils.StringMapPointer(utils.NewStringMap(*attr.BalanceCategories...)) + } + if attr.BalanceSharedGroups != nil { + newAtr.Balance.SharedGroups = utils.StringMapPointer(utils.NewStringMap(*attr.BalanceSharedGroups...)) + } + if attr.BalanceBlocker != nil { + newAtr.Balance.Blocker = attr.BalanceBlocker + } + if attr.BalanceDisabled != nil { + newAtr.Balance.Disabled = attr.BalanceDisabled + } + if attr.MinQueuedItems != nil { + newAtr.MinQueuedItems = *attr.MinQueuedItems + } + if attr.ActionsID != nil { + newAtr.ActionsID = *attr.ActionsID + } + + if err := self.RatingDb.SetActionTriggers(attr.GroupID, atrs); err != nil { + *reply = err.Error() + return err + } + //no cache for action triggers + *reply = utils.OK + return nil +} + +type AttrGetActionTriggers struct { + GroupIDs []string +} + +func (self *ApierV1) GetActionTriggers(attr AttrGetActionTriggers, atrs *engine.ActionTriggers) error { + var 
allAttrs engine.ActionTriggers + if len(attr.GroupIDs) > 0 { + for _, key := range attr.GroupIDs { + getAttrs, err := self.RatingDb.GetActionTriggers(key) + if err != nil { + return err + } + allAttrs = append(allAttrs, getAttrs...) + } + + } else { + keys, err := self.RatingDb.GetKeysForPrefix(utils.ACTION_TRIGGER_PREFIX, true) + if err != nil { + return err + } + for _, key := range keys { + getAttrs, err := self.RatingDb.GetActionTriggers(key[len(utils.ACTION_TRIGGER_PREFIX):]) + if err != nil { + return err + } + allAttrs = append(allAttrs, getAttrs...) + } + } + *atrs = allAttrs + return nil +} diff --git a/apier/v2/accounts.go b/apier/v2/accounts.go index c3c495edd..83d3d56f2 100644 --- a/apier/v2/accounts.go +++ b/apier/v2/accounts.go @@ -33,7 +33,7 @@ func (self *ApierV2) GetAccounts(attr utils.AttrGetAccounts, reply *[]*engine.Ac var accountKeys []string var err error if len(attr.AccountIds) == 0 { - if accountKeys, err = self.AccountDb.GetKeysForPrefix(utils.ACCOUNT_PREFIX + utils.ConcatenatedKey(attr.Tenant)); err != nil { + if accountKeys, err = self.AccountDb.GetKeysForPrefix(utils.ACCOUNT_PREFIX+attr.Tenant, true); err != nil { return err } } else { diff --git a/apier/v2/apier.go b/apier/v2/apier.go index 90b146480..e337400b0 100644 --- a/apier/v2/apier.go +++ b/apier/v2/apier.go @@ -21,6 +21,7 @@ package v2 import ( "errors" "fmt" + "math" "os" "path" "strings" @@ -220,14 +221,12 @@ func (self *ApierV2) LoadTariffPlanFromFolder(attrs utils.AttrLoadTpFromFolder, dcsKeys[idx] = utils.DERIVEDCHARGERS_PREFIX + dc } aps, _ := loader.GetLoadedIds(utils.ACTION_PLAN_PREFIX) - utils.Logger.Info("ApierV1.LoadTariffPlanFromFolder, reloading cache.") + utils.Logger.Info("ApierV2.LoadTariffPlanFromFolder, reloading cache.") cstKeys, _ := loader.GetLoadedIds(utils.CDR_STATS_PREFIX) userKeys, _ := loader.GetLoadedIds(utils.USERS_PREFIX) li := loader.GetLoadInstance() - - // release the tp data - loader.Init() + loader.Init() // release the tp data if err := 
self.RatingDb.CacheRatingPrefixValues(map[string][]string{ utils.DESTINATION_PREFIX: dstKeys, @@ -247,21 +246,76 @@ func (self *ApierV2) LoadTariffPlanFromFolder(attrs utils.AttrLoadTpFromFolder, return err } if len(aps) != 0 && self.Sched != nil { - utils.Logger.Info("ApierV1.LoadTariffPlanFromFolder, reloading scheduler.") + utils.Logger.Info("ApierV2.LoadTariffPlanFromFolder, reloading scheduler.") self.Sched.Reload(true) } if len(cstKeys) != 0 && self.CdrStatsSrv != nil { - if err := self.CdrStatsSrv.ReloadQueues(cstKeys, nil); err != nil { + var out int + if err := self.CdrStatsSrv.Call("CDRStatsV1.ReloadQueues", cstKeys, &out); err != nil { return err } } - if len(userKeys) != 0 && self.Users != nil { var r string - if err := self.Users.ReloadUsers("", &r); err != nil { + if err := self.Users.Call("UsersV1.ReloadUsers", "", &r); err != nil { return err } } *reply = *li return nil } + +type AttrGetActions struct { + ActionIDs []string + Offset int // Set the item offset + Limit int // Limit number of items retrieved +} + +// Retrieves actions attached to specific ActionsId within cache +func (self *ApierV2) GetActions(attr AttrGetActions, reply *map[string]engine.Actions) error { + var actionKeys []string + var err error + if len(attr.ActionIDs) == 0 { + if actionKeys, err = self.AccountDb.GetKeysForPrefix(utils.ACTION_PREFIX, false); err != nil { + return err + } + } else { + for _, accID := range attr.ActionIDs { + if len(accID) == 0 { // Source of error returned from redis (key not found) + continue + } + actionKeys = append(actionKeys, utils.ACCOUNT_PREFIX+accID) + } + } + if len(actionKeys) == 0 { + return nil + } + if attr.Offset > len(actionKeys) { + attr.Offset = len(actionKeys) + } + if attr.Offset < 0 { + attr.Offset = 0 + } + var limitedActions []string + if attr.Limit != 0 { + max := math.Min(float64(attr.Offset+attr.Limit), float64(len(actionKeys))) + limitedActions = actionKeys[attr.Offset:int(max)] + } else { + limitedActions = 
actionKeys[attr.Offset:] + } + retActions := make(map[string]engine.Actions) + for _, accKey := range limitedActions { + key := accKey[len(utils.ACTION_PREFIX):] + acts, err := self.RatingDb.GetActions(key, false) + if err != nil { + return utils.NewErrServerError(err) + } + if len(acts) > 0 { + retActions[key] = acts + + } + } + + *reply = retActions + return nil +} diff --git a/apier/v2/cdrs_mysql_local_test.go b/apier/v2/cdrs_mysql_local_test.go index bbbed922e..60fddfc70 100644 --- a/apier/v2/cdrs_mysql_local_test.go +++ b/apier/v2/cdrs_mysql_local_test.go @@ -247,6 +247,7 @@ func TestV2CDRsMySQLRateWithoutTP(t *testing.T) { if !*testLocal { return } + //"d32a571d7bcbc6700fd35c1c0c5c6f458a62e260" rawCdrCGRID := utils.Sha1("bbb1", time.Date(2015, 11, 21, 10, 47, 24, 0, time.UTC).String()) // Rate the injected CDR, should not rate it since we have no TP loaded attrs := utils.AttrRateCdrs{CgrIds: []string{rawCdrCGRID}} diff --git a/cache2go/response_cache.go b/cache2go/response_cache.go index 4ac57e3f7..c7631cc57 100644 --- a/cache2go/response_cache.go +++ b/cache2go/response_cache.go @@ -18,7 +18,7 @@ type CacheItem struct { type ResponseCache struct { ttl time.Duration cache map[string]*CacheItem - semaphore map[string]chan bool + semaphore map[string]chan bool // used for waiting till the first goroutine processes the response mu sync.RWMutex } @@ -32,6 +32,7 @@ func NewResponseCache(ttl time.Duration) *ResponseCache { } func (rc *ResponseCache) Cache(key string, item *CacheItem) { + //utils.Logger.Info("key: " + key) if rc.ttl == 0 { return } @@ -54,13 +55,21 @@ func (rc *ResponseCache) Get(key string) (*CacheItem, error) { if rc.ttl == 0 { return nil, utils.ErrNotImplemented } + rc.mu.RLock() + item, ok := rc.cache[key] + rc.mu.RUnlock() + if ok { + //utils.Logger.Info(",,,,,,,,,,,,,,,,,,,,,Found key: " + key) + return item, nil + } rc.wait(key) // wait for other goroutine processsing this key rc.mu.RLock() defer rc.mu.RUnlock() - item, ok := rc.cache[key] + 
item, ok = rc.cache[key] if !ok { return nil, ErrNotFound } + //utils.Logger.Info("............................Found key: " + key) return item, nil } diff --git a/cdrc/cdrc.go b/cdrc/cdrc.go index 3b3b0656e..2b35dd828 100644 --- a/cdrc/cdrc.go +++ b/cdrc/cdrc.go @@ -32,6 +32,7 @@ import ( "github.com/cgrates/cgrates/config" "github.com/cgrates/cgrates/engine" "github.com/cgrates/cgrates/utils" + "github.com/cgrates/rpcclient" "gopkg.in/fsnotify.v1" ) @@ -54,7 +55,7 @@ Common parameters within configs processed: Parameters specific per config instance: * duMultiplyFactor, cdrSourceId, cdrFilter, cdrFields */ -func NewCdrc(cdrcCfgs map[string]*config.CdrcConfig, httpSkipTlsCheck bool, cdrs engine.Connector, closeChan chan struct{}, dfltTimezone string) (*Cdrc, error) { +func NewCdrc(cdrcCfgs []*config.CdrcConfig, httpSkipTlsCheck bool, cdrs rpcclient.RpcClientConnection, closeChan chan struct{}, dfltTimezone string) (*Cdrc, error) { var cdrcCfg *config.CdrcConfig for _, cdrcCfg = range cdrcCfgs { // Take the first config out, does not matter which one break @@ -82,10 +83,10 @@ func NewCdrc(cdrcCfgs map[string]*config.CdrcConfig, httpSkipTlsCheck bool, cdrs type Cdrc struct { httpSkipTlsCheck bool - cdrcCfgs map[string]*config.CdrcConfig // All cdrc config profiles attached to this CDRC (key will be profile instance name) + cdrcCfgs []*config.CdrcConfig // All cdrc config profiles attached to this CDRC (key will be profile instance name) dfltCdrcCfg *config.CdrcConfig timezone string - cdrs engine.Connector + cdrs rpcclient.RpcClientConnection httpClient *http.Client closeChan chan struct{} // Used to signal config reloads when we need to span different CDRC-Client maxOpenFiles chan struct{} // Maximum number of simultaneous files processed @@ -201,7 +202,7 @@ func (self *Cdrc) processFile(filePath string) error { utils.Logger.Info(fmt.Sprintf(" DryRun CDR: %+v", storedCdr)) continue } - if err := self.cdrs.ProcessCdr(storedCdr, &reply); err != nil { + if err := 
self.cdrs.Call("Responder.ProcessCdr", storedCdr, &reply); err != nil { utils.Logger.Err(fmt.Sprintf(" Failed sending CDR, %+v, error: %s", storedCdr, err.Error())) } else if reply != "OK" { utils.Logger.Err(fmt.Sprintf(" Received unexpected reply for CDR, %+v, reply: %s", storedCdr, reply)) diff --git a/cdrc/cdrc_local_test.go b/cdrc/cdrc_local_test.go deleted file mode 100644 index 1fa731eae..000000000 --- a/cdrc/cdrc_local_test.go +++ /dev/null @@ -1,219 +0,0 @@ -/* -Rating system designed to be used in VoIP Carriers World -Copyright (C) 2012-2015 ITsysCOM - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see -*/ - -package cdrc - -import ( - "errors" - "flag" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path" - "testing" - "time" - - "github.com/cgrates/cgrates/config" - "github.com/cgrates/cgrates/engine" - "github.com/cgrates/cgrates/utils" -) - -/* -README: - - Enable local tests by passing '-local' to the go test command - It is expected that the data folder of CGRateS exists at path /usr/share/cgrates/data or passed via command arguments. - Prior running the tests, create database and users by running: - mysql -pyourrootpwd < /usr/share/cgrates/data/storage/mysql/create_db_with_users.sql - What these tests do: - * Flush tables in storDb. - * Start engine with default configuration and give it some time to listen (here caching can slow down). 
- * -*/ - -var cfgPath string -var cfg *config.CGRConfig -var cdrcCfgs map[string]*config.CdrcConfig -var cdrcCfg *config.CdrcConfig - -var testLocal = flag.Bool("local", false, "Perform the tests only on local test environment, not by default.") // This flag will be passed here via "go test -local" args -var dataDir = flag.String("data_dir", "/usr/share/cgrates", "CGR data dir path here") -var storDbType = flag.String("stordb_type", "mysql", "The type of the storDb database ") -var waitRater = flag.Int("wait_rater", 300, "Number of miliseconds to wait for rater to start and cache") - -var fileContent1 = `accid11,prepaid,out,cgrates.org,call,1001,1001,+4986517174963,2013-02-03 19:54:00,62,supplier1,172.16.1.1 -accid12,prepaid,out,cgrates.org,call,1001,1001,+4986517174963,2013-02-03 19:54:00,62,supplier1,172.16.1.1 -dummy_data -accid13,prepaid,out,cgrates.org,call,1001,1001,+4986517174963,2013-02-03 19:54:00,62,supplier1,172.16.1.1 -` - -var fileContent2 = `accid21,prepaid,out,cgrates.org,call,1001,1001,+4986517174963,2013-02-03 19:54:00,62,supplier1,172.16.1.1 -accid22,prepaid,out,cgrates.org,call,1001,1001,+4986517174963,2013-02-03 19:54:00,62,supplier1,172.16.1.1 -#accid1,prepaid,out,cgrates.org,call,1001,1001,+4986517174963,2013-02-03 19:54:00,62,supplier1,172.16.1.1 -accid23,prepaid,out,cgrates.org,call,1001,1001,+4986517174963,2013-02-03 19:54:00,62,supplier1,172.16.1.1` - -var fileContent3 = `accid31;prepaid;out;cgrates.org;call;1001;1001;+4986517174963;2013-02-03 19:54:00;62;supplier1;172.16.1.1 -accid32;prepaid;out;cgrates.org;call;1001;1001;+4986517174963;2013-02-03 19:54:00;62;supplier1;172.16.1.1 -#accid1;prepaid;out;cgrates.org;call;1001;1001;+4986517174963;2013-02-03 19:54:00;62;supplier1;172.16.1.1 -accid33;prepaid;out;cgrates.org;call;1001;1001;+4986517174963;2013-02-03 19:54:00;62;supplier1;172.16.1.1` - -func startEngine() error { - enginePath, err := exec.LookPath("cgr-engine") - if err != nil { - return errors.New("Cannot find cgr-engine 
executable") - } - stopEngine() - engine := exec.Command(enginePath, "-config", cfgPath) - if err := engine.Start(); err != nil { - return fmt.Errorf("Cannot start cgr-engine: %s", err.Error()) - } - time.Sleep(time.Duration(*waitRater) * time.Millisecond) // Give time to rater to fire up - return nil -} - -func stopEngine() error { - exec.Command("pkill", "cgr-engine").Run() // Just to make sure another one is not running, bit brutal maybe we can fine tune it - return nil -} - -// Need it here and not in init since Travis has no possibility to load local file -func TestCsvLclLoadConfigt(*testing.T) { - if !*testLocal { - return - } - cfgPath = path.Join(*dataDir, "conf", "samples", "apier") - cfg, _ = config.NewCGRConfigFromFolder(cfgPath) - if len(cfg.CdrcProfiles) > 0 { - cdrcCfgs = cfg.CdrcProfiles["/var/log/cgrates/cdrc/in"] - } -} - -func TestCsvLclEmptyTables(t *testing.T) { - if !*testLocal { - return - } - if *storDbType != utils.MYSQL { - t.Fatal("Unsupported storDbType") - } - mysql, err := engine.NewMySQLStorage(cfg.StorDBHost, cfg.StorDBPort, cfg.StorDBName, cfg.StorDBUser, cfg.StorDBPass, cfg.StorDBMaxOpenConns, cfg.StorDBMaxIdleConns) - if err != nil { - t.Fatal("Error on opening database connection: ", err) - } - for _, scriptName := range []string{utils.CREATE_CDRS_TABLES_SQL, utils.CREATE_TARIFFPLAN_TABLES_SQL} { - if err := mysql.CreateTablesFromScript(path.Join(*dataDir, "storage", *storDbType, scriptName)); err != nil { - t.Fatal("Error on mysql creation: ", err.Error()) - return // No point in going further - } - } - if _, err := mysql.Db.Query(fmt.Sprintf("SELECT 1 from %s", utils.TBL_CDRS)); err != nil { - t.Fatal(err.Error()) - } -} - -// Creates cdr files and starts the engine -func TestCsvLclCreateCdrFiles(t *testing.T) { - if !*testLocal { - return - } - if cdrcCfgs == nil { - t.Fatal("Empty default cdrc configuration") - } - for _, cdrcCfg = range cdrcCfgs { // Take the first config out, does not matter which one - break - } - if err := 
os.RemoveAll(cdrcCfg.CdrInDir); err != nil { - t.Fatal("Error removing folder: ", cdrcCfg.CdrInDir, err) - } - if err := os.MkdirAll(cdrcCfg.CdrInDir, 0755); err != nil { - t.Fatal("Error creating folder: ", cdrcCfg.CdrInDir, err) - } - if err := os.RemoveAll(cdrcCfg.CdrOutDir); err != nil { - t.Fatal("Error removing folder: ", cdrcCfg.CdrOutDir, err) - } - if err := os.MkdirAll(cdrcCfg.CdrOutDir, 0755); err != nil { - t.Fatal("Error creating folder: ", cdrcCfg.CdrOutDir, err) - } - if err := ioutil.WriteFile(path.Join(cdrcCfg.CdrInDir, "file1.csv"), []byte(fileContent1), 0644); err != nil { - t.Fatal(err.Error) - } - if err := ioutil.WriteFile(path.Join(cdrcCfg.CdrInDir, "file2.csv"), []byte(fileContent2), 0644); err != nil { - t.Fatal(err.Error) - } - -} - -func TestCsvLclProcessCdrDir(t *testing.T) { - if !*testLocal { - return - } - var cdrcCfg *config.CdrcConfig - for _, cdrcCfg = range cdrcCfgs { // Take the first config out, does not matter which one - break - } - if cdrcCfg.Cdrs == utils.INTERNAL { // For now we only test over network - cdrcCfg.Cdrs = "127.0.0.1:2013" - } - if err := startEngine(); err != nil { - t.Fatal(err.Error()) - } - cdrc, err := NewCdrc(cdrcCfgs, true, nil, make(chan struct{}), "") - if err != nil { - t.Fatal(err.Error()) - } - if err := cdrc.processCdrDir(); err != nil { - t.Error(err) - } - stopEngine() -} - -// Creates cdr files and starts the engine -func TestCsvLclCreateCdr3File(t *testing.T) { - if !*testLocal { - return - } - if err := os.RemoveAll(cdrcCfg.CdrInDir); err != nil { - t.Fatal("Error removing folder: ", cdrcCfg.CdrInDir, err) - } - if err := os.MkdirAll(cdrcCfg.CdrInDir, 0755); err != nil { - t.Fatal("Error creating folder: ", cdrcCfg.CdrInDir, err) - } - if err := ioutil.WriteFile(path.Join(cdrcCfg.CdrInDir, "file3.csv"), []byte(fileContent3), 0644); err != nil { - t.Fatal(err.Error) - } -} - -func TestCsvLclProcessCdr3Dir(t *testing.T) { - if !*testLocal { - return - } - if cdrcCfg.Cdrs == utils.INTERNAL { // 
For now we only test over network - cdrcCfg.Cdrs = "127.0.0.1:2013" - } - if err := startEngine(); err != nil { - t.Fatal(err.Error()) - } - cdrc, err := NewCdrc(cdrcCfgs, true, nil, make(chan struct{}), "") - if err != nil { - t.Fatal(err.Error()) - } - if err := cdrc.processCdrDir(); err != nil { - t.Error(err) - } - stopEngine() -} diff --git a/cdrc/cdrc_test.go b/cdrc/cdrc_test.go index ac515b468..e79af9382 100644 --- a/cdrc/cdrc_test.go +++ b/cdrc/cdrc_test.go @@ -156,7 +156,7 @@ BYE|3111f3c9|49ca4c42|a58ebaae40d08d6757d8424fb09c4c54@0:0:0:0:0:0:0:0|200|OK|14 }} cdrc := &Cdrc{CdrFormat: utils.OSIPS_FLATSTORE, cdrSourceIds: []string{"TEST_CDRC"}, failedCallsPrefix: "missed_calls", cdrFields: cdrFields, partialRecords: make(map[string]map[string]*PartialFlatstoreRecord), - guard: engine.NewGuardianLock()} + guard: engine.Guardian} cdrsContent := bytes.NewReader([]byte(flatstoreCdrs)) csvReader := csv.NewReader(cdrsContent) csvReader.Comma = '|' @@ -283,7 +283,7 @@ INVITE|324cb497|d4af7023|8deaadf2ae9a17809a391f05af31afb0@0:0:0:0:0:0:0:0|486|Bu }} cdrc := &Cdrc{CdrFormat: utils.OSIPS_FLATSTORE, cdrSourceIds: []string{"TEST_CDRC"}, failedCallsPrefix: "missed_calls", cdrFields: cdrFields, partialRecords: make(map[string]map[string]*PartialFlatstoreRecord), - guard: engine.NewGuardianLock()} + guard: engine.Guardian} cdrsContent := bytes.NewReader([]byte(flatstoreCdrs)) csvReader := csv.NewReader(cdrsContent) csvReader.Comma = '|' diff --git a/cdrc/csv.go b/cdrc/csv.go index c6daaaee0..00e9bd823 100644 --- a/cdrc/csv.go +++ b/cdrc/csv.go @@ -20,6 +20,7 @@ package cdrc import ( "encoding/csv" + "encoding/json" "errors" "fmt" "os" @@ -93,7 +94,7 @@ func pairToRecord(part1, part2 *PartialFlatstoreRecord) ([]string, error) { func NewPartialRecordsCache(ttl time.Duration, cdrOutDir string, csvSep rune) (*PartialRecordsCache, error) { return &PartialRecordsCache{ttl: ttl, cdrOutDir: cdrOutDir, csvSep: csvSep, - partialRecords: 
make(map[string]map[string]*PartialFlatstoreRecord), guard: engine.NewGuardianLock()}, nil + partialRecords: make(map[string]map[string]*PartialFlatstoreRecord), guard: engine.Guardian}, nil } type PartialRecordsCache struct { @@ -180,7 +181,7 @@ func (self *PartialRecordsCache) UncachePartial(fileName string, pr *PartialFlat } func NewCsvRecordsProcessor(csvReader *csv.Reader, timezone, fileName string, - dfltCdrcCfg *config.CdrcConfig, cdrcCfgs map[string]*config.CdrcConfig, + dfltCdrcCfg *config.CdrcConfig, cdrcCfgs []*config.CdrcConfig, httpSkipTlsCheck bool, partialRecordsCache *PartialRecordsCache) *CsvRecordsProcessor { return &CsvRecordsProcessor{csvReader: csvReader, timezone: timezone, fileName: fileName, dfltCdrcCfg: dfltCdrcCfg, cdrcCfgs: cdrcCfgs, @@ -193,7 +194,7 @@ type CsvRecordsProcessor struct { timezone string // Timezone for CDRs which are not clearly specifying it fileName string dfltCdrcCfg *config.CdrcConfig - cdrcCfgs map[string]*config.CdrcConfig + cdrcCfgs []*config.CdrcConfig processedRecordsNr int64 // Number of content records in file httpSkipTlsCheck bool partialRecordsCache *PartialRecordsCache // Shared by cdrc so we can cache for all files in a folder @@ -246,8 +247,8 @@ func (self *CsvRecordsProcessor) processPartialRecord(record []string) ([]string // Takes the record from a slice and turns it into StoredCdrs, posting them to the cdrServer func (self *CsvRecordsProcessor) processRecord(record []string) ([]*engine.CDR, error) { - recordCdrs := make([]*engine.CDR, 0) // More CDRs based on the number of filters and field templates - for cdrcId, cdrcCfg := range self.cdrcCfgs { // cdrFields coming from more templates will produce individual storCdr records + recordCdrs := make([]*engine.CDR, 0) // More CDRs based on the number of filters and field templates + for _, cdrcCfg := range self.cdrcCfgs { // cdrFields coming from more templates will produce individual storCdr records // Make sure filters are matching filterBreak := false for 
_, rsrFilter := range cdrcCfg.CdrFilter { @@ -264,12 +265,12 @@ func (self *CsvRecordsProcessor) processRecord(record []string) ([]*engine.CDR, if filterBreak { // Stop importing cdrc fields profile due to non matching filter continue } - if storedCdr, err := self.recordToStoredCdr(record, cdrcId); err != nil { + if storedCdr, err := self.recordToStoredCdr(record, cdrcCfg); err != nil { return nil, fmt.Errorf("Failed converting to StoredCdr, error: %s", err.Error()) } else { recordCdrs = append(recordCdrs, storedCdr) } - if !self.cdrcCfgs[cdrcId].ContinueOnSuccess { + if !cdrcCfg.ContinueOnSuccess { break } } @@ -277,11 +278,11 @@ func (self *CsvRecordsProcessor) processRecord(record []string) ([]*engine.CDR, } // Takes the record out of csv and turns it into storedCdr which can be processed by CDRS -func (self *CsvRecordsProcessor) recordToStoredCdr(record []string, cdrcId string) (*engine.CDR, error) { - storedCdr := &engine.CDR{OriginHost: "0.0.0.0", Source: self.cdrcCfgs[cdrcId].CdrSourceId, ExtraFields: make(map[string]string), Cost: -1} +func (self *CsvRecordsProcessor) recordToStoredCdr(record []string, cdrcCfg *config.CdrcConfig) (*engine.CDR, error) { + storedCdr := &engine.CDR{OriginHost: "0.0.0.0", Source: cdrcCfg.CdrSourceId, ExtraFields: make(map[string]string), Cost: -1} var err error var lazyHttpFields []*config.CfgCdrField - for _, cdrFldCfg := range self.cdrcCfgs[cdrcId].ContentFields { + for _, cdrFldCfg := range cdrcCfg.ContentFields { if utils.IsSliceMember([]string{utils.KAM_FLATSTORE, utils.OSIPS_FLATSTORE}, self.dfltCdrcCfg.CdrFormat) { // Hardcode some values in case of flatstore switch cdrFldCfg.FieldId { case utils.ACCID: @@ -314,8 +315,8 @@ func (self *CsvRecordsProcessor) recordToStoredCdr(record []string, cdrcId strin } } storedCdr.CGRID = utils.Sha1(storedCdr.OriginID, storedCdr.SetupTime.UTC().String()) - if storedCdr.ToR == utils.DATA && self.cdrcCfgs[cdrcId].DataUsageMultiplyFactor != 0 { - storedCdr.Usage = 
time.Duration(float64(storedCdr.Usage.Nanoseconds()) * self.cdrcCfgs[cdrcId].DataUsageMultiplyFactor) + if storedCdr.ToR == utils.DATA && cdrcCfg.DataUsageMultiplyFactor != 0 { + storedCdr.Usage = time.Duration(float64(storedCdr.Usage.Nanoseconds()) * cdrcCfg.DataUsageMultiplyFactor) } for _, httpFieldCfg := range lazyHttpFields { // Lazy process the http fields var outValByte []byte @@ -323,7 +324,12 @@ func (self *CsvRecordsProcessor) recordToStoredCdr(record []string, cdrcId strin for _, rsrFld := range httpFieldCfg.Value { httpAddr += rsrFld.ParseValue("") } - if outValByte, err = utils.HttpJsonPost(httpAddr, self.httpSkipTlsCheck, storedCdr); err != nil && httpFieldCfg.Mandatory { + var jsn []byte + jsn, err = json.Marshal(storedCdr) + if err != nil { + return nil, err + } + if outValByte, err = utils.HttpJsonPost(httpAddr, self.httpSkipTlsCheck, jsn); err != nil && httpFieldCfg.Mandatory { return nil, err } else { fieldVal = string(outValByte) diff --git a/cdrc/csv_it_test.go b/cdrc/csv_it_test.go new file mode 100644 index 000000000..b2cf53b2d --- /dev/null +++ b/cdrc/csv_it_test.go @@ -0,0 +1,201 @@ +/* +Rating system designed to be used in VoIP Carriers World +Copyright (C) 2012-2015 ITsysCOM + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. 
If not, see +*/ + +package cdrc + +import ( + "flag" + "io/ioutil" + "net/rpc" + "net/rpc/jsonrpc" + "os" + "path" + "testing" + "time" + + "github.com/cgrates/cgrates/config" + "github.com/cgrates/cgrates/engine" + "github.com/cgrates/cgrates/utils" +) + +/* +README: + + Enable local tests by passing '-local' to the go test command + It is expected that the data folder of CGRateS exists at path /usr/share/cgrates/data or passed via command arguments. + Prior running the tests, create database and users by running: + mysql -pyourrootpwd < /usr/share/cgrates/data/storage/mysql/create_db_with_users.sql + What these tests do: + * Flush tables in storDb. + * Start engine with default configuration and give it some time to listen (here caching can slow down). + * +*/ + +var csvCfgPath string +var csvCfg *config.CGRConfig +var cdrcCfgs []*config.CdrcConfig +var cdrcCfg *config.CdrcConfig +var cdrcRpc *rpc.Client + +var testLocal = flag.Bool("local", false, "Perform the tests only on local test environment, not by default.") // This flag will be passed here via "go test -local" args +var testIT = flag.Bool("integration", false, "Perform the tests only on local test environment, not by default.") // This flag will be passed here via "go test -local" args +var dataDir = flag.String("data_dir", "/usr/share/cgrates", "CGR data dir path here") +var waitRater = flag.Int("wait_rater", 300, "Number of miliseconds to wait for rater to start and cache") + +var fileContent1 = `dbafe9c8614c785a65aabd116dd3959c3c56f7f6,default,*voice,dsafdsaf,*rated,*out,cgrates.org,call,1001,1001,+4986517174963,2013-11-07 08:42:25 +0000 UTC,2013-11-07 08:42:26 +0000 UTC,10s,1.0100,val_extra3,"",val_extra1 +dbafe9c8614c785a65aabd116dd3959c3c56f7f7,default,*voice,dsafdsag,*rated,*out,cgrates.org,call,1001,1001,+4986517174964,2013-11-07 09:42:25 +0000 UTC,2013-11-07 09:42:26 +0000 UTC,20s,1.0100,val_extra3,"",val_extra1 +` + +var fileContent2 = `accid21;*prepaid;itsyscom.com;1001;086517174963;2013-02-03 
19:54:00;62;val_extra3;"";val_extra1 +accid22;*postpaid;itsyscom.com;1001;+4986517174963;2013-02-03 19:54:00;123;val_extra3;"";val_extra1 +#accid1;*pseudoprepaid;itsyscom.com;1001;+4986517174963;2013-02-03 19:54:00;12;val_extra3;"";val_extra1 +accid23;*rated;cgrates.org;1001;086517174963;2013-02-03 19:54:00;26;val_extra3;"";val_extra1` + +func TestCsvITInitConfig(t *testing.T) { + if !*testIT { + return + } + var err error + csvCfgPath = path.Join(*dataDir, "conf", "samples", "cdrccsv") + if csvCfg, err = config.NewCGRConfigFromFolder(csvCfgPath); err != nil { + t.Fatal("Got config error: ", err.Error()) + } +} + +// InitDb so we can rely on count +func TestCsvITInitCdrDb(t *testing.T) { + if !*testIT { + return + } + if err := engine.InitStorDb(csvCfg); err != nil { + t.Fatal(err) + } +} + +func TestCsvITCreateCdrDirs(t *testing.T) { + if !*testIT { + return + } + for _, cdrcProfiles := range csvCfg.CdrcProfiles { + for _, cdrcInst := range cdrcProfiles { + for _, dir := range []string{cdrcInst.CdrInDir, cdrcInst.CdrOutDir} { + if err := os.RemoveAll(dir); err != nil { + t.Fatal("Error removing folder: ", dir, err) + } + if err := os.MkdirAll(dir, 0755); err != nil { + t.Fatal("Error creating folder: ", dir, err) + } + } + } + } +} + +func TestCsvITStartEngine(t *testing.T) { + if !*testIT { + return + } + if _, err := engine.StopStartEngine(csvCfgPath, *waitRater); err != nil { + t.Fatal(err) + } +} + +// Connect rpc client to rater +func TestCsvITRpcConn(t *testing.T) { + if !*testIT { + return + } + var err error + cdrcRpc, err = jsonrpc.Dial("tcp", csvCfg.RPCJSONListen) // We connect over JSON so we can also troubleshoot if needed + if err != nil { + t.Fatal("Could not connect to rater: ", err.Error()) + } +} + +// The default scenario, out of cdrc defined in .cfg file +func TestCsvITHandleCdr1File(t *testing.T) { + if !*testIT { + return + } + fileName := "file1.csv" + tmpFilePath := path.Join("/tmp", fileName) + if err := ioutil.WriteFile(tmpFilePath, 
[]byte(fileContent1), 0644); err != nil { + t.Fatal(err.Error) + } + if err := os.Rename(tmpFilePath, path.Join("/tmp/cdrctests/csvit1/in", fileName)); err != nil { + t.Fatal("Error moving file to processing directory: ", err) + } +} + +// Scenario out of first .xml config +func TestCsvITHandleCdr2File(t *testing.T) { + if !*testIT { + return + } + fileName := "file2.csv" + tmpFilePath := path.Join("/tmp", fileName) + if err := ioutil.WriteFile(tmpFilePath, []byte(fileContent2), 0644); err != nil { + t.Fatal(err.Error) + } + if err := os.Rename(tmpFilePath, path.Join("/tmp/cdrctests/csvit2/in", fileName)); err != nil { + t.Fatal("Error moving file to processing directory: ", err) + } +} + +func TestCsvITProcessedFiles(t *testing.T) { + if !*testIT { + return + } + time.Sleep(time.Duration(2**waitRater) * time.Millisecond) + if outContent1, err := ioutil.ReadFile("/tmp/cdrctests/csvit1/out/file1.csv"); err != nil { + t.Error(err) + } else if fileContent1 != string(outContent1) { + t.Errorf("Expecting: %q, received: %q", fileContent1, string(outContent1)) + } + if outContent2, err := ioutil.ReadFile("/tmp/cdrctests/csvit2/out/file2.csv"); err != nil { + t.Error(err) + } else if fileContent2 != string(outContent2) { + t.Errorf("Expecting: %q, received: %q", fileContent1, string(outContent2)) + } +} + +func TestCsvITAnalyseCDRs(t *testing.T) { + if !*testIT { + return + } + var reply []*engine.ExternalCDR + if err := cdrcRpc.Call("ApierV2.GetCdrs", utils.RPCCDRsFilter{}, &reply); err != nil { + t.Error("Unexpected error: ", err.Error()) + } else if len(reply) != 6 { // 1 injected, 1 rated, 1 *raw and it's pair in *default run + t.Error("Unexpected number of CDRs returned: ", len(reply)) + } + if err := cdrcRpc.Call("ApierV2.GetCdrs", utils.RPCCDRsFilter{DestinationPrefixes: []string{"08651"}}, &reply); err != nil { + t.Error("Unexpected error: ", err.Error()) + } else if len(reply) != 0 { // Original 08651 was converted + t.Error("Unexpected number of CDRs returned: ", 
len(reply)) + } + +} + +func TestCsvITKillEngine(t *testing.T) { + if !*testIT { + return + } + if err := engine.KillEngine(*waitRater); err != nil { + t.Error(err) + } +} diff --git a/cdrc/csv_test.go b/cdrc/csv_test.go index 066f564db..24458cbd0 100644 --- a/cdrc/csv_test.go +++ b/cdrc/csv_test.go @@ -30,21 +30,21 @@ import ( func TestCsvRecordForkCdr(t *testing.T) { cgrConfig, _ := config.NewDefaultCGRConfig() - cdrcConfig := cgrConfig.CdrcProfiles["/var/log/cgrates/cdrc/in"][utils.META_DEFAULT] + cdrcConfig := cgrConfig.CdrcProfiles["/var/log/cgrates/cdrc/in"][0] cdrcConfig.CdrSourceId = "TEST_CDRC" cdrcConfig.ContentFields = append(cdrcConfig.ContentFields, &config.CfgCdrField{Tag: "SupplierTest", Type: utils.META_COMPOSED, FieldId: utils.SUPPLIER, Value: []*utils.RSRField{&utils.RSRField{Id: "14"}}}) cdrcConfig.ContentFields = append(cdrcConfig.ContentFields, &config.CfgCdrField{Tag: "DisconnectCauseTest", Type: utils.META_COMPOSED, FieldId: utils.DISCONNECT_CAUSE, Value: []*utils.RSRField{&utils.RSRField{Id: "16"}}}) // - csvProcessor := &CsvRecordsProcessor{dfltCdrcCfg: cdrcConfig, cdrcCfgs: map[string]*config.CdrcConfig{"*default": cdrcConfig}} + csvProcessor := &CsvRecordsProcessor{dfltCdrcCfg: cdrcConfig, cdrcCfgs: []*config.CdrcConfig{cdrcConfig}} cdrRow := []string{"firstField", "secondField"} - _, err := csvProcessor.recordToStoredCdr(cdrRow, "*default") + _, err := csvProcessor.recordToStoredCdr(cdrRow, cdrcConfig) if err == nil { t.Error("Failed to corectly detect missing fields from record") } cdrRow = []string{"ignored", "ignored", utils.VOICE, "acc1", utils.META_PREPAID, "*out", "cgrates.org", "call", "1001", "1001", "+4986517174963", "2013-02-03 19:50:00", "2013-02-03 19:54:00", "62", "supplier1", "172.16.1.1", "NORMAL_DISCONNECT"} - rtCdr, err := csvProcessor.recordToStoredCdr(cdrRow, "*default") + rtCdr, err := csvProcessor.recordToStoredCdr(cdrRow, cdrcConfig) if err != nil { t.Error("Failed to parse CDR in rated cdr", err) } @@ -76,14 +76,14 
@@ func TestCsvRecordForkCdr(t *testing.T) { func TestCsvDataMultiplyFactor(t *testing.T) { cgrConfig, _ := config.NewDefaultCGRConfig() - cdrcConfig := cgrConfig.CdrcProfiles["/var/log/cgrates/cdrc/in"][utils.META_DEFAULT] + cdrcConfig := cgrConfig.CdrcProfiles["/var/log/cgrates/cdrc/in"][0] cdrcConfig.CdrSourceId = "TEST_CDRC" cdrcConfig.ContentFields = []*config.CfgCdrField{&config.CfgCdrField{Tag: "TORField", Type: utils.META_COMPOSED, FieldId: utils.TOR, Value: []*utils.RSRField{&utils.RSRField{Id: "0"}}}, &config.CfgCdrField{Tag: "UsageField", Type: utils.META_COMPOSED, FieldId: utils.USAGE, Value: []*utils.RSRField{&utils.RSRField{Id: "1"}}}} - csvProcessor := &CsvRecordsProcessor{dfltCdrcCfg: cdrcConfig, cdrcCfgs: map[string]*config.CdrcConfig{"*default": cdrcConfig}} - csvProcessor.cdrcCfgs["*default"].DataUsageMultiplyFactor = 0 + csvProcessor := &CsvRecordsProcessor{dfltCdrcCfg: cdrcConfig, cdrcCfgs: []*config.CdrcConfig{cdrcConfig}} + csvProcessor.cdrcCfgs[0].DataUsageMultiplyFactor = 0 cdrRow := []string{"*data", "1"} - rtCdr, err := csvProcessor.recordToStoredCdr(cdrRow, "*default") + rtCdr, err := csvProcessor.recordToStoredCdr(cdrRow, cdrcConfig) if err != nil { t.Error("Failed to parse CDR in rated cdr", err) } @@ -100,7 +100,7 @@ func TestCsvDataMultiplyFactor(t *testing.T) { if !reflect.DeepEqual(expectedCdr, rtCdr) { t.Errorf("Expected: \n%v, \nreceived: \n%v", expectedCdr, rtCdr) } - csvProcessor.cdrcCfgs["*default"].DataUsageMultiplyFactor = 1024 + csvProcessor.cdrcCfgs[0].DataUsageMultiplyFactor = 1024 expectedCdr = &engine.CDR{ CGRID: utils.Sha1("", sTime.String()), ToR: cdrRow[0], @@ -110,7 +110,7 @@ func TestCsvDataMultiplyFactor(t *testing.T) { ExtraFields: map[string]string{}, Cost: -1, } - if rtCdr, _ := csvProcessor.recordToStoredCdr(cdrRow, "*default"); !reflect.DeepEqual(expectedCdr, rtCdr) { + if rtCdr, _ := csvProcessor.recordToStoredCdr(cdrRow, cdrcConfig); !reflect.DeepEqual(expectedCdr, rtCdr) { t.Errorf("Expected: \n%v, 
\nreceived: \n%v", expectedCdr, rtCdr) } cdrRow = []string{"*voice", "1"} @@ -123,7 +123,7 @@ func TestCsvDataMultiplyFactor(t *testing.T) { ExtraFields: map[string]string{}, Cost: -1, } - if rtCdr, _ := csvProcessor.recordToStoredCdr(cdrRow, "*default"); !reflect.DeepEqual(expectedCdr, rtCdr) { + if rtCdr, _ := csvProcessor.recordToStoredCdr(cdrRow, cdrcConfig); !reflect.DeepEqual(expectedCdr, rtCdr) { t.Errorf("Expected: \n%v, \nreceived: \n%v", expectedCdr, rtCdr) } } diff --git a/cdrc/flatstore_local_test.go b/cdrc/flatstore_local_test.go index 44c3dcdc7..15653d577 100644 --- a/cdrc/flatstore_local_test.go +++ b/cdrc/flatstore_local_test.go @@ -85,7 +85,11 @@ func TestFlatstoreLclCreateCdrFiles(t *testing.T) { if flatstoreCfg == nil { t.Fatal("Empty default cdrc configuration") } - flatstoreCdrcCfg = flatstoreCfg.CdrcProfiles["/tmp/cgr_flatstore/cdrc/in"]["FLATSTORE"] + for _, cdrcCfg := range flatstoreCfg.CdrcProfiles["/tmp/cgr_flatstore/cdrc/in"] { + if cdrcCfg.ID == "FLATSTORE" { + flatstoreCdrcCfg = cdrcCfg + } + } if err := os.RemoveAll(flatstoreCdrcCfg.CdrInDir); err != nil { t.Fatal("Error removing folder: ", flatstoreCdrcCfg.CdrInDir, err) } diff --git a/cdrc/fwv.go b/cdrc/fwv.go index b1fffe60d..d672b58eb 100644 --- a/cdrc/fwv.go +++ b/cdrc/fwv.go @@ -20,16 +20,18 @@ package cdrc import ( "bufio" + "encoding/json" "fmt" - "github.com/cgrates/cgrates/config" - "github.com/cgrates/cgrates/engine" - "github.com/cgrates/cgrates/utils" "io" "net/http" "os" "strconv" "strings" "time" + + "github.com/cgrates/cgrates/config" + "github.com/cgrates/cgrates/engine" + "github.com/cgrates/cgrates/utils" ) func fwvValue(cdrLine string, indexStart, width int, padding string) string { @@ -47,14 +49,14 @@ func fwvValue(cdrLine string, indexStart, width int, padding string) string { return rawVal } -func NewFwvRecordsProcessor(file *os.File, dfltCfg *config.CdrcConfig, cdrcCfgs map[string]*config.CdrcConfig, httpClient *http.Client, httpSkipTlsCheck bool, timezone 
string) *FwvRecordsProcessor { +func NewFwvRecordsProcessor(file *os.File, dfltCfg *config.CdrcConfig, cdrcCfgs []*config.CdrcConfig, httpClient *http.Client, httpSkipTlsCheck bool, timezone string) *FwvRecordsProcessor { return &FwvRecordsProcessor{file: file, cdrcCfgs: cdrcCfgs, dfltCfg: dfltCfg, httpSkipTlsCheck: httpSkipTlsCheck, timezone: timezone} } type FwvRecordsProcessor struct { file *os.File dfltCfg *config.CdrcConfig // General parameters - cdrcCfgs map[string]*config.CdrcConfig + cdrcCfgs []*config.CdrcConfig httpClient *http.Client httpSkipTlsCheck bool timezone string @@ -123,11 +125,11 @@ func (self *FwvRecordsProcessor) ProcessNextRecord() ([]*engine.CDR, error) { } self.processedRecordsNr += 1 record := string(buf) - for cfgKey, cdrcCfg := range self.cdrcCfgs { - if passes := self.recordPassesCfgFilter(record, cfgKey); !passes { + for _, cdrcCfg := range self.cdrcCfgs { + if passes := self.recordPassesCfgFilter(record, cdrcCfg); !passes { continue } - if storedCdr, err := self.recordToStoredCdr(record, cfgKey); err != nil { + if storedCdr, err := self.recordToStoredCdr(record, cdrcCfg, cdrcCfg.ID); err != nil { return nil, fmt.Errorf("Failed converting to StoredCdr, error: %s", err.Error()) } else { recordCdrs = append(recordCdrs, storedCdr) @@ -139,9 +141,9 @@ func (self *FwvRecordsProcessor) ProcessNextRecord() ([]*engine.CDR, error) { return recordCdrs, nil } -func (self *FwvRecordsProcessor) recordPassesCfgFilter(record, configKey string) bool { +func (self *FwvRecordsProcessor) recordPassesCfgFilter(record string, cdrcCfg *config.CdrcConfig) bool { filterPasses := true - for _, rsrFilter := range self.cdrcCfgs[configKey].CdrFilter { + for _, rsrFilter := range cdrcCfg.CdrFilter { if rsrFilter == nil { // Nil filter does not need to match anything continue } @@ -156,8 +158,8 @@ func (self *FwvRecordsProcessor) recordPassesCfgFilter(record, configKey string) return filterPasses } -// Converts a record (header or normal) to StoredCdr -func (self 
*FwvRecordsProcessor) recordToStoredCdr(record string, cfgKey string) (*engine.CDR, error) { +// Converts a record (header or normal) to CDR +func (self *FwvRecordsProcessor) recordToStoredCdr(record string, cdrcCfg *config.CdrcConfig, cfgKey string) (*engine.CDR, error) { var err error var lazyHttpFields []*config.CfgCdrField var cfgFields []*config.CfgCdrField @@ -169,13 +171,13 @@ func (self *FwvRecordsProcessor) recordToStoredCdr(record string, cfgKey string) storedCdr = &engine.CDR{OriginHost: "0.0.0.0", ExtraFields: make(map[string]string), Cost: -1} } if cfgKey == "*header" { - cfgFields = self.dfltCfg.HeaderFields - storedCdr.Source = self.dfltCfg.CdrSourceId - duMultiplyFactor = self.dfltCfg.DataUsageMultiplyFactor + cfgFields = cdrcCfg.HeaderFields + storedCdr.Source = cdrcCfg.CdrSourceId + duMultiplyFactor = cdrcCfg.DataUsageMultiplyFactor } else { - cfgFields = self.cdrcCfgs[cfgKey].ContentFields - storedCdr.Source = self.cdrcCfgs[cfgKey].CdrSourceId - duMultiplyFactor = self.cdrcCfgs[cfgKey].DataUsageMultiplyFactor + cfgFields = cdrcCfg.ContentFields + storedCdr.Source = cdrcCfg.CdrSourceId + duMultiplyFactor = cdrcCfg.DataUsageMultiplyFactor } for _, cdrFldCfg := range cfgFields { var fieldVal string @@ -214,7 +216,12 @@ func (self *FwvRecordsProcessor) recordToStoredCdr(record string, cfgKey string) for _, rsrFld := range httpFieldCfg.Value { httpAddr += rsrFld.ParseValue("") } - if outValByte, err = utils.HttpJsonPost(httpAddr, self.httpSkipTlsCheck, storedCdr); err != nil && httpFieldCfg.Mandatory { + var jsn []byte + jsn, err = json.Marshal(storedCdr) + if err != nil { + return nil, err + } + if outValByte, err = utils.HttpJsonPost(httpAddr, self.httpSkipTlsCheck, jsn); err != nil && httpFieldCfg.Mandatory { return nil, err } else { fieldVal = string(outValByte) @@ -237,7 +244,7 @@ func (self *FwvRecordsProcessor) processHeader() error { return fmt.Errorf("In header, line len: %d, have read: %d", self.lineLen, nRead) } var err error - if 
self.headerCdr, err = self.recordToStoredCdr(string(buf), "*header"); err != nil { + if self.headerCdr, err = self.recordToStoredCdr(string(buf), self.dfltCfg, "*header"); err != nil { return err } return nil diff --git a/cdrc/fwv_local_test.go b/cdrc/fwv_local_test.go index 7f4b96fb4..53730ac12 100644 --- a/cdrc/fwv_local_test.go +++ b/cdrc/fwv_local_test.go @@ -19,8 +19,6 @@ along with this program. If not, see package cdrc import ( - "github.com/cgrates/cgrates/config" - "github.com/cgrates/cgrates/engine" "io/ioutil" "net/rpc" "net/rpc/jsonrpc" @@ -28,6 +26,9 @@ import ( "path" "testing" "time" + + "github.com/cgrates/cgrates/config" + "github.com/cgrates/cgrates/engine" ) var fwvCfgPath string @@ -91,7 +92,11 @@ func TestFwvLclCreateCdrFiles(t *testing.T) { if fwvCfg == nil { t.Fatal("Empty default cdrc configuration") } - fwvCdrcCfg = fwvCfg.CdrcProfiles["/tmp/cgr_fwv/cdrc/in"]["FWV1"] + for _, cdrcCfg := range fwvCfg.CdrcProfiles["/tmp/cgr_fwv/cdrc/in"] { + if cdrcCfg.ID == "FWV1" { + fwvCdrcCfg = cdrcCfg + } + } if err := os.RemoveAll(fwvCdrcCfg.CdrInDir); err != nil { t.Fatal("Error removing folder: ", fwvCdrcCfg.CdrInDir, err) } diff --git a/cdrc/fwv_test.go b/cdrc/fwv_test.go index a5a1175cd..7623ec124 100644 --- a/cdrc/fwv_test.go +++ b/cdrc/fwv_test.go @@ -45,11 +45,11 @@ func TestFwvValue(t *testing.T) { func TestFwvRecordPassesCfgFilter(t *testing.T) { //record, configKey string) bool { cgrConfig, _ := config.NewDefaultCGRConfig() - cdrcConfig := cgrConfig.CdrcProfiles["/var/log/cgrates/cdrc/in"][utils.META_DEFAULT] // We don't really care that is for .csv since all we want to test are the filters + cdrcConfig := cgrConfig.CdrcProfiles["/var/log/cgrates/cdrc/in"][0] // We don't really care that is for .csv since all we want to test are the filters cdrcConfig.CdrFilter = utils.ParseRSRFieldsMustCompile(`~52:s/^0(\d{9})/+49${1}/(^+49123123120)`, utils.INFIELD_SEP) fwvRp := &FwvRecordsProcessor{cdrcCfgs: 
cgrConfig.CdrcProfiles["/var/log/cgrates/cdrc/in"]} cdrLine := "CDR0000010 0 20120708181506000123451234 0040123123120 004 000018009980010001ISDN ABC 10Buiten uw regio EHV 00000009190000000009" - if passesFilter := fwvRp.recordPassesCfgFilter(cdrLine, utils.META_DEFAULT); !passesFilter { + if passesFilter := fwvRp.recordPassesCfgFilter(cdrLine, cdrcConfig); !passesFilter { t.Error("Not passes filter") } } diff --git a/cdre/cdrexporter.go b/cdre/cdrexporter.go index a98a26c95..c269385e1 100644 --- a/cdre/cdrexporter.go +++ b/cdre/cdrexporter.go @@ -116,13 +116,13 @@ type CdrExporter struct { // Return Json marshaled callCost attached to // Keep it separately so we test only this part in local tests func (cdre *CdrExporter) getCdrCostDetails(CGRID, runId string) (string, error) { - cc, err := cdre.cdrDb.GetCallCostLog(CGRID, runId) + smcs, err := cdre.cdrDb.GetSMCosts(CGRID, runId, "", "") if err != nil { return "", err - } else if cc == nil { + } else if len(smcs) == 0 { return "", nil } - ccJson, _ := json.Marshal(cc) + ccJson, _ := json.Marshal(smcs[0].CostDetails) return string(ccJson), nil } @@ -352,9 +352,13 @@ func (cdre *CdrExporter) processCdr(cdr *engine.CDR) error { case utils.META_HTTP_POST: var outValByte []byte httpAddr := cfgFld.Value.Id() + jsn, err := json.Marshal(cdr) + if err != nil { + return err + } if len(httpAddr) == 0 { err = fmt.Errorf("Empty http address for field %s type %s", cfgFld.Tag, cfgFld.Type) - } else if outValByte, err = utils.HttpJsonPost(httpAddr, cdre.httpSkipTlsCheck, cdr); err == nil { + } else if outValByte, err = utils.HttpJsonPost(httpAddr, cdre.httpSkipTlsCheck, jsn); err == nil { outVal = string(outValByte) if len(outVal) == 0 && cfgFld.Mandatory { err = fmt.Errorf("Empty result for http_post field: %s", cfgFld.Tag) diff --git a/cmd/cgr-engine/cgr-engine.go b/cmd/cgr-engine/cgr-engine.go index 1c1da74d7..ddf5b3b14 100644 --- a/cmd/cgr-engine/cgr-engine.go +++ b/cmd/cgr-engine/cgr-engine.go @@ -22,8 +22,8 @@ import ( "flag" 
"fmt" "log" + // _ "net/http/pprof" "os" - "reflect" "runtime" "runtime/pprof" "strconv" @@ -71,7 +71,7 @@ var ( err error ) -func startCdrcs(internalCdrSChan chan *engine.CdrServer, internalRaterChan chan *engine.Responder, exitChan chan bool) { +func startCdrcs(internalCdrSChan, internalRaterChan chan rpcclient.RpcClientConnection, exitChan chan bool) { cdrcInitialized := false // Control whether the cdrc was already initialized (so we don't reload in that case) var cdrcChildrenChan chan struct{} // Will use it to communicate with the children of one fork for { @@ -87,42 +87,34 @@ func startCdrcs(internalCdrSChan chan *engine.CdrServer, internalRaterChan chan } // Start CDRCs for _, cdrcCfgs := range cfg.CdrcProfiles { - var cdrcCfg *config.CdrcConfig - for _, cdrcCfg = range cdrcCfgs { // Take a random config out since they should be the same - break + var enabledCfgs []*config.CdrcConfig + for _, cdrcCfg := range cdrcCfgs { // Take a random config out since they should be the same + if cdrcCfg.Enabled { + enabledCfgs = append(enabledCfgs, cdrcCfg) + } } - if cdrcCfg.Enabled == false { - continue // Ignore not enabled + + if len(enabledCfgs) != 0 { + go startCdrc(internalCdrSChan, internalRaterChan, cdrcCfgs, cfg.HttpSkipTlsVerify, cdrcChildrenChan, exitChan) } - go startCdrc(internalCdrSChan, internalRaterChan, cdrcCfgs, cfg.HttpSkipTlsVerify, cdrcChildrenChan, exitChan) } cdrcInitialized = true // Initialized - } } // Fires up a cdrc instance -func startCdrc(internalCdrSChan chan *engine.CdrServer, internalRaterChan chan *engine.Responder, cdrcCfgs map[string]*config.CdrcConfig, httpSkipTlsCheck bool, +func startCdrc(internalCdrSChan, internalRaterChan chan rpcclient.RpcClientConnection, cdrcCfgs []*config.CdrcConfig, httpSkipTlsCheck bool, closeChan chan struct{}, exitChan chan bool) { - var cdrsConn engine.Connector var cdrcCfg *config.CdrcConfig for _, cdrcCfg = range cdrcCfgs { // Take the first config out, does not matter which one break } - if 
cdrcCfg.Cdrs == utils.INTERNAL { - cdrsChan := <-internalCdrSChan // This will signal that the cdrs part is populated in internalRaterChan - internalCdrSChan <- cdrsChan // Put it back for other components - resp := <-internalRaterChan - cdrsConn = resp - internalRaterChan <- resp - } else { - conn, err := rpcclient.NewRpcClient("tcp", cdrcCfg.Cdrs, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, nil) - if err != nil { - utils.Logger.Crit(fmt.Sprintf(" Could not connect to CDRS via RPC: %v", err)) - exitChan <- true - return - } - cdrsConn = &engine.RPCClientConnector{Client: conn} + cdrsConn, err := engine.NewRPCPool(rpcclient.POOL_FIRST, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, + cdrcCfg.CdrsConns, internalCdrSChan, cfg.InternalTtl) + if err != nil { + utils.Logger.Crit(fmt.Sprintf(" Could not connect to CDRS via RPC: %s", err.Error())) + exitChan <- true + return } cdrc, err := cdrc.NewCdrc(cdrcCfgs, httpSkipTlsCheck, cdrsConn, closeChan, cfg.DefaultTimezone) if err != nil { @@ -136,51 +128,31 @@ func startCdrc(internalCdrSChan chan *engine.CdrServer, internalRaterChan chan * } } -func startSmGeneric(internalSMGChan chan rpcclient.RpcClientConnection, internalRaterChan chan *engine.Responder, server *utils.Server, exitChan chan bool) { - utils.Logger.Info("Starting CGRateS SM-Generic service.") - var raterConn, cdrsConn engine.Connector - var client *rpcclient.RpcClient - var err error - // Connect to rater - for _, raterCfg := range cfg.SmGenericConfig.HaRater { - if raterCfg.Server == utils.INTERNAL { - resp := <-internalRaterChan - raterConn = resp // Will overwrite here for the sake of keeping internally the new configuration format for ha connections - internalRaterChan <- resp - } else { - client, err = rpcclient.NewRpcClient("tcp", raterCfg.Server, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, nil) - if err != nil { //Connected so no need to reiterate - utils.Logger.Crit(fmt.Sprintf(" Could not connect to Rater via RPC: %v", err)) - exitChan <- 
true - return - } - raterConn = &engine.RPCClientConnector{Client: client} +func startSmGeneric(internalSMGChan chan rpcclient.RpcClientConnection, internalRaterChan, internalCDRSChan chan rpcclient.RpcClientConnection, server *utils.Server, exitChan chan bool) { + utils.Logger.Info("Starting CGRateS SMGeneric service.") + var ralsConns, cdrsConn *rpcclient.RpcClientPool + if len(cfg.SmGenericConfig.RALsConns) != 0 { + ralsConns, err = engine.NewRPCPool(rpcclient.POOL_FIRST, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, + cfg.SmGenericConfig.RALsConns, internalRaterChan, cfg.InternalTtl) + if err != nil { + utils.Logger.Crit(fmt.Sprintf(" Could not connect to RAL: %s", err.Error())) + exitChan <- true + return } } - // Connect to CDRS - if reflect.DeepEqual(cfg.SmGenericConfig.HaCdrs, cfg.SmGenericConfig.HaRater) { - cdrsConn = raterConn - } else if len(cfg.SmGenericConfig.HaCdrs) != 0 { - for _, cdrsCfg := range cfg.SmGenericConfig.HaCdrs { - if cdrsCfg.Server == utils.INTERNAL { - resp := <-internalRaterChan - cdrsConn = resp - internalRaterChan <- resp - } else { - client, err = rpcclient.NewRpcClient("tcp", cdrsCfg.Server, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, nil) - if err != nil { - utils.Logger.Crit(fmt.Sprintf(" Could not connect to CDRS via RPC: %v", err)) - exitChan <- true - return - } - cdrsConn = &engine.RPCClientConnector{Client: client} - } + if len(cfg.SmGenericConfig.CDRsConns) != 0 { + cdrsConn, err = engine.NewRPCPool(rpcclient.POOL_FIRST, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, + cfg.SmGenericConfig.CDRsConns, internalCDRSChan, cfg.InternalTtl) + if err != nil { + utils.Logger.Crit(fmt.Sprintf(" Could not connect to RAL: %s", err.Error())) + exitChan <- true + return } } smg_econns := sessionmanager.NewSMGExternalConnections() - sm := sessionmanager.NewSMGeneric(cfg, raterConn, cdrsConn, cfg.DefaultTimezone, smg_econns) + sm := sessionmanager.NewSMGeneric(cfg, ralsConns, cdrsConn, cfg.DefaultTimezone, smg_econns) if err = 
sm.Connect(); err != nil { - utils.Logger.Err(fmt.Sprintf(" error: %s!", err)) + utils.Logger.Err(fmt.Sprintf(" error: %s!", err)) } // Register RPC handler smgRpc := v1.NewSMGenericV1(sm) @@ -198,31 +170,24 @@ func startSmGeneric(internalSMGChan chan rpcclient.RpcClientConnection, internal func startDiameterAgent(internalSMGChan, internalPubSubSChan chan rpcclient.RpcClientConnection, exitChan chan bool) { utils.Logger.Info("Starting CGRateS DiameterAgent service.") - var smgConn, pubsubConn *rpcclient.RpcClient - var err error - if cfg.DiameterAgentCfg().SMGeneric == utils.INTERNAL { - smgRpc := <-internalSMGChan - internalSMGChan <- smgRpc - smgConn, err = rpcclient.NewRpcClient("", "", 0, 0, rpcclient.INTERNAL_RPC, smgRpc) - } else { - smgConn, err = rpcclient.NewRpcClient("tcp", cfg.DiameterAgentCfg().SMGeneric, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, nil) + var smgConn, pubsubConn *rpcclient.RpcClientPool + if len(cfg.DiameterAgentCfg().SMGenericConns) != 0 { + smgConn, err = engine.NewRPCPool(rpcclient.POOL_FIRST, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, + cfg.DiameterAgentCfg().SMGenericConns, internalSMGChan, cfg.InternalTtl) + if err != nil { + utils.Logger.Crit(fmt.Sprintf(" Could not connect to SMG: %s", err.Error())) + exitChan <- true + return + } } - if err != nil { - utils.Logger.Crit(fmt.Sprintf(" Could not connect to SMG: %s", err.Error())) - exitChan <- true - return - } - if cfg.DiameterAgentCfg().PubSubS == utils.INTERNAL { - pubSubRpc := <-internalPubSubSChan - internalPubSubSChan <- pubSubRpc - pubsubConn, err = rpcclient.NewRpcClient("", "", 0, 0, rpcclient.INTERNAL_RPC, pubSubRpc) - } else if len(cfg.DiameterAgentCfg().PubSubS) != 0 { - pubsubConn, err = rpcclient.NewRpcClient("tcp", cfg.DiameterAgentCfg().PubSubS, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, nil) - } - if err != nil { - utils.Logger.Crit(fmt.Sprintf(" Could not connect to PubSubS: %s", err.Error())) - exitChan <- true - return + if 
len(cfg.DiameterAgentCfg().PubSubConns) != 0 { + pubsubConn, err = engine.NewRPCPool(rpcclient.POOL_FIRST, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, + cfg.DiameterAgentCfg().PubSubConns, internalPubSubSChan, cfg.InternalTtl) + if err != nil { + utils.Logger.Crit(fmt.Sprintf(" Could not connect to PubSubS: %s", err.Error())) + exitChan <- true + return + } } da, err := agents.NewDiameterAgent(cfg, smgConn, pubsubConn) if err != nil { @@ -236,146 +201,86 @@ func startDiameterAgent(internalSMGChan, internalPubSubSChan chan rpcclient.RpcC exitChan <- true } -func startSmFreeSWITCH(internalRaterChan chan *engine.Responder, cdrDb engine.CdrStorage, exitChan chan bool) { - utils.Logger.Info("Starting CGRateS SM-FreeSWITCH service.") - var raterConn, cdrsConn engine.Connector - var client *rpcclient.RpcClient - var err error - // Connect to rater - for _, raterCfg := range cfg.SmFsConfig.HaRater { - if raterCfg.Server == utils.INTERNAL { - resp := <-internalRaterChan - raterConn = resp // Will overwrite here for the sake of keeping internally the new configuration format for ha connections - internalRaterChan <- resp - } else { - client, err = rpcclient.NewRpcClient("tcp", raterCfg.Server, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, nil) - if err != nil { //Connected so no need to reiterate - utils.Logger.Crit(fmt.Sprintf(" Could not connect to rater via RPC: %v", err)) - exitChan <- true - return - } - raterConn = &engine.RPCClientConnector{Client: client} +func startSmFreeSWITCH(internalRaterChan, internalCDRSChan chan rpcclient.RpcClientConnection, cdrDb engine.CdrStorage, exitChan chan bool) { + utils.Logger.Info("Starting CGRateS SMFreeSWITCH service.") + var ralsConn, cdrsConn *rpcclient.RpcClientPool + if len(cfg.SmFsConfig.RALsConns) != 0 { + ralsConn, err = engine.NewRPCPool(rpcclient.POOL_FIRST, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, + cfg.SmFsConfig.RALsConns, internalRaterChan, cfg.InternalTtl) + if err != nil { + 
utils.Logger.Crit(fmt.Sprintf(" Could not connect to RAL: %s", err.Error())) + exitChan <- true + return } } - // Connect to CDRS - if reflect.DeepEqual(cfg.SmFsConfig.HaCdrs, cfg.SmFsConfig.HaRater) { - cdrsConn = raterConn - } else if len(cfg.SmFsConfig.HaCdrs) != 0 { - for _, cdrsCfg := range cfg.SmFsConfig.HaCdrs { - if cdrsCfg.Server == utils.INTERNAL { - resp := <-internalRaterChan - cdrsConn = resp - internalRaterChan <- resp - } else { - client, err = rpcclient.NewRpcClient("tcp", cdrsCfg.Server, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, nil) - if err != nil { - utils.Logger.Crit(fmt.Sprintf(" Could not connect to CDRS via RPC: %v", err)) - exitChan <- true - return - } - cdrsConn = &engine.RPCClientConnector{Client: client} - } + if len(cfg.SmFsConfig.CDRsConns) != 0 { + cdrsConn, err = engine.NewRPCPool(rpcclient.POOL_FIRST, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, + cfg.SmFsConfig.CDRsConns, internalCDRSChan, cfg.InternalTtl) + if err != nil { + utils.Logger.Crit(fmt.Sprintf(" Could not connect to RAL: %s", err.Error())) + exitChan <- true + return } } - sm := sessionmanager.NewFSSessionManager(cfg.SmFsConfig, raterConn, cdrsConn, cfg.DefaultTimezone) + sm := sessionmanager.NewFSSessionManager(cfg.SmFsConfig, ralsConn, cdrsConn, cfg.DefaultTimezone) smRpc.SMs = append(smRpc.SMs, sm) if err = sm.Connect(); err != nil { - utils.Logger.Err(fmt.Sprintf(" error: %s!", err)) + utils.Logger.Err(fmt.Sprintf(" error: %s!", err)) } exitChan <- true } -func startSmKamailio(internalRaterChan chan *engine.Responder, cdrDb engine.CdrStorage, exitChan chan bool) { - utils.Logger.Info("Starting CGRateS SM-Kamailio service.") - var raterConn, cdrsConn engine.Connector - var client *rpcclient.RpcClient - var err error - // Connect to rater - for _, raterCfg := range cfg.SmKamConfig.HaRater { - if raterCfg.Server == utils.INTERNAL { - resp := <-internalRaterChan - raterConn = resp // Will overwrite here for the sake of keeping internally the new configuration 
format for ha connections - internalRaterChan <- resp - } else { - client, err = rpcclient.NewRpcClient("tcp", raterCfg.Server, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, nil) - if err != nil { //Connected so no need to reiterate - utils.Logger.Crit(fmt.Sprintf(" Could not connect to rater via RPC: %v", err)) - exitChan <- true - return - } - raterConn = &engine.RPCClientConnector{Client: client} +func startSmKamailio(internalRaterChan, internalCDRSChan chan rpcclient.RpcClientConnection, cdrDb engine.CdrStorage, exitChan chan bool) { + utils.Logger.Info("Starting CGRateS SMKamailio service.") + var ralsConn, cdrsConn *rpcclient.RpcClientPool + if len(cfg.SmKamConfig.RALsConns) != 0 { + ralsConn, err = engine.NewRPCPool(rpcclient.POOL_FIRST, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, + cfg.SmKamConfig.RALsConns, internalRaterChan, cfg.InternalTtl) + if err != nil { + utils.Logger.Crit(fmt.Sprintf(" Could not connect to RAL: %s", err.Error())) + exitChan <- true + return } } - // Connect to CDRS - if reflect.DeepEqual(cfg.SmKamConfig.HaCdrs, cfg.SmKamConfig.HaRater) { - cdrsConn = raterConn - } else if len(cfg.SmKamConfig.HaCdrs) != 0 { - for _, cdrsCfg := range cfg.SmKamConfig.HaCdrs { - if cdrsCfg.Server == utils.INTERNAL { - resp := <-internalRaterChan - cdrsConn = resp - internalRaterChan <- resp - } else { - client, err = rpcclient.NewRpcClient("tcp", cdrsCfg.Server, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, nil) - if err != nil { - utils.Logger.Crit(fmt.Sprintf(" Could not connect to CDRS via RPC: %v", err)) - exitChan <- true - return - } - cdrsConn = &engine.RPCClientConnector{Client: client} - } + if len(cfg.SmKamConfig.CDRsConns) != 0 { + cdrsConn, err = engine.NewRPCPool(rpcclient.POOL_FIRST, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, + cfg.SmKamConfig.CDRsConns, internalCDRSChan, cfg.InternalTtl) + if err != nil { + utils.Logger.Crit(fmt.Sprintf(" Could not connect to RAL: %s", err.Error())) + exitChan <- true + return } } - sm, _ := 
sessionmanager.NewKamailioSessionManager(cfg.SmKamConfig, raterConn, cdrsConn, cfg.DefaultTimezone) + sm, _ := sessionmanager.NewKamailioSessionManager(cfg.SmKamConfig, ralsConn, cdrsConn, cfg.DefaultTimezone) smRpc.SMs = append(smRpc.SMs, sm) if err = sm.Connect(); err != nil { - utils.Logger.Err(fmt.Sprintf(" error: %s!", err)) + utils.Logger.Err(fmt.Sprintf(" error: %s!", err)) } exitChan <- true } -func startSmOpenSIPS(internalRaterChan chan *engine.Responder, cdrDb engine.CdrStorage, exitChan chan bool) { - utils.Logger.Info("Starting CGRateS SM-OpenSIPS service.") - var raterConn, cdrsConn engine.Connector - var client *rpcclient.RpcClient - var err error - // Connect to rater - for _, raterCfg := range cfg.SmOsipsConfig.HaRater { - if raterCfg.Server == utils.INTERNAL { - resp := <-internalRaterChan - raterConn = resp // Will overwrite here for the sake of keeping internally the new configuration format for ha connections - internalRaterChan <- resp - } else { - client, err = rpcclient.NewRpcClient("tcp", raterCfg.Server, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, nil) - if err != nil { //Connected so no need to reiterate - utils.Logger.Crit(fmt.Sprintf(" Could not connect to rater via RPC: %v", err)) - exitChan <- true - return - } - raterConn = &engine.RPCClientConnector{Client: client} +func startSmOpenSIPS(internalRaterChan, internalCDRSChan chan rpcclient.RpcClientConnection, cdrDb engine.CdrStorage, exitChan chan bool) { + utils.Logger.Info("Starting CGRateS SMOpenSIPS service.") + var ralsConn, cdrsConn *rpcclient.RpcClientPool + if len(cfg.SmOsipsConfig.RALsConns) != 0 { + ralsConn, err = engine.NewRPCPool(rpcclient.POOL_FIRST, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, + cfg.SmOsipsConfig.RALsConns, internalRaterChan, cfg.InternalTtl) + if err != nil { + utils.Logger.Crit(fmt.Sprintf(" Could not connect to RALs: %s", err.Error())) + exitChan <- true + return } } - // Connect to CDRS - if reflect.DeepEqual(cfg.SmOsipsConfig.HaCdrs, 
cfg.SmOsipsConfig.HaRater) { - cdrsConn = raterConn - } else if len(cfg.SmOsipsConfig.HaCdrs) != 0 { - for _, cdrsCfg := range cfg.SmOsipsConfig.HaCdrs { - if cdrsCfg.Server == utils.INTERNAL { - resp := <-internalRaterChan - cdrsConn = resp - internalRaterChan <- resp - } else { - client, err = rpcclient.NewRpcClient("tcp", cdrsCfg.Server, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, nil) - if err != nil { - utils.Logger.Crit(fmt.Sprintf(" Could not connect to CDRS via RPC: %v", err)) - exitChan <- true - return - } - cdrsConn = &engine.RPCClientConnector{Client: client} - } + if len(cfg.SmOsipsConfig.CDRsConns) != 0 { + cdrsConn, err = engine.NewRPCPool(rpcclient.POOL_FIRST, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, + cfg.SmOsipsConfig.CDRsConns, internalRaterChan, cfg.InternalTtl) + if err != nil { + utils.Logger.Crit(fmt.Sprintf(" Could not connect to CDRs: %s", err.Error())) + exitChan <- true + return } } - sm, _ := sessionmanager.NewOSipsSessionManager(cfg.SmOsipsConfig, cfg.Reconnects, raterConn, cdrsConn, cfg.DefaultTimezone) + sm, _ := sessionmanager.NewOSipsSessionManager(cfg.SmOsipsConfig, cfg.Reconnects, ralsConn, cdrsConn, cfg.DefaultTimezone) smRpc.SMs = append(smRpc.SMs, sm) if err := sm.Connect(); err != nil { utils.Logger.Err(fmt.Sprintf(" error: %s!", err)) @@ -383,102 +288,60 @@ func startSmOpenSIPS(internalRaterChan chan *engine.Responder, cdrDb engine.CdrS exitChan <- true } -func startCDRS(internalCdrSChan chan *engine.CdrServer, logDb engine.LogStorage, cdrDb engine.CdrStorage, - internalRaterChan chan *engine.Responder, internalPubSubSChan chan rpcclient.RpcClientConnection, - internalUserSChan chan engine.UserService, internalAliaseSChan chan engine.AliasService, - internalCdrStatSChan chan engine.StatsInterface, server *utils.Server, exitChan chan bool) { +func startCDRS(internalCdrSChan chan rpcclient.RpcClientConnection, logDb engine.LogStorage, cdrDb engine.CdrStorage, + internalRaterChan chan rpcclient.RpcClientConnection, 
internalPubSubSChan chan rpcclient.RpcClientConnection, + internalUserSChan chan rpcclient.RpcClientConnection, internalAliaseSChan chan rpcclient.RpcClientConnection, + internalCdrStatSChan chan rpcclient.RpcClientConnection, server *utils.Server, exitChan chan bool) { utils.Logger.Info("Starting CGRateS CDRS service.") - var err error - var client *rpcclient.RpcClient - // Rater connection init - var raterConn engine.Connector - if cfg.CDRSRater == utils.INTERNAL { - responder := <-internalRaterChan // Wait for rater to come up before start querying - raterConn = responder - internalRaterChan <- responder // Put back the connection since there might be other entities waiting for it - } else if len(cfg.CDRSRater) != 0 { - client, err = rpcclient.NewRpcClient("tcp", cfg.CDRSRater, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, nil) + var ralConn, pubSubConn, usersConn, aliasesConn, statsConn *rpcclient.RpcClientPool + if len(cfg.CDRSRaterConns) != 0 { // Conn pool towards RAL + ralConn, err = engine.NewRPCPool(rpcclient.POOL_FIRST, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, + cfg.CDRSRaterConns, internalRaterChan, cfg.InternalTtl) if err != nil { - utils.Logger.Crit(fmt.Sprintf(" Could not connect to rater: %s", err.Error())) + utils.Logger.Crit(fmt.Sprintf(" Could not connect to RAL: %s", err.Error())) exitChan <- true return } - raterConn = &engine.RPCClientConnector{Client: client} } - // Pubsub connection init - var pubSubConn rpcclient.RpcClientConnection - if cfg.CDRSPubSub == utils.INTERNAL { - pubSubs := <-internalPubSubSChan - pubSubConn = pubSubs - internalPubSubSChan <- pubSubs - } else if len(cfg.CDRSPubSub) != 0 { - client, err = rpcclient.NewRpcClient("tcp", cfg.CDRSPubSub, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, nil) + if len(cfg.CDRSPubSubSConns) != 0 { // Pubsub connection init + pubSubConn, err = engine.NewRPCPool(rpcclient.POOL_FIRST, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, + cfg.CDRSPubSubSConns, internalPubSubSChan, 
cfg.InternalTtl) if err != nil { - utils.Logger.Crit(fmt.Sprintf(" Could not connect to pubsub server: %s", err.Error())) + utils.Logger.Crit(fmt.Sprintf(" Could not connect to PubSubSystem: %s", err.Error())) exitChan <- true return } - pubSubConn = client } - // Users connection init - var usersConn engine.UserService - if cfg.CDRSUsers == utils.INTERNAL { - userS := <-internalUserSChan - usersConn = userS - internalUserSChan <- userS - } else if len(cfg.CDRSUsers) != 0 { - if cfg.CDRSRater == cfg.CDRSUsers { - usersConn = &engine.ProxyUserService{Client: client} - } else { - client, err = rpcclient.NewRpcClient("tcp", cfg.CDRSUsers, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, nil) - if err != nil { - utils.Logger.Crit(fmt.Sprintf(" Could not connect to users server: %s", err.Error())) - exitChan <- true - return - } - usersConn = &engine.ProxyUserService{Client: client} + if len(cfg.CDRSUserSConns) != 0 { // Users connection init + usersConn, err = engine.NewRPCPool(rpcclient.POOL_FIRST, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, + cfg.CDRSUserSConns, internalUserSChan, cfg.InternalTtl) + if err != nil { + utils.Logger.Crit(fmt.Sprintf(" Could not connect to UserS: %s", err.Error())) + exitChan <- true + return } } - // Aliases connection init - var aliasesConn engine.AliasService - if cfg.CDRSAliases == utils.INTERNAL { - aliaseS := <-internalAliaseSChan - aliasesConn = aliaseS - internalAliaseSChan <- aliaseS - } else if len(cfg.CDRSAliases) != 0 { - if cfg.CDRSRater == cfg.CDRSAliases { - aliasesConn = &engine.ProxyAliasService{Client: client} - } else { - client, err = rpcclient.NewRpcClient("tcp", cfg.CDRSAliases, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, nil) - if err != nil { - utils.Logger.Crit(fmt.Sprintf(" Could not connect to aliases server: %s", err.Error())) - exitChan <- true - return - } - aliasesConn = &engine.ProxyAliasService{Client: client} - } - } - // Stats connection init - var statsConn engine.StatsInterface - if 
cfg.CDRSStats == utils.INTERNAL { - statS := <-internalCdrStatSChan - statsConn = statS - internalCdrStatSChan <- statS - } else if len(cfg.CDRSStats) != 0 { - if cfg.CDRSRater == cfg.CDRSStats { - statsConn = &engine.ProxyStats{Client: client} - } else { - client, err = rpcclient.NewRpcClient("tcp", cfg.CDRSStats, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, nil) - if err != nil { - utils.Logger.Crit(fmt.Sprintf(" Could not connect to stats server: %s", err.Error())) - exitChan <- true - return - } - statsConn = &engine.ProxyStats{Client: client} + if len(cfg.CDRSAliaseSConns) != 0 { // Aliases connection init + aliasesConn, err = engine.NewRPCPool(rpcclient.POOL_FIRST, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, + cfg.CDRSAliaseSConns, internalAliaseSChan, cfg.InternalTtl) + if err != nil { + utils.Logger.Crit(fmt.Sprintf(" Could not connect to AliaseS: %s", err.Error())) + exitChan <- true + return } } - cdrServer, _ := engine.NewCdrServer(cfg, cdrDb, raterConn, pubSubConn, usersConn, aliasesConn, statsConn) + if len(cfg.CDRSStatSConns) != 0 { // Stats connection init + statsConn, err = engine.NewRPCPool(rpcclient.POOL_FIRST, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, + cfg.CDRSStatSConns, internalCdrStatSChan, cfg.InternalTtl) + if err != nil { + utils.Logger.Crit(fmt.Sprintf(" Could not connect to StatS: %s", err.Error())) + exitChan <- true + return + } + } + cdrServer, _ := engine.NewCdrServer(cfg, cdrDb, ralConn, pubSubConn, usersConn, aliasesConn, statsConn) + cdrServer.SetTimeToLive(cfg.ResponseCacheTTL, nil) utils.Logger.Info("Registering CDRS HTTP Handlers.") cdrServer.RegisterHandlersToServer(server) utils.Logger.Info("Registering CDRS RPC service.") @@ -486,10 +349,8 @@ func startCDRS(internalCdrSChan chan *engine.CdrServer, logDb engine.LogStorage, server.RpcRegister(&cdrSrv) server.RpcRegister(&v2.CdrsV2{CdrsV1: cdrSrv}) // Make the cdr server available for internal communication - responder := <-internalRaterChan // Retrieve again the 
responder - responder.CdrSrv = cdrServer // Attach connection to cdrServer in responder, so it can be used later - internalRaterChan <- responder // Put back the connection for the rest of the system - internalCdrSChan <- cdrServer // Signal that cdrS is operational + server.RpcRegister(cdrServer) // register CdrServer for internal usage (TODO: refactor this) + internalCdrSChan <- cdrServer // Signal that cdrS is operational } func startScheduler(internalSchedulerChan chan *scheduler.Scheduler, cacheDoneChan chan struct{}, ratingDb engine.RatingStorage, exitChan chan bool) { @@ -506,20 +367,20 @@ func startScheduler(internalSchedulerChan chan *scheduler.Scheduler, cacheDoneCh exitChan <- true // Should not get out of loop though } -func startCdrStats(internalCdrStatSChan chan engine.StatsInterface, ratingDb engine.RatingStorage, accountDb engine.AccountingStorage, server *utils.Server) { +func startCdrStats(internalCdrStatSChan chan rpcclient.RpcClientConnection, ratingDb engine.RatingStorage, accountDb engine.AccountingStorage, server *utils.Server) { cdrStats := engine.NewStats(ratingDb, accountDb, cfg.CDRStatsSaveInterval) server.RpcRegister(cdrStats) server.RpcRegister(&v1.CDRStatsV1{CdrStats: cdrStats}) // Public APIs internalCdrStatSChan <- cdrStats } -func startHistoryServer(internalHistorySChan chan history.Scribe, server *utils.Server, exitChan chan bool) { +func startHistoryServer(internalHistorySChan chan rpcclient.RpcClientConnection, server *utils.Server, exitChan chan bool) { scribeServer, err := history.NewFileScribe(cfg.HistoryDir, cfg.HistorySaveInterval) if err != nil { utils.Logger.Crit(fmt.Sprintf(" Could not start, error: %s", err.Error())) exitChan <- true } - server.RpcRegisterName("ScribeV1", scribeServer) + server.RpcRegisterName("HistoryV1", scribeServer) internalHistorySChan <- scribeServer } @@ -530,7 +391,7 @@ func startPubSubServer(internalPubSubSChan chan rpcclient.RpcClientConnection, a } // ToDo: Make sure we are caching before 
starting this one -func startAliasesServer(internalAliaseSChan chan engine.AliasService, accountDb engine.AccountingStorage, server *utils.Server, exitChan chan bool) { +func startAliasesServer(internalAliaseSChan chan rpcclient.RpcClientConnection, accountDb engine.AccountingStorage, server *utils.Server, exitChan chan bool) { aliasesServer := engine.NewAliasHandler(accountDb) server.RpcRegisterName("AliasesV1", aliasesServer) if err := accountDb.CacheAccountingPrefixes(utils.ALIASES_PREFIX); err != nil { @@ -541,7 +402,7 @@ func startAliasesServer(internalAliaseSChan chan engine.AliasService, accountDb internalAliaseSChan <- aliasesServer } -func startUsersServer(internalUserSChan chan engine.UserService, accountDb engine.AccountingStorage, server *utils.Server, exitChan chan bool) { +func startUsersServer(internalUserSChan chan rpcclient.RpcClientConnection, accountDb engine.AccountingStorage, server *utils.Server, exitChan chan bool) { userServer, err := engine.NewUserMap(accountDb, cfg.UserServerIndexes) if err != nil { utils.Logger.Crit(fmt.Sprintf(" Could not start, error: %s", err.Error())) @@ -552,13 +413,9 @@ func startUsersServer(internalUserSChan chan engine.UserService, accountDb engin internalUserSChan <- userServer } -func startRpc(server *utils.Server, internalRaterChan chan *engine.Responder, - internalCdrSChan chan *engine.CdrServer, - internalCdrStatSChan chan engine.StatsInterface, - internalHistorySChan chan history.Scribe, - internalPubSubSChan chan rpcclient.RpcClientConnection, - internalUserSChan chan engine.UserService, - internalAliaseSChan chan engine.AliasService) { +func startRpc(server *utils.Server, internalRaterChan, + internalCdrSChan, internalCdrStatSChan, internalHistorySChan, internalPubSubSChan, internalUserSChan, + internalAliaseSChan chan rpcclient.RpcClientConnection) { select { // Any of the rpc methods will unlock listening to rpc requests case resp := <-internalRaterChan: internalRaterChan <- resp @@ -619,7 +476,7 @@ func 
main() { } config.SetCgrConfig(cfg) // Share the config object if *raterEnabled { - cfg.RaterEnabled = *raterEnabled + cfg.RALsEnabled = *raterEnabled } if *schedEnabled { cfg.SchedulerEnabled = *schedEnabled @@ -632,7 +489,7 @@ func main() { var logDb engine.LogStorage var loadDb engine.LoadStorage var cdrDb engine.CdrStorage - if cfg.RaterEnabled || cfg.SchedulerEnabled || cfg.CDRStatsEnabled { // Only connect to dataDb if necessary + if cfg.RALsEnabled || cfg.SchedulerEnabled || cfg.CDRStatsEnabled { // Only connect to dataDb if necessary ratingDb, err = engine.ConfigureRatingStorage(cfg.TpDbType, cfg.TpDbHost, cfg.TpDbPort, cfg.TpDbName, cfg.TpDbUser, cfg.TpDbPass, cfg.DBDataEncoding) if err != nil { // Cannot configure getter database, show stopper @@ -642,7 +499,7 @@ func main() { defer ratingDb.Close() engine.SetRatingStorage(ratingDb) } - if cfg.RaterEnabled || cfg.CDRStatsEnabled || cfg.PubSubServerEnabled || cfg.AliasesServerEnabled || cfg.UserServerEnabled { + if cfg.RALsEnabled || cfg.CDRStatsEnabled || cfg.PubSubServerEnabled || cfg.AliasesServerEnabled || cfg.UserServerEnabled { accountDb, err = engine.ConfigureAccountingStorage(cfg.DataDbType, cfg.DataDbHost, cfg.DataDbPort, cfg.DataDbName, cfg.DataDbUser, cfg.DataDbPass, cfg.DBDataEncoding) if err != nil { // Cannot configure getter database, show stopper @@ -651,8 +508,12 @@ func main() { } defer accountDb.Close() engine.SetAccountingStorage(accountDb) + if err := engine.CheckVersion(); err != nil { + fmt.Println(err.Error()) + return + } } - if cfg.RaterEnabled || cfg.CDRSEnabled || cfg.SchedulerEnabled { // Only connect to storDb if necessary + if cfg.RALsEnabled || cfg.CDRSEnabled || cfg.SchedulerEnabled { // Only connect to storDb if necessary logDb, err = engine.ConfigureLogStorage(cfg.StorDBType, cfg.StorDBHost, cfg.StorDBPort, cfg.StorDBName, cfg.StorDBUser, cfg.StorDBPass, cfg.DBDataEncoding, cfg.StorDBMaxOpenConns, cfg.StorDBMaxIdleConns, cfg.StorDBCDRSIndexes) if err != nil { // Cannot 
configure logger database, show stopper @@ -668,6 +529,8 @@ func main() { } engine.SetRoundingDecimals(cfg.RoundingDecimals) + engine.SetRpSubjectPrefixMatching(cfg.RpSubjectPrefixMatching) + engine.SetLcrSubjectPrefixMatching(cfg.LcrSubjectPrefixMatching) stopHandled := false // Rpc/http server @@ -678,15 +541,15 @@ func main() { // Define internal connections via channels internalBalancerChan := make(chan *balancer2go.Balancer, 1) - internalRaterChan := make(chan *engine.Responder, 1) + internalRaterChan := make(chan rpcclient.RpcClientConnection, 1) cacheDoneChan := make(chan struct{}, 1) internalSchedulerChan := make(chan *scheduler.Scheduler, 1) - internalCdrSChan := make(chan *engine.CdrServer, 1) - internalCdrStatSChan := make(chan engine.StatsInterface, 1) - internalHistorySChan := make(chan history.Scribe, 1) + internalCdrSChan := make(chan rpcclient.RpcClientConnection, 1) + internalCdrStatSChan := make(chan rpcclient.RpcClientConnection, 1) + internalHistorySChan := make(chan rpcclient.RpcClientConnection, 1) internalPubSubSChan := make(chan rpcclient.RpcClientConnection, 1) - internalUserSChan := make(chan engine.UserService, 1) - internalAliaseSChan := make(chan engine.AliasService, 1) + internalUserSChan := make(chan rpcclient.RpcClientConnection, 1) + internalAliaseSChan := make(chan rpcclient.RpcClientConnection, 1) internalSMGChan := make(chan rpcclient.RpcClientConnection, 1) // Start balancer service if cfg.BalancerEnabled { @@ -694,7 +557,7 @@ func main() { } // Start rater service - if cfg.RaterEnabled { + if cfg.RALsEnabled { go startRater(internalRaterChan, cacheDoneChan, internalBalancerChan, internalSchedulerChan, internalCdrStatSChan, internalHistorySChan, internalPubSubSChan, internalUserSChan, internalAliaseSChan, server, ratingDb, accountDb, loadDb, cdrDb, logDb, &stopHandled, exitChan) } @@ -719,23 +582,23 @@ func main() { // Start SM-Generic if cfg.SmGenericConfig.Enabled { - go startSmGeneric(internalSMGChan, internalRaterChan, 
server, exitChan) + go startSmGeneric(internalSMGChan, internalRaterChan, internalCdrSChan, server, exitChan) } // Start SM-FreeSWITCH if cfg.SmFsConfig.Enabled { - go startSmFreeSWITCH(internalRaterChan, cdrDb, exitChan) + go startSmFreeSWITCH(internalRaterChan, internalCdrSChan, cdrDb, exitChan) // close all sessions on shutdown go shutdownSessionmanagerSingnalHandler(exitChan) } // Start SM-Kamailio if cfg.SmKamConfig.Enabled { - go startSmKamailio(internalRaterChan, cdrDb, exitChan) + go startSmKamailio(internalRaterChan, internalCdrSChan, cdrDb, exitChan) } // Start SM-OpenSIPS if cfg.SmOsipsConfig.Enabled { - go startSmOpenSIPS(internalRaterChan, cdrDb, exitChan) + go startSmOpenSIPS(internalRaterChan, internalCdrSChan, cdrDb, exitChan) } // Register session manager service // FixMe: make sure this is thread safe diff --git a/cmd/cgr-engine/rater.go b/cmd/cgr-engine/rater.go index 41535636d..8fe1ee27d 100644 --- a/cmd/cgr-engine/rater.go +++ b/cmd/cgr-engine/rater.go @@ -40,9 +40,10 @@ func startBalancer(internalBalancerChan chan *balancer2go.Balancer, stopHandled } // Starts rater and reports on chan -func startRater(internalRaterChan chan *engine.Responder, cacheDoneChan chan struct{}, internalBalancerChan chan *balancer2go.Balancer, internalSchedulerChan chan *scheduler.Scheduler, - internalCdrStatSChan chan engine.StatsInterface, internalHistorySChan chan history.Scribe, - internalPubSubSChan chan rpcclient.RpcClientConnection, internalUserSChan chan engine.UserService, internalAliaseSChan chan engine.AliasService, + +func startRater(internalRaterChan chan rpcclient.RpcClientConnection, cacheDoneChan chan struct{}, internalBalancerChan chan *balancer2go.Balancer, internalSchedulerChan chan *scheduler.Scheduler, + internalCdrStatSChan chan rpcclient.RpcClientConnection, internalHistorySChan chan rpcclient.RpcClientConnection, + internalPubSubSChan chan rpcclient.RpcClientConnection, internalUserSChan chan rpcclient.RpcClientConnection, internalAliaseSChan 
chan rpcclient.RpcClientConnection, server *utils.Server, ratingDb engine.RatingStorage, accountDb engine.AccountingStorage, loadDb engine.LoadStorage, cdrDb engine.CdrStorage, logDb engine.LogStorage, stopHandled *bool, exitChan chan bool) { @@ -84,15 +85,13 @@ func startRater(internalRaterChan chan *engine.Responder, cacheDoneChan chan str }() } - - // Connection to balancer var bal *balancer2go.Balancer - if cfg.RaterBalancer != "" { + if cfg.RALsBalancer != "" { // Connection to balancer balTaskChan := make(chan struct{}) waitTasks = append(waitTasks, balTaskChan) go func() { defer close(balTaskChan) - if cfg.RaterBalancer == utils.INTERNAL { + if cfg.RALsBalancer == utils.MetaInternal { select { case bal = <-internalBalancerChan: internalBalancerChan <- bal // Put it back if someone else is interested about @@ -108,144 +107,117 @@ func startRater(internalRaterChan chan *engine.Responder, cacheDoneChan chan str } }() } - - // Connection to CDRStats - var cdrStats engine.StatsInterface - if cfg.RaterCdrStats != "" { + var cdrStats *rpcclient.RpcClientPool + if len(cfg.RALsCDRStatSConns) != 0 { // Connections to CDRStats cdrstatTaskChan := make(chan struct{}) waitTasks = append(waitTasks, cdrstatTaskChan) go func() { defer close(cdrstatTaskChan) - if cfg.RaterCdrStats == utils.INTERNAL { - select { - case cdrStats = <-internalCdrStatSChan: - internalCdrStatSChan <- cdrStats - case <-time.After(cfg.InternalTtl): - utils.Logger.Crit(": Internal cdrstats connection timeout.") - exitChan <- true - return - } - } else if cdrStats, err = engine.NewProxyStats(cfg.RaterCdrStats, cfg.ConnectAttempts, -1); err != nil { - utils.Logger.Crit(fmt.Sprintf(" Could not connect to cdrstats, error: %s", err.Error())) + cdrStats, err = engine.NewRPCPool(rpcclient.POOL_FIRST, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, + cfg.RALsCDRStatSConns, internalCdrStatSChan, cfg.InternalTtl) + if err != nil { + utils.Logger.Crit(fmt.Sprintf(" Could not connect to CDRStatS, error: %s", 
err.Error())) exitChan <- true return } }() } - - // Connection to HistoryS - if cfg.RaterHistoryServer != "" { + if len(cfg.RALsHistorySConns) != 0 { // Connection to HistoryS, histTaskChan := make(chan struct{}) waitTasks = append(waitTasks, histTaskChan) go func() { defer close(histTaskChan) - var scribeServer history.Scribe - if cfg.RaterHistoryServer == utils.INTERNAL { - select { - case scribeServer = <-internalHistorySChan: - internalHistorySChan <- scribeServer - case <-time.After(cfg.InternalTtl): - utils.Logger.Crit(": Internal historys connection timeout.") - exitChan <- true - return - } - } else if scribeServer, err = history.NewProxyScribe(cfg.RaterHistoryServer, cfg.ConnectAttempts, -1); err != nil { - utils.Logger.Crit(fmt.Sprintf(" Could not connect historys, error: %s", err.Error())) + if historySConns, err := engine.NewRPCPool(rpcclient.POOL_FIRST, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, + cfg.RALsHistorySConns, internalHistorySChan, cfg.InternalTtl); err != nil { + utils.Logger.Crit(fmt.Sprintf(" Could not connect HistoryS, error: %s", err.Error())) exitChan <- true return + } else { + engine.SetHistoryScribe(historySConns) } - engine.SetHistoryScribe(scribeServer) // ToDo: replace package sharing with connection based one }() } - - // Connection to pubsubs - if cfg.RaterPubSubServer != "" { + if len(cfg.RALsPubSubSConns) != 0 { // Connection to pubsubs pubsubTaskChan := make(chan struct{}) waitTasks = append(waitTasks, pubsubTaskChan) go func() { defer close(pubsubTaskChan) - var pubSubServer rpcclient.RpcClientConnection - if cfg.RaterPubSubServer == utils.INTERNAL { - select { - case pubSubServer = <-internalPubSubSChan: - internalPubSubSChan <- pubSubServer - case <-time.After(cfg.InternalTtl): - utils.Logger.Crit(": Internal pubsub connection timeout.") - exitChan <- true - return - } - } else if pubSubServer, err = rpcclient.NewRpcClient("tcp", cfg.RaterPubSubServer, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, nil); err != nil 
{ - utils.Logger.Crit(fmt.Sprintf(" Could not connect to pubsubs: %s", err.Error())) + if pubSubSConns, err := engine.NewRPCPool(rpcclient.POOL_FIRST, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, + cfg.RALsPubSubSConns, internalPubSubSChan, cfg.InternalTtl); err != nil { + utils.Logger.Crit(fmt.Sprintf(" Could not connect to PubSubS: %s", err.Error())) exitChan <- true return + } else { + engine.SetPubSub(pubSubSConns) } - engine.SetPubSub(pubSubServer) // ToDo: replace package sharing with connection based one }() } - - // Connection to AliasService - if cfg.RaterAliasesServer != "" { + if len(cfg.RALsAliasSConns) != 0 { // Connection to AliasService aliasesTaskChan := make(chan struct{}) waitTasks = append(waitTasks, aliasesTaskChan) go func() { defer close(aliasesTaskChan) - var aliasesServer engine.AliasService - if cfg.RaterAliasesServer == utils.INTERNAL { - select { - case aliasesServer = <-internalAliaseSChan: - internalAliaseSChan <- aliasesServer - case <-time.After(cfg.InternalTtl): - utils.Logger.Crit(": Internal aliases connection timeout.") - exitChan <- true - return - } - } else if aliasesServer, err = engine.NewProxyAliasService(cfg.RaterAliasesServer, cfg.ConnectAttempts, -1); err != nil { - utils.Logger.Crit(fmt.Sprintf(" Could not connect to aliases, error: %s", err.Error())) + if aliaseSCons, err := engine.NewRPCPool(rpcclient.POOL_FIRST, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, + cfg.RALsAliasSConns, internalAliaseSChan, cfg.InternalTtl); err != nil { + utils.Logger.Crit(fmt.Sprintf(" Could not connect to AliaseS, error: %s", err.Error())) exitChan <- true return + } else { + engine.SetAliasService(aliaseSCons) } - engine.SetAliasService(aliasesServer) // ToDo: replace package sharing with connection based one }() } - - // Connection to UserService - var userServer engine.UserService - if cfg.RaterUserServer != "" { + var usersConns rpcclient.RpcClientConnection + if len(cfg.RALsUserSConns) != 0 { // Connection to UserService 
usersTaskChan := make(chan struct{}) waitTasks = append(waitTasks, usersTaskChan) go func() { defer close(usersTaskChan) - if cfg.RaterUserServer == utils.INTERNAL { - select { - case userServer = <-internalUserSChan: - internalUserSChan <- userServer - case <-time.After(cfg.InternalTtl): - utils.Logger.Crit(": Internal users connection timeout.") - exitChan <- true - return - } - } else if userServer, err = engine.NewProxyUserService(cfg.RaterUserServer, cfg.ConnectAttempts, -1); err != nil { - utils.Logger.Crit(fmt.Sprintf(" Could not connect users, error: %s", err.Error())) + if usersConns, err = engine.NewRPCPool(rpcclient.POOL_FIRST, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB, + cfg.RALsUserSConns, internalUserSChan, cfg.InternalTtl); err != nil { + utils.Logger.Crit(fmt.Sprintf(" Could not connect UserS, error: %s", err.Error())) exitChan <- true return } - engine.SetUserService(userServer) + engine.SetUserService(usersConns) }() } - // Wait for all connections to complete before going further for _, chn := range waitTasks { <-chn } - - responder := &engine.Responder{Bal: bal, ExitChan: exitChan, Stats: cdrStats} + responder := &engine.Responder{Bal: bal, ExitChan: exitChan} + responder.SetTimeToLive(cfg.ResponseCacheTTL, nil) apierRpcV1 := &v1.ApierV1{StorDb: loadDb, RatingDb: ratingDb, AccountDb: accountDb, CdrDb: cdrDb, LogDb: logDb, Sched: sched, - Config: cfg, Responder: responder, CdrStatsSrv: cdrStats, Users: userServer} + Config: cfg, Responder: responder} + if cdrStats != nil { // ToDo: Fix here properly the init of stats + responder.Stats = cdrStats + apierRpcV1.CdrStatsSrv = cdrStats + } + if usersConns != nil { + apierRpcV1.Users = usersConns + } apierRpcV2 := &v2.ApierV2{ ApierV1: *apierRpcV1} + // internalSchedulerChan shared here server.RpcRegister(responder) server.RpcRegister(apierRpcV1) server.RpcRegister(apierRpcV2) + + utils.RegisterRpcParams("", &engine.Stats{}) + utils.RegisterRpcParams("", &v1.CDRStatsV1{}) + 
utils.RegisterRpcParams("ScribeV1", &history.FileScribe{}) + utils.RegisterRpcParams("PubSubV1", &engine.PubSub{}) + utils.RegisterRpcParams("AliasesV1", &engine.AliasHandler{}) + utils.RegisterRpcParams("UsersV1", &engine.UserMap{}) + utils.RegisterRpcParams("", &v1.CdrsV1{}) + utils.RegisterRpcParams("", &v2.CdrsV2{}) + utils.RegisterRpcParams("", &v1.SessionManagerV1{}) + utils.RegisterRpcParams("", &v1.SMGenericV1{}) + utils.RegisterRpcParams("", responder) + utils.RegisterRpcParams("", apierRpcV1) + utils.RegisterRpcParams("", apierRpcV2) + utils.GetRpcParams("") internalRaterChan <- responder // Rater done } diff --git a/cmd/cgr-engine/registration.go b/cmd/cgr-engine/registration.go index 985d469bb..de7a724f1 100644 --- a/cmd/cgr-engine/registration.go +++ b/cmd/cgr-engine/registration.go @@ -29,6 +29,7 @@ import ( "github.com/cgrates/cgrates/engine" "github.com/cgrates/cgrates/scheduler" "github.com/cgrates/cgrates/utils" + "github.com/cgrates/rpcclient" ) /* @@ -43,7 +44,7 @@ func stopBalancerSignalHandler(bal *balancer2go.Balancer, exitChan chan bool) { exitChan <- true } -func generalSignalHandler(internalCdrStatSChan chan engine.StatsInterface, exitChan chan bool) { +func generalSignalHandler(internalCdrStatSChan chan rpcclient.RpcClientConnection, exitChan chan bool) { c := make(chan os.Signal) signal.Notify(c, syscall.SIGTERM, syscall.SIGINT, syscall.SIGQUIT) @@ -52,7 +53,7 @@ func generalSignalHandler(internalCdrStatSChan chan engine.StatsInterface, exitC var dummyInt int select { case cdrStats := <-internalCdrStatSChan: - cdrStats.Stop(dummyInt, &dummyInt) + cdrStats.Call("CDRStatsV1.Stop", dummyInt, &dummyInt) default: } @@ -62,7 +63,7 @@ func generalSignalHandler(internalCdrStatSChan chan engine.StatsInterface, exitC /* Listens for the SIGTERM, SIGINT, SIGQUIT system signals and gracefuly unregister from balancer and closes the storage before exiting. 
*/ -func stopRaterSignalHandler(internalCdrStatSChan chan engine.StatsInterface, exitChan chan bool) { +func stopRaterSignalHandler(internalCdrStatSChan chan rpcclient.RpcClientConnection, exitChan chan bool) { c := make(chan os.Signal) signal.Notify(c, syscall.SIGTERM, syscall.SIGINT, syscall.SIGQUIT) sig := <-c @@ -72,7 +73,7 @@ func stopRaterSignalHandler(internalCdrStatSChan chan engine.StatsInterface, exi var dummyInt int select { case cdrStats := <-internalCdrStatSChan: - cdrStats.Stop(dummyInt, &dummyInt) + cdrStats.Call("CDRStatsV1.Stop", dummyInt, &dummyInt) default: } exitChan <- true @@ -82,14 +83,14 @@ func stopRaterSignalHandler(internalCdrStatSChan chan engine.StatsInterface, exi Connects to the balancer and calls unregister RPC method. */ func unregisterFromBalancer(exitChan chan bool) { - client, err := rpc.Dial("tcp", cfg.RaterBalancer) + client, err := rpc.Dial("tcp", cfg.RALsBalancer) if err != nil { utils.Logger.Crit("Cannot contact the balancer!") exitChan <- true return } var reply int - utils.Logger.Info(fmt.Sprintf("Unregistering from balancer %s", cfg.RaterBalancer)) + utils.Logger.Info(fmt.Sprintf("Unregistering from balancer %s", cfg.RALsBalancer)) client.Call("Responder.UnRegisterRater", cfg.RPCGOBListen, &reply) if err := client.Close(); err != nil { utils.Logger.Crit("Could not close balancer unregistration!") @@ -101,14 +102,14 @@ func unregisterFromBalancer(exitChan chan bool) { Connects to the balancer and rehisters the engine to the server. 
*/ func registerToBalancer(exitChan chan bool) { - client, err := rpc.Dial("tcp", cfg.RaterBalancer) + client, err := rpc.Dial("tcp", cfg.RALsBalancer) if err != nil { utils.Logger.Crit(fmt.Sprintf("Cannot contact the balancer: %v", err)) exitChan <- true return } var reply int - utils.Logger.Info(fmt.Sprintf("Registering to balancer %s", cfg.RaterBalancer)) + utils.Logger.Info(fmt.Sprintf("Registering to balancer %s", cfg.RALsBalancer)) client.Call("Responder.RegisterRater", cfg.RPCGOBListen, &reply) if err := client.Close(); err != nil { utils.Logger.Crit("Could not close balancer registration!") diff --git a/cmd/cgr-loader/cgr-loader.go b/cmd/cgr-loader/cgr-loader.go index c818ac353..3ac60d57c 100644 --- a/cmd/cgr-loader/cgr-loader.go +++ b/cmd/cgr-loader/cgr-loader.go @@ -29,8 +29,8 @@ import ( "github.com/cgrates/cgrates/config" "github.com/cgrates/cgrates/engine" - "github.com/cgrates/cgrates/history" "github.com/cgrates/cgrates/utils" + "github.com/cgrates/rpcclient" ) var ( @@ -92,77 +92,107 @@ func main() { var rater, cdrstats, users *rpc.Client var loader engine.LoadReader if *migrateRC8 != "" { - var db_nb int - db_nb, err = strconv.Atoi(*datadb_name) - if err != nil { - log.Print("Redis db name must be an integer!") - return - } - host := *datadb_host - if *datadb_port != "" { - host += ":" + *datadb_port - } - migratorRC8acc, err := NewMigratorRC8(host, db_nb, *datadb_pass, *dbdata_encoding) - if err != nil { - log.Print(err.Error()) - return - } - if strings.Contains(*migrateRC8, "acc") || strings.Contains(*migrateRC8, "*all") { - if err := migratorRC8acc.migrateAccounts(); err != nil { + if *datadb_type == "redis" && *tpdb_type == "redis" { + var db_nb int + db_nb, err = strconv.Atoi(*datadb_name) + if err != nil { + log.Print("Redis db name must be an integer!") + return + } + host := *datadb_host + if *datadb_port != "" { + host += ":" + *datadb_port + } + migratorRC8acc, err := NewMigratorRC8(host, db_nb, *datadb_pass, *dbdata_encoding) + if err 
!= nil { log.Print(err.Error()) + return + } + if strings.Contains(*migrateRC8, "acc") || strings.Contains(*migrateRC8, "*all") { + if err := migratorRC8acc.migrateAccounts(); err != nil { + log.Print(err.Error()) + } + } + + db_nb, err = strconv.Atoi(*tpdb_name) + if err != nil { + log.Print("Redis db name must be an integer!") + return + } + host = *tpdb_host + if *tpdb_port != "" { + host += ":" + *tpdb_port + } + migratorRC8rat, err := NewMigratorRC8(host, db_nb, *tpdb_pass, *dbdata_encoding) + if err != nil { + log.Print(err.Error()) + return + } + if strings.Contains(*migrateRC8, "atr") || strings.Contains(*migrateRC8, "*all") { + if err := migratorRC8rat.migrateActionTriggers(); err != nil { + log.Print(err.Error()) + } + } + if strings.Contains(*migrateRC8, "act") || strings.Contains(*migrateRC8, "*all") { + if err := migratorRC8rat.migrateActions(); err != nil { + log.Print(err.Error()) + } + } + if strings.Contains(*migrateRC8, "dcs") || strings.Contains(*migrateRC8, "*all") { + if err := migratorRC8rat.migrateDerivedChargers(); err != nil { + log.Print(err.Error()) + } + } + if strings.Contains(*migrateRC8, "apl") || strings.Contains(*migrateRC8, "*all") { + if err := migratorRC8rat.migrateActionPlans(); err != nil { + log.Print(err.Error()) + } + } + if strings.Contains(*migrateRC8, "shg") || strings.Contains(*migrateRC8, "*all") { + if err := migratorRC8rat.migrateSharedGroups(); err != nil { + log.Print(err.Error()) + } + } + if strings.Contains(*migrateRC8, "int") { + if err := migratorRC8acc.migrateAccountsInt(); err != nil { + log.Print(err.Error()) + } + if err := migratorRC8rat.migrateActionTriggersInt(); err != nil { + log.Print(err.Error()) + } + if err := migratorRC8rat.migrateActionsInt(); err != nil { + log.Print(err.Error()) + } + } + if strings.Contains(*migrateRC8, "vf") { + if err := migratorRC8rat.migrateActionsInt2(); err != nil { + log.Print(err.Error()) + } + if err := migratorRC8acc.writeVersion(); err != nil { + 
log.Print(err.Error()) + } + } + } else if *datadb_type == "mongo" && *tpdb_type == "mongo" { + mongoMigratorAcc, err := NewMongoMigrator(*datadb_host, *datadb_port, *datadb_name, *datadb_user, *datadb_pass) + if err != nil { + log.Print(err.Error()) + return + } + mongoMigratorRat, err := NewMongoMigrator(*tpdb_host, *tpdb_port, *tpdb_name, *tpdb_user, *tpdb_pass) + if err != nil { + log.Print(err.Error()) + return + } + if strings.Contains(*migrateRC8, "vf") { + if err := mongoMigratorRat.migrateActions(); err != nil { + log.Print(err.Error()) + } + if err := mongoMigratorAcc.writeVersion(); err != nil { + log.Print(err.Error()) + } } } - db_nb, err = strconv.Atoi(*tpdb_name) - if err != nil { - log.Print("Redis db name must be an integer!") - return - } - host = *tpdb_host - if *tpdb_port != "" { - host += ":" + *tpdb_port - } - migratorRC8rat, err := NewMigratorRC8(host, db_nb, *tpdb_pass, *dbdata_encoding) - if err != nil { - log.Print(err.Error()) - return - } - if strings.Contains(*migrateRC8, "atr") || strings.Contains(*migrateRC8, "*all") { - if err := migratorRC8rat.migrateActionTriggers(); err != nil { - log.Print(err.Error()) - } - } - if strings.Contains(*migrateRC8, "act") || strings.Contains(*migrateRC8, "*all") { - if err := migratorRC8rat.migrateActions(); err != nil { - log.Print(err.Error()) - } - } - if strings.Contains(*migrateRC8, "dcs") || strings.Contains(*migrateRC8, "*all") { - if err := migratorRC8rat.migrateDerivedChargers(); err != nil { - log.Print(err.Error()) - } - } - if strings.Contains(*migrateRC8, "apl") || strings.Contains(*migrateRC8, "*all") { - if err := migratorRC8rat.migrateActionPlans(); err != nil { - log.Print(err.Error()) - } - } - if strings.Contains(*migrateRC8, "shg") || strings.Contains(*migrateRC8, "*all") { - if err := migratorRC8rat.migrateSharedGroups(); err != nil { - log.Print(err.Error()) - } - } - if strings.Contains(*migrateRC8, "int") { - if err := migratorRC8acc.migrateAccountsInt(); err != nil { - 
log.Print(err.Error()) - } - if err := migratorRC8rat.migrateActionTriggersInt(); err != nil { - log.Print(err.Error()) - } - if err := migratorRC8rat.migrateActionsInt(); err != nil { - log.Print(err.Error()) - } - } log.Print("Done!") return } @@ -257,7 +287,7 @@ func main() { return } if *historyServer != "" { // Init scribeAgent so we can store the differences - if scribeAgent, err := history.NewProxyScribe(*historyServer, 3, 3); err != nil { + if scribeAgent, err := rpcclient.NewRpcClient("tcp", *historyServer, 3, 3, utils.GOB, nil); err != nil { log.Fatalf("Could not connect to history server, error: %s. Make sure you have properly configured it via -history_server flag.", err.Error()) return } else { diff --git a/cmd/cgr-loader/migrator_mongo.go b/cmd/cgr-loader/migrator_mongo.go new file mode 100644 index 000000000..9436dfd25 --- /dev/null +++ b/cmd/cgr-loader/migrator_mongo.go @@ -0,0 +1,97 @@ +package main + +import ( + "fmt" + "log" + + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" + + "github.com/cgrates/cgrates/engine" + "github.com/cgrates/cgrates/utils" +) + +type MongoMigrator struct { + session *mgo.Session + db *mgo.Database +} + +func NewMongoMigrator(host, port, db, user, pass string) (*MongoMigrator, error) { + address := fmt.Sprintf("%s:%s", host, port) + if user != "" && pass != "" { + address = fmt.Sprintf("%s:%s@%s", user, pass, address) + } + session, err := mgo.Dial(address) + if err != nil { + return nil, err + } + ndb := session.DB(db) + return &MongoMigrator{session: session, db: ndb}, nil +} + +func (mig MongoMigrator) migrateActions() error { + newAcsMap := make(map[string]engine.Actions) + iter := mig.db.C("actions").Find(nil).Iter() + var oldAcs struct { + Key string + Value Actions2 + } + for iter.Next(&oldAcs) { + log.Printf("Migrating action: %s...", oldAcs.Key) + newAcs := make(engine.Actions, len(oldAcs.Value)) + for index, oldAc := range oldAcs.Value { + a := &engine.Action{ + Id: oldAc.Id, + ActionType: oldAc.ActionType, + 
ExtraParameters: oldAc.ExtraParameters, + ExpirationString: oldAc.ExpirationString, + Filter: oldAc.Filter, + Weight: oldAc.Weight, + Balance: &engine.BalanceFilter{ + Uuid: oldAc.Balance.Uuid, + ID: oldAc.Balance.ID, + Type: oldAc.Balance.Type, + Directions: oldAc.Balance.Directions, + ExpirationDate: oldAc.Balance.ExpirationDate, + Weight: oldAc.Balance.Weight, + DestinationIDs: oldAc.Balance.DestinationIDs, + RatingSubject: oldAc.Balance.RatingSubject, + Categories: oldAc.Balance.Categories, + SharedGroups: oldAc.Balance.SharedGroups, + TimingIDs: oldAc.Balance.TimingIDs, + Timings: oldAc.Balance.Timings, + Disabled: oldAc.Balance.Disabled, + Factor: oldAc.Balance.Factor, + Blocker: oldAc.Balance.Blocker, + }, + } + if oldAc.Balance.Value != nil { + a.Balance.Value = &utils.ValueFormula{Static: *oldAc.Balance.Value} + } + newAcs[index] = a + } + newAcsMap[oldAcs.Key] = newAcs + } + if err := iter.Close(); err != nil { + return err + } + + // write data back + for key, acs := range newAcsMap { + if _, err := mig.db.C("actions").Upsert(bson.M{"key": key}, &struct { + Key string + Value engine.Actions + }{Key: key, Value: acs}); err != nil { + return err + } + } + return nil +} + +func (mig MongoMigrator) writeVersion() error { + _, err := mig.db.C("versions").Upsert(bson.M{"key": utils.VERSION_PREFIX + "struct"}, &struct { + Key string + Value *engine.StructVersion + }{utils.VERSION_PREFIX + "struct", engine.CurrentVersion}) + return err +} diff --git a/cmd/cgr-loader/migrator_rc8.go b/cmd/cgr-loader/migrator_rc8.go index 9d085d2f6..15579db98 100644 --- a/cmd/cgr-loader/migrator_rc8.go +++ b/cmd/cgr-loader/migrator_rc8.go @@ -482,7 +482,7 @@ func (mig MigratorRC8) migrateActions() error { bf.Type = utils.StringPointer(oldAc.BalanceType) } if oldAc.Balance.Value != 0 { - bf.Value = utils.Float64Pointer(oldAc.Balance.Value) + bf.Value = &utils.ValueFormula{Static: oldAc.Balance.Value} } if oldAc.Balance.RatingSubject != "" { bf.RatingSubject = 
utils.StringPointer(oldAc.Balance.RatingSubject) @@ -669,3 +669,11 @@ func (mig MigratorRC8) migrateSharedGroups() error { } return nil } + +func (mig MigratorRC8) writeVersion() error { + result, err := mig.ms.Marshal(engine.CurrentVersion) + if err != nil { + return err + } + return mig.db.Cmd("SET", utils.VERSION_PREFIX+"struct", result).Err +} diff --git a/cmd/cgr-loader/migrator_rc8int.go b/cmd/cgr-loader/migrator_rc8int.go index 41f55afb1..dd04b91ec 100644 --- a/cmd/cgr-loader/migrator_rc8int.go +++ b/cmd/cgr-loader/migrator_rc8int.go @@ -364,7 +364,7 @@ func (mig MigratorRC8) migrateActionsInt() error { bf.Type = utils.StringPointer(oldAc.BalanceType) } if oldAc.Balance.Value != 0 { - bf.Value = utils.Float64Pointer(oldAc.Balance.Value) + bf.Value = &utils.ValueFormula{Static: oldAc.Balance.Value} } if oldAc.Balance.RatingSubject != "" { bf.RatingSubject = utils.StringPointer(oldAc.Balance.RatingSubject) diff --git a/cmd/cgr-loader/migrator_rc8int2.go b/cmd/cgr-loader/migrator_rc8int2.go new file mode 100644 index 000000000..46c7caeeb --- /dev/null +++ b/cmd/cgr-loader/migrator_rc8int2.go @@ -0,0 +1,102 @@ +package main + +import ( + "log" + "time" + + "github.com/cgrates/cgrates/engine" + "github.com/cgrates/cgrates/utils" +) + +type BalanceFilter2 struct { + Uuid *string + ID *string + Type *string + Value *float64 + Directions *utils.StringMap + ExpirationDate *time.Time + Weight *float64 + DestinationIDs *utils.StringMap + RatingSubject *string + Categories *utils.StringMap + SharedGroups *utils.StringMap + TimingIDs *utils.StringMap + Timings []*engine.RITiming + Disabled *bool + Factor *engine.ValueFactor + Blocker *bool +} + +type Action2 struct { + Id string + ActionType string + ExtraParameters string + Filter string + ExpirationString string // must stay as string because it can have relative values like 1month + Weight float64 + Balance *BalanceFilter2 +} + +type Actions2 []*Action2 + +func (mig MigratorRC8) migrateActionsInt2() error { + keys, 
err := mig.db.Cmd("KEYS", utils.ACTION_PREFIX+"*").List() + if err != nil { + return err + } + newAcsMap := make(map[string]engine.Actions, len(keys)) + for _, key := range keys { + log.Printf("Migrating action: %s...", key) + var oldAcs Actions2 + var values []byte + if values, err = mig.db.Cmd("GET", key).Bytes(); err == nil { + if err := mig.ms.Unmarshal(values, &oldAcs); err != nil { + return err + } + } + newAcs := make(engine.Actions, len(oldAcs)) + for index, oldAc := range oldAcs { + a := &engine.Action{ + Id: oldAc.Id, + ActionType: oldAc.ActionType, + ExtraParameters: oldAc.ExtraParameters, + ExpirationString: oldAc.ExpirationString, + Filter: oldAc.Filter, + Weight: oldAc.Weight, + Balance: &engine.BalanceFilter{ + Uuid: oldAc.Balance.Uuid, + ID: oldAc.Balance.ID, + Type: oldAc.Balance.Type, + Directions: oldAc.Balance.Directions, + ExpirationDate: oldAc.Balance.ExpirationDate, + Weight: oldAc.Balance.Weight, + DestinationIDs: oldAc.Balance.DestinationIDs, + RatingSubject: oldAc.Balance.RatingSubject, + Categories: oldAc.Balance.Categories, + SharedGroups: oldAc.Balance.SharedGroups, + TimingIDs: oldAc.Balance.TimingIDs, + Timings: oldAc.Balance.Timings, + Disabled: oldAc.Balance.Disabled, + Factor: oldAc.Balance.Factor, + Blocker: oldAc.Balance.Blocker, + }, + } + if oldAc.Balance.Value != nil { + a.Balance.Value = &utils.ValueFormula{Static: *oldAc.Balance.Value} + } + newAcs[index] = a + } + newAcsMap[key] = newAcs + } + // write data back + for key, acs := range newAcsMap { + result, err := mig.ms.Marshal(&acs) + if err != nil { + return err + } + if err = mig.db.Cmd("SET", key, result).Err; err != nil { + return err + } + } + return nil +} diff --git a/cmd/cgr-tester/cgr-tester.go b/cmd/cgr-tester/cgr-tester.go index 1fbdc9c66..e4597655d 100644 --- a/cmd/cgr-tester/cgr-tester.go +++ b/cmd/cgr-tester/cgr-tester.go @@ -153,7 +153,6 @@ func durRemoteRater(cd *engine.CallDescriptor) (time.Duration, error) { func main() { flag.Parse() - 
runtime.GOMAXPROCS(runtime.NumCPU() - 1) if *cpuprofile != "" { f, err := os.Create(*cpuprofile) diff --git a/cmd/cgr-tester/parallel/parallel.go b/cmd/cgr-tester/parallel/parallel.go new file mode 100644 index 000000000..b249dec36 --- /dev/null +++ b/cmd/cgr-tester/parallel/parallel.go @@ -0,0 +1,44 @@ +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "net/http" + "sync" +) + +func main() { + log.Print("Start!") + var wg sync.WaitGroup + for i := 1; i < 1002; i++ { + go func(index int) { + wg.Add(1) + resp, err := http.Post("http://localhost:2080/jsonrpc", "application/json", bytes.NewBuffer([]byte(fmt.Sprintf(`{"method": "ApierV1.SetAccount","params": [{"Tenant":"reglo","Account":"100%d","ActionPlanId":"PACKAGE_NEW_FOR795", "ReloadScheduler":false}], "id":%d}`, index, index)))) + if err != nil { + log.Print("Post error: ", err) + } + contents, err := ioutil.ReadAll(resp.Body) + if err != nil { + log.Print("Body error: ", err) + } + log.Printf("SetAccount(%d): %s", index, string(contents)) + wg.Done() + }(i) + } + wg.Wait() + for index := 1; index < 1002; index++ { + resp, err := http.Post("http://localhost:2080/jsonrpc", "application/json", bytes.NewBuffer([]byte(fmt.Sprintf(`{"method": "ApierV1.GetAccountActionPlan","params": [{"Tenant":"reglo","Account":"100%d"}], "id":%d}`, index, index)))) + if err != nil { + log.Print("Post error: ", err) + } + contents, err := ioutil.ReadAll(resp.Body) + if err != nil { + log.Print("Body error: ", err) + } + log.Printf("GetAccountActionPlan(%d): %s", index, string(contents)) + } + + log.Print("Done!") +} diff --git a/config/cdrcconfig.go b/config/cdrcconfig.go index 00005be51..c4b6c9bdd 100644 --- a/config/cdrcconfig.go +++ b/config/cdrcconfig.go @@ -25,9 +25,10 @@ import ( ) type CdrcConfig struct { + ID string // free-form text identifying this CDRC instance Enabled bool // Enable/Disable the profile DryRun bool // Do not post CDRs to the server - Cdrs string // The address where CDRs can be reached + 
CdrsConns []*HaPoolConfig // The address where CDRs can be reached CdrFormat string // The type of CDR file to process FieldSeparator rune // The separator to use when reading csvs DataUsageMultiplyFactor float64 // Conversion factor for data usage @@ -51,14 +52,21 @@ func (self *CdrcConfig) loadFromJsonCfg(jsnCfg *CdrcJsonCfg) error { return nil } var err error + if jsnCfg.Id != nil { + self.ID = *jsnCfg.Id + } if jsnCfg.Enabled != nil { self.Enabled = *jsnCfg.Enabled } if jsnCfg.Dry_run != nil { self.DryRun = *jsnCfg.Dry_run } - if jsnCfg.Cdrs != nil { - self.Cdrs = *jsnCfg.Cdrs + if jsnCfg.Cdrs_conns != nil { + self.CdrsConns = make([]*HaPoolConfig, len(*jsnCfg.Cdrs_conns)) + for idx, jsnHaCfg := range *jsnCfg.Cdrs_conns { + self.CdrsConns[idx] = NewDfltHaPoolConfig() + self.CdrsConns[idx].loadFromJsonCfg(jsnHaCfg) + } } if jsnCfg.Cdr_format != nil { self.CdrFormat = *jsnCfg.Cdr_format @@ -125,8 +133,13 @@ func (self *CdrcConfig) loadFromJsonCfg(jsnCfg *CdrcJsonCfg) error { // Clone itself into a new CdrcConfig func (self *CdrcConfig) Clone() *CdrcConfig { clnCdrc := new(CdrcConfig) + clnCdrc.ID = self.ID clnCdrc.Enabled = self.Enabled - clnCdrc.Cdrs = self.Cdrs + clnCdrc.CdrsConns = make([]*HaPoolConfig, len(self.CdrsConns)) + for idx, cdrConn := range self.CdrsConns { + clonedVal := *cdrConn + clnCdrc.CdrsConns[idx] = &clonedVal + } clnCdrc.CdrFormat = self.CdrFormat clnCdrc.FieldSeparator = self.FieldSeparator clnCdrc.DataUsageMultiplyFactor = self.DataUsageMultiplyFactor diff --git a/config/cfg_data.json b/config/cfg_data.json index f5eb4a811..e01b9dee0 100644 --- a/config/cfg_data.json +++ b/config/cfg_data.json @@ -7,25 +7,27 @@ // This is what you get when you load CGRateS with an empty configuration file. 
"general": { - "default_reqtype": "*pseudoprepaid", // default request type to consider when missing from requests: <""|*prepaid|*postpaid|*pseudoprepaid|*rated> + "default_request_type": "*pseudoprepaid", // default request type to consider when missing from requests: <""|*prepaid|*postpaid|*pseudoprepaid|*rated> }, "cdrs": { "enabled": true, // start the CDR Server service: }, -"rater": { +"rals": { "enabled": true, // enable Rater service: }, -"cdrc": { - "CDRC-CSV1": { +"cdrc": [ + { + "id": "CDRC-CSV1", "enabled": true, // enable CDR client functionality "cdr_in_dir": "/tmp/cgrates/cdrc1/in", // absolute path towards the directory where the CDRs are stored "cdr_out_dir": "/tmp/cgrates/cdrc1/out", // absolute path towards the directory where processed CDRs will be moved "cdr_source_id": "csv1", // free form field, tag identifying the source of the CDRs within CDRS database }, - "CDRC-CSV2": { + { + "id": "CDRC-CSV2", "enabled": true, // enable CDR client functionality "cdr_in_dir": "/tmp/cgrates/cdrc2/in", // absolute path towards the directory where the CDRs are stored "cdr_out_dir": "/tmp/cgrates/cdrc2/out", // absolute path towards the directory where processed CDRs will be moved @@ -38,13 +40,13 @@ {"field_id": "Usage", "value": "~9:s/^(\\d+)$/${1}s/"}, ], }, -}, +], "sm_freeswitch": { "enabled": true, // starts SessionManager service: - "connections":[ // instantiate connections to multiple FreeSWITCH servers - {"server": "1.2.3.4:8021", "password": "ClueCon", "reconnects": 5}, - {"server": "2.3.4.5:8021", "password": "ClueCon", "reconnects": 5}, + "event_socket_conns":[ // instantiate connections to multiple FreeSWITCH servers + {"address": "1.2.3.4:8021", "password": "ClueCon", "reconnects": 5}, + {"address": "2.3.4.5:8021", "password": "ClueCon", "reconnects": 5}, ], }, diff --git a/config/cfg_data2.json b/config/cfg_data2.json index 67b846168..5115f5364 100644 --- a/config/cfg_data2.json +++ b/config/cfg_data2.json @@ -1,29 +1,19 @@ { -"cdrc": { - 
"CDRC-CSV2": { - "enabled": true, // enable CDR client functionality - "cdr_in_dir": "/tmp/cgrates/cdrc2/in", // absolute path towards the directory where the CDRs are stored - "cdr_out_dir": "/tmp/cgrates/cdrc2/out", // absolute path towards the directory where processed CDRs will be moved - "data_usage_multiply_factor": 0.000976563, - "cdr_source_id": "csv2", // free form field, tag identifying the source of the CDRs within CDRS database - "content_fields":[ // import template, tag will match internally CDR field, in case of .csv value will be represented by index of the field value - {"field_id": "ToR", "value": "~7:s/^(voice|data|sms|generic)$/*$1/"}, - {"field_id": "AnswerTime", "value": "2"}, - ], - }, - "CDRC-CSV3": { +"cdrc": [ + { + "id": "CDRC-CSV3", "enabled": true, // enable CDR client functionality "cdr_in_dir": "/tmp/cgrates/cdrc3/in", // absolute path towards the directory where the CDRs are stored "cdr_out_dir": "/tmp/cgrates/cdrc3/out", // absolute path towards the directory where processed CDRs will be moved "cdr_source_id": "csv3", // free form field, tag identifying the source of the CDRs within CDRS database }, -}, +], "sm_freeswitch": { "enabled": true, // starts SessionManager service: - "connections":[ // instantiate connections to multiple FreeSWITCH servers - {"server": "2.3.4.5:8021", "password": "ClueCon", "reconnects": 5}, + "event_socket_conns":[ // instantiate connections to multiple FreeSWITCH servers + {"address": "2.3.4.5:8021", "password": "ClueCon", "reconnects": 5}, ], }, diff --git a/config/config.go b/config/config.go index 72a1bf98c..736923a6c 100644 --- a/config/config.go +++ b/config/config.go @@ -60,6 +60,7 @@ func SetCgrConfig(cfg *CGRConfig) { func NewDefaultCGRConfig() (*CGRConfig, error) { cfg := new(CGRConfig) + cfg.InstanceID = utils.GenUUID() cfg.DataFolderPath = "/usr/share/cgrates/" cfg.SmGenericConfig = new(SmGenericConfig) cfg.SmFsConfig = new(SmFsConfig) @@ -84,9 +85,9 @@ func NewDefaultCGRConfig() (*CGRConfig, 
error) { return nil, err } cfg.dfltCdreProfile = cfg.CdreProfiles[utils.META_DEFAULT].Clone() // So default will stay unique, will have nil pointer in case of no defaults loaded which is an extra check - cfg.dfltCdrcProfile = cfg.CdrcProfiles["/var/log/cgrates/cdrc/in"][utils.META_DEFAULT].Clone() - dfltFsConnConfig = cfg.SmFsConfig.Connections[0] // We leave it crashing here on purpose if no Connection defaults defined - dfltKamConnConfig = cfg.SmKamConfig.Connections[0] + cfg.dfltCdrcProfile = cfg.CdrcProfiles["/var/log/cgrates/cdrc/in"][0].Clone() + dfltFsConnConfig = cfg.SmFsConfig.EventSocketConns[0] // We leave it crashing here on purpose if no Connection defaults defined + dfltKamConnConfig = cfg.SmKamConfig.EvapiConns[0] if err := cfg.checkConfigSanity(); err != nil { return nil, err } @@ -167,89 +168,90 @@ func NewCGRConfigFromFolder(cfgDir string) (*CGRConfig, error) { // Holds system configuration, defaults are overwritten with values from config file if found type CGRConfig struct { - TpDbType string - TpDbHost string // The host to connect to. Values that start with / are for UNIX domain sockets. - TpDbPort string // The port to bind to. - TpDbName string // The name of the database to connect to. - TpDbUser string // The user to sign in as. - TpDbPass string // The user's password. - DataDbType string - DataDbHost string // The host to connect to. Values that start with / are for UNIX domain sockets. - DataDbPort string // The port to bind to. - DataDbName string // The name of the database to connect to. - DataDbUser string // The user to sign in as. - DataDbPass string // The user's password. - LoadHistorySize int // Maximum number of records to archive in load history - StorDBType string // Should reflect the database type used to store logs - StorDBHost string // The host to connect to. Values that start with / are for UNIX domain sockets. - StorDBPort string // Th e port to bind to. - StorDBName string // The name of the database to connect to. 
- StorDBUser string // The user to sign in as. - StorDBPass string // The user's password. - StorDBMaxOpenConns int // Maximum database connections opened - StorDBMaxIdleConns int // Maximum idle connections to keep opened - StorDBCDRSIndexes []string - DBDataEncoding string // The encoding used to store object data in strings: - RPCJSONListen string // RPC JSON listening address - RPCGOBListen string // RPC GOB listening address - HTTPListen string // HTTP listening address - DefaultReqType string // Use this request type if not defined on top - DefaultCategory string // set default type of record - DefaultTenant string // set default tenant - DefaultSubject string // set default rating subject, useful in case of fallback - DefaultTimezone string // default timezone for timestamps where not specified <""|UTC|Local|$IANA_TZ_DB> - Reconnects int // number of recconect attempts in case of connection lost <-1 for infinite | nb> - ConnectAttempts int // number of initial connection attempts before giving up - ResponseCacheTTL time.Duration // the life span of a cached response - InternalTtl time.Duration // maximum duration to wait for internal connections before giving up - RoundingDecimals int // Number of decimals to round end prices at - HttpSkipTlsVerify bool // If enabled Http Client will accept any TLS certificate - TpExportPath string // Path towards export folder for offline Tariff Plans - HttpFailedDir string // Directory path where we store failed http requests - MaxCallDuration time.Duration // The maximum call duration (used by responder when querying DerivedCharging) // ToDo: export it in configuration file - RaterEnabled bool // start standalone server (no balancer) - RaterBalancer string // balancer address host:port - RaterCdrStats string // address where to reach the cdrstats service. 
Empty to disable stats gathering <""|internal|x.y.z.y:1234> - RaterHistoryServer string - RaterPubSubServer string - RaterUserServer string - RaterAliasesServer string - RpSubjectPrefixMatching bool // enables prefix matching for the rating profile subject - BalancerEnabled bool - SchedulerEnabled bool - CDRSEnabled bool // Enable CDR Server service - CDRSExtraFields []*utils.RSRField // Extra fields to store in CDRs - CDRSStoreCdrs bool // store cdrs in storDb - CDRSRater string // address where to reach the Rater for cost calculation: <""|internal|x.y.z.y:1234> - CDRSPubSub string // address where to reach the pubsub service: <""|internal|x.y.z.y:1234> - CDRSUsers string // address where to reach the users service: <""|internal|x.y.z.y:1234> - CDRSAliases string // address where to reach the aliases service: <""|internal|x.y.z.y:1234> - CDRSStats string // address where to reach the cdrstats service. Empty to disable stats gathering <""|internal|x.y.z.y:1234> - CDRSCdrReplication []*CdrReplicationCfg // Replicate raw CDRs to a number of servers - CDRStatsEnabled bool // Enable CDR Stats service - CDRStatsSaveInterval time.Duration // Save interval duration - CdreProfiles map[string]*CdreConfig - CdrcProfiles map[string]map[string]*CdrcConfig // Number of CDRC instances running imports, format map[dirPath]map[instanceName]{Configs} - SmGenericConfig *SmGenericConfig - SmFsConfig *SmFsConfig // SM-FreeSWITCH configuration - SmKamConfig *SmKamConfig // SM-Kamailio Configuration - SmOsipsConfig *SmOsipsConfig // SM-OpenSIPS Configuration - diameterAgentCfg *DiameterAgentCfg // DiameterAgent configuration - HistoryServer string // Address where to reach the master history server: - HistoryServerEnabled bool // Starts History as server: . - HistoryDir string // Location on disk where to store history files. - HistorySaveInterval time.Duration // The timout duration between pubsub writes - PubSubServerEnabled bool // Starts PubSub as server: . 
- AliasesServerEnabled bool // Starts PubSub as server: . - UserServerEnabled bool // Starts User as server: - UserServerIndexes []string // List of user profile field indexes - MailerServer string // The server to use when sending emails out - MailerAuthUser string // Authenticate to email server using this user - MailerAuthPass string // Authenticate to email server with this password - MailerFromAddr string // From address used when sending emails out - DataFolderPath string // Path towards data folder, for tests internal usage, not loading out of .json options - sureTaxCfg *SureTaxCfg // Load here SureTax configuration, as pointer so we can have runtime reloads in the future - ConfigReloads map[string]chan struct{} // Signals to specific entities that a config reload should occur + InstanceID string // Identifier for this engine instance + TpDbType string + TpDbHost string // The host to connect to. Values that start with / are for UNIX domain sockets. + TpDbPort string // The port to bind to. + TpDbName string // The name of the database to connect to. + TpDbUser string // The user to sign in as. + TpDbPass string // The user's password. + DataDbType string + DataDbHost string // The host to connect to. Values that start with / are for UNIX domain sockets. + DataDbPort string // The port to bind to. + DataDbName string // The name of the database to connect to. + DataDbUser string // The user to sign in as. + DataDbPass string // The user's password. + LoadHistorySize int // Maximum number of records to archive in load history + StorDBType string // Should reflect the database type used to store logs + StorDBHost string // The host to connect to. Values that start with / are for UNIX domain sockets. + StorDBPort string // Th e port to bind to. + StorDBName string // The name of the database to connect to. + StorDBUser string // The user to sign in as. + StorDBPass string // The user's password. 
+ StorDBMaxOpenConns int // Maximum database connections opened + StorDBMaxIdleConns int // Maximum idle connections to keep opened + StorDBCDRSIndexes []string + DBDataEncoding string // The encoding used to store object data in strings: + RPCJSONListen string // RPC JSON listening address + RPCGOBListen string // RPC GOB listening address + HTTPListen string // HTTP listening address + DefaultReqType string // Use this request type if not defined on top + DefaultCategory string // set default type of record + DefaultTenant string // set default tenant + DefaultTimezone string // default timezone for timestamps where not specified <""|UTC|Local|$IANA_TZ_DB> + Reconnects int // number of recconect attempts in case of connection lost <-1 for infinite | nb> + ConnectAttempts int // number of initial connection attempts before giving up + ResponseCacheTTL time.Duration // the life span of a cached response + InternalTtl time.Duration // maximum duration to wait for internal connections before giving up + RoundingDecimals int // Number of decimals to round end prices at + HttpSkipTlsVerify bool // If enabled Http Client will accept any TLS certificate + TpExportPath string // Path towards export folder for offline Tariff Plans + HttpFailedDir string // Directory path where we store failed http requests + MaxCallDuration time.Duration // The maximum call duration (used by responder when querying DerivedCharging) // ToDo: export it in configuration file + RALsEnabled bool // start standalone server (no balancer) + RALsBalancer string // balancer address host:port + RALsCDRStatSConns []*HaPoolConfig // address where to reach the cdrstats service. 
Empty to disable stats gathering <""|internal|x.y.z.y:1234> + RALsHistorySConns []*HaPoolConfig + RALsPubSubSConns []*HaPoolConfig + RALsUserSConns []*HaPoolConfig + RALsAliasSConns []*HaPoolConfig + RpSubjectPrefixMatching bool // enables prefix matching for the rating profile subject + LcrSubjectPrefixMatching bool // enables prefix matching for the lcr subject + BalancerEnabled bool + SchedulerEnabled bool + CDRSEnabled bool // Enable CDR Server service + CDRSExtraFields []*utils.RSRField // Extra fields to store in CDRs + CDRSStoreCdrs bool // store cdrs in storDb + CDRSRaterConns []*HaPoolConfig // address where to reach the Rater for cost calculation: <""|internal|x.y.z.y:1234> + CDRSPubSubSConns []*HaPoolConfig // address where to reach the pubsub service: <""|internal|x.y.z.y:1234> + CDRSUserSConns []*HaPoolConfig // address where to reach the users service: <""|internal|x.y.z.y:1234> + CDRSAliaseSConns []*HaPoolConfig // address where to reach the aliases service: <""|internal|x.y.z.y:1234> + CDRSStatSConns []*HaPoolConfig // address where to reach the cdrstats service. Empty to disable stats gathering <""|internal|x.y.z.y:1234> + CDRSCdrReplication []*CdrReplicationCfg // Replicate raw CDRs to a number of servers + CDRStatsEnabled bool // Enable CDR Stats service + CDRStatsSaveInterval time.Duration // Save interval duration + CdreProfiles map[string]*CdreConfig + CdrcProfiles map[string][]*CdrcConfig // Number of CDRC instances running imports, format map[dirPath][]{Configs} + SmGenericConfig *SmGenericConfig + SmFsConfig *SmFsConfig // SMFreeSWITCH configuration + SmKamConfig *SmKamConfig // SM-Kamailio Configuration + SmOsipsConfig *SmOsipsConfig // SMOpenSIPS Configuration + diameterAgentCfg *DiameterAgentCfg // DiameterAgent configuration + HistoryServer string // Address where to reach the master history server: + HistoryServerEnabled bool // Starts History as server: . + HistoryDir string // Location on disk where to store history files. 
+ HistorySaveInterval time.Duration // The timout duration between pubsub writes + PubSubServerEnabled bool // Starts PubSub as server: . + AliasesServerEnabled bool // Starts PubSub as server: . + UserServerEnabled bool // Starts User as server: + UserServerIndexes []string // List of user profile field indexes + MailerServer string // The server to use when sending emails out + MailerAuthUser string // Authenticate to email server using this user + MailerAuthPass string // Authenticate to email server with this password + MailerFromAddr string // From address used when sending emails out + DataFolderPath string // Path towards data folder, for tests internal usage, not loading out of .json options + sureTaxCfg *SureTaxCfg // Load here SureTax configuration, as pointer so we can have runtime reloads in the future + ConfigReloads map[string]chan struct{} // Signals to specific entities that a config reload should occur // Cache defaults loaded from json and needing clones dfltCdreProfile *CdreConfig // Default cdreConfig profile dfltCdrcProfile *CdrcConfig // Default cdrcConfig profile @@ -257,42 +259,62 @@ type CGRConfig struct { func (self *CGRConfig) checkConfigSanity() error { // Rater checks - if self.RaterEnabled { - if self.RaterBalancer == utils.INTERNAL && !self.BalancerEnabled { + if self.RALsEnabled { + if self.RALsBalancer == utils.MetaInternal && !self.BalancerEnabled { return errors.New("Balancer not enabled but requested by Rater component.") } - if self.RaterCdrStats == utils.INTERNAL && !self.CDRStatsEnabled { - return errors.New("CDRStats not enabled but requested by Rater component.") + for _, connCfg := range self.RALsCDRStatSConns { + if connCfg.Address == utils.MetaInternal && !self.CDRStatsEnabled { + return errors.New("CDRStats not enabled but requested by Rater component.") + } } - if self.RaterHistoryServer == utils.INTERNAL && !self.HistoryServerEnabled { - return errors.New("History server not enabled but requested by Rater component.") 
+ for _, connCfg := range self.RALsHistorySConns { + if connCfg.Address == utils.MetaInternal && !self.HistoryServerEnabled { + return errors.New("History server not enabled but requested by Rater component.") + } } - if self.RaterPubSubServer == utils.INTERNAL && !self.PubSubServerEnabled { - return errors.New("PubSub server not enabled but requested by Rater component.") + for _, connCfg := range self.RALsPubSubSConns { + if connCfg.Address == utils.MetaInternal && !self.PubSubServerEnabled { + return errors.New("PubSub server not enabled but requested by Rater component.") + } } - if self.RaterAliasesServer == utils.INTERNAL && !self.AliasesServerEnabled { - return errors.New("Aliases server not enabled but requested by Rater component.") + for _, connCfg := range self.RALsAliasSConns { + if connCfg.Address == utils.MetaInternal && !self.AliasesServerEnabled { + return errors.New("Alias server not enabled but requested by Rater component.") + } } - if self.RaterUserServer == utils.INTERNAL && !self.UserServerEnabled { - return errors.New("Users service not enabled but requested by Rater component.") + for _, connCfg := range self.RALsUserSConns { + if connCfg.Address == utils.MetaInternal && !self.UserServerEnabled { + return errors.New("User service not enabled but requested by Rater component.") + } } } // CDRServer checks if self.CDRSEnabled { - if self.CDRSRater == utils.INTERNAL && !self.RaterEnabled { - return errors.New("Rater not enabled but requested by CDRS component.") + for _, cdrsRaterConn := range self.CDRSRaterConns { + if cdrsRaterConn.Address == utils.MetaInternal && !self.RALsEnabled { + return errors.New("RALs not enabled but requested by CDRS component.") + } } - if self.CDRSPubSub == utils.INTERNAL && !self.PubSubServerEnabled { - return errors.New("PubSub service not enabled but requested by CDRS component.") + for _, connCfg := range self.CDRSPubSubSConns { + if connCfg.Address == utils.MetaInternal && !self.PubSubServerEnabled { + return 
errors.New("PubSubS not enabled but requested by CDRS component.") + } } - if self.CDRSUsers == utils.INTERNAL && !self.UserServerEnabled { - return errors.New("Users service not enabled but requested by CDRS component.") + for _, connCfg := range self.CDRSUserSConns { + if connCfg.Address == utils.MetaInternal && !self.UserServerEnabled { + return errors.New("UserS not enabled but requested by CDRS component.") + } } - if self.CDRSAliases == utils.INTERNAL && !self.AliasesServerEnabled { - return errors.New("Aliases service not enabled but requested by CDRS component.") + for _, connCfg := range self.CDRSAliaseSConns { + if connCfg.Address == utils.MetaInternal && !self.AliasesServerEnabled { + return errors.New("AliaseS not enabled but requested by CDRS component.") + } } - if self.CDRSStats == utils.INTERNAL && !self.CDRStatsEnabled { - return errors.New("CDRStats not enabled but requested by CDRS component.") + for _, connCfg := range self.CDRSStatSConns { + if connCfg.Address == utils.MetaInternal && !self.CDRStatsEnabled { + return errors.New("CDRStatS not enabled but requested by CDRS component.") + } } } // CDRC sanity checks @@ -301,10 +323,13 @@ func (self *CGRConfig) checkConfigSanity() error { if !cdrcInst.Enabled { continue } - if len(cdrcInst.Cdrs) == 0 { - return errors.New("CdrC enabled but no CDRS defined!") - } else if cdrcInst.Cdrs == utils.INTERNAL && !self.CDRSEnabled { - return errors.New("CDRS not enabled but referenced from CDRC") + if len(cdrcInst.CdrsConns) == 0 { + return fmt.Errorf(" Instance: %s, CdrC enabled but no CDRS defined!", cdrcInst.ID) + } + for _, conn := range cdrcInst.CdrsConns { + if conn.Address == utils.MetaInternal && !self.CDRSEnabled { + return errors.New("CDRS not enabled but referenced from CDRC") + } } if len(cdrcInst.ContentFields) == 0 { return errors.New("CdrC enabled but no fields to be processed defined!") @@ -320,70 +345,94 @@ func (self *CGRConfig) checkConfigSanity() error { } } } - // SM-Generic checks + // 
SMGeneric checks if self.SmGenericConfig.Enabled { - if len(self.SmGenericConfig.HaRater) == 0 { - return errors.New("Rater definition is mandatory!") + if len(self.SmGenericConfig.RALsConns) == 0 { + return errors.New(" RALs definition is mandatory!") } - if len(self.SmGenericConfig.HaCdrs) == 0 { - return errors.New("Cdrs definition is mandatory!") + for _, smgRALsConn := range self.SmGenericConfig.RALsConns { + if smgRALsConn.Address == utils.MetaInternal && !self.RALsEnabled { + return errors.New(" RALs not enabled but requested by SMGeneric component.") + } } - if self.SmGenericConfig.HaRater[0].Server == utils.INTERNAL && !self.RaterEnabled { - return errors.New("Rater not enabled but requested by SM-Generic component.") + if len(self.SmGenericConfig.CDRsConns) == 0 { + return errors.New(" CDRs definition is mandatory!") } - if self.SmGenericConfig.HaCdrs[0].Server == utils.INTERNAL && !self.CDRSEnabled { - return errors.New("CDRS not enabled but referenced by SM-Generic component") + for _, smgCDRSConn := range self.SmGenericConfig.CDRsConns { + if smgCDRSConn.Address == utils.MetaInternal && !self.CDRSEnabled { + return errors.New(" CDRS not enabled but referenced by SMGeneric component") + } } } - // SM-FreeSWITCH checks + // SMFreeSWITCH checks if self.SmFsConfig.Enabled { - if len(self.SmFsConfig.HaRater) == 0 { - return errors.New("Rater definition is mandatory!") + if len(self.SmFsConfig.RALsConns) == 0 { + return errors.New(" RALs definition is mandatory!") } - if len(self.SmFsConfig.HaCdrs) == 0 { - return errors.New("Cdrs definition is mandatory!") + for _, smFSRaterConn := range self.SmFsConfig.RALsConns { + if smFSRaterConn.Address == utils.MetaInternal && !self.RALsEnabled { + return errors.New(" RALs not enabled but requested by SMFreeSWITCH component.") + } } - if self.SmFsConfig.HaRater[0].Server == utils.INTERNAL && !self.RaterEnabled { - return errors.New("Rater not enabled but requested by SM-FreeSWITCH component.") + if 
len(self.SmFsConfig.CDRsConns) == 0 { + return errors.New(" CDRS definition is mandatory!") } - if self.SmFsConfig.HaCdrs[0].Server == utils.INTERNAL && !self.CDRSEnabled { - return errors.New("CDRS not enabled but referenced by SM-FreeSWITCH component") + for _, smFSCDRSConn := range self.SmFsConfig.CDRsConns { + if smFSCDRSConn.Address == utils.MetaInternal && !self.CDRSEnabled { + return errors.New("CDRS not enabled but referenced by SMFreeSWITCH component") + } } } // SM-Kamailio checks if self.SmKamConfig.Enabled { - if len(self.SmKamConfig.HaRater) == 0 { + if len(self.SmKamConfig.RALsConns) == 0 { return errors.New("Rater definition is mandatory!") } - if len(self.SmKamConfig.HaCdrs) == 0 { + for _, smKamRaterConn := range self.SmKamConfig.RALsConns { + if smKamRaterConn.Address == utils.MetaInternal && !self.RALsEnabled { + return errors.New("Rater not enabled but requested by SM-Kamailio component.") + } + } + if len(self.SmKamConfig.CDRsConns) == 0 { return errors.New("Cdrs definition is mandatory!") } - if self.SmKamConfig.HaRater[0].Server == utils.INTERNAL && !self.RaterEnabled { - return errors.New("Rater not enabled but requested by SM-Kamailio component.") - } - if self.SmKamConfig.HaCdrs[0].Server == utils.INTERNAL && !self.CDRSEnabled { - return errors.New("CDRS not enabled but referenced by SM-Kamailio component") + for _, smKamCDRSConn := range self.SmKamConfig.CDRsConns { + if smKamCDRSConn.Address == utils.MetaInternal && !self.CDRSEnabled { + return errors.New("CDRS not enabled but referenced by SM-Kamailio component") + } } } - // SM-OpenSIPS checks + // SMOpenSIPS checks if self.SmOsipsConfig.Enabled { - if len(self.SmOsipsConfig.HaRater) == 0 { - return errors.New("Rater definition is mandatory!") + if len(self.SmOsipsConfig.RALsConns) == 0 { + return errors.New(" Rater definition is mandatory!") } - if len(self.SmOsipsConfig.HaCdrs) == 0 { - return errors.New("Cdrs definition is mandatory!") + for _, smOsipsRaterConn := range 
self.SmOsipsConfig.RALsConns { + if smOsipsRaterConn.Address == utils.MetaInternal && !self.RALsEnabled { + return errors.New(" RALs not enabled but requested by SMOpenSIPS component.") + } } - if self.SmOsipsConfig.HaRater[0].Server == utils.INTERNAL && !self.RaterEnabled { - return errors.New("Rater not enabled but requested by SM-OpenSIPS component.") + if len(self.SmOsipsConfig.CDRsConns) == 0 { + return errors.New(" CDRs definition is mandatory!") } - if self.SmOsipsConfig.HaCdrs[0].Server == utils.INTERNAL && !self.CDRSEnabled { - return errors.New("CDRS not enabled but referenced by SM-OpenSIPS component") + + for _, smOsipsCDRSConn := range self.SmOsipsConfig.CDRsConns { + if smOsipsCDRSConn.Address == utils.MetaInternal && !self.CDRSEnabled { + return errors.New(" CDRS not enabled but referenced by SMOpenSIPS component") + } } } // DAgent checks if self.diameterAgentCfg.Enabled { - if self.diameterAgentCfg.SMGeneric == utils.INTERNAL && !self.SmGenericConfig.Enabled { - return errors.New("SMGeneric not enabled but referenced by DiameterAgent component") + for _, daSMGConn := range self.diameterAgentCfg.SMGenericConns { + if daSMGConn.Address == utils.MetaInternal && !self.SmGenericConfig.Enabled { + return errors.New("SMGeneric not enabled but referenced by DiameterAgent component") + } + } + for _, daPubSubSConn := range self.diameterAgentCfg.PubSubConns { + if daPubSubSConn.Address == utils.MetaInternal && !self.PubSubServerEnabled { + return errors.New("PubSubS not enabled but requested by DiameterAgent component.") + } } } return nil @@ -423,7 +472,7 @@ func (self *CGRConfig) loadFromJsonCfg(jsnCfg *CgrJsonCfg) error { return err } - jsnRaterCfg, err := jsnCfg.RaterJsonCfg() + jsnRALsCfg, err := jsnCfg.RalsJsonCfg() if err != nil { return err } @@ -525,8 +574,8 @@ func (self *CGRConfig) loadFromJsonCfg(jsnCfg *CgrJsonCfg) error { if jsnTpDbCfg.Db_user != nil { self.TpDbUser = *jsnTpDbCfg.Db_user } - if jsnTpDbCfg.Db_passwd != nil { - self.TpDbPass = 
*jsnTpDbCfg.Db_passwd + if jsnTpDbCfg.Db_password != nil { + self.TpDbPass = *jsnTpDbCfg.Db_password } } @@ -546,8 +595,8 @@ func (self *CGRConfig) loadFromJsonCfg(jsnCfg *CgrJsonCfg) error { if jsnDataDbCfg.Db_user != nil { self.DataDbUser = *jsnDataDbCfg.Db_user } - if jsnDataDbCfg.Db_passwd != nil { - self.DataDbPass = *jsnDataDbCfg.Db_passwd + if jsnDataDbCfg.Db_password != nil { + self.DataDbPass = *jsnDataDbCfg.Db_password } if jsnDataDbCfg.Load_history_size != nil { self.LoadHistorySize = *jsnDataDbCfg.Load_history_size @@ -570,8 +619,8 @@ func (self *CGRConfig) loadFromJsonCfg(jsnCfg *CgrJsonCfg) error { if jsnStorDbCfg.Db_user != nil { self.StorDBUser = *jsnStorDbCfg.Db_user } - if jsnStorDbCfg.Db_passwd != nil { - self.StorDBPass = *jsnStorDbCfg.Db_passwd + if jsnStorDbCfg.Db_password != nil { + self.StorDBPass = *jsnStorDbCfg.Db_password } if jsnStorDbCfg.Max_open_conns != nil { self.StorDBMaxOpenConns = *jsnStorDbCfg.Max_open_conns @@ -588,8 +637,8 @@ func (self *CGRConfig) loadFromJsonCfg(jsnCfg *CgrJsonCfg) error { if jsnGeneralCfg.Dbdata_encoding != nil { self.DBDataEncoding = *jsnGeneralCfg.Dbdata_encoding } - if jsnGeneralCfg.Default_reqtype != nil { - self.DefaultReqType = *jsnGeneralCfg.Default_reqtype + if jsnGeneralCfg.Default_request_type != nil { + self.DefaultReqType = *jsnGeneralCfg.Default_request_type } if jsnGeneralCfg.Default_category != nil { self.DefaultCategory = *jsnGeneralCfg.Default_category @@ -597,9 +646,6 @@ func (self *CGRConfig) loadFromJsonCfg(jsnCfg *CgrJsonCfg) error { if jsnGeneralCfg.Default_tenant != nil { self.DefaultTenant = *jsnGeneralCfg.Default_tenant } - if jsnGeneralCfg.Default_subject != nil { - self.DefaultSubject = *jsnGeneralCfg.Default_subject - } if jsnGeneralCfg.Connect_attempts != nil { self.ConnectAttempts = *jsnGeneralCfg.Connect_attempts } @@ -645,30 +691,53 @@ func (self *CGRConfig) loadFromJsonCfg(jsnCfg *CgrJsonCfg) error { } } - if jsnRaterCfg != nil { - if jsnRaterCfg.Enabled != nil { - 
self.RaterEnabled = *jsnRaterCfg.Enabled + if jsnRALsCfg != nil { + if jsnRALsCfg.Enabled != nil { + self.RALsEnabled = *jsnRALsCfg.Enabled } - if jsnRaterCfg.Balancer != nil { - self.RaterBalancer = *jsnRaterCfg.Balancer + if jsnRALsCfg.Balancer != nil { + self.RALsBalancer = *jsnRALsCfg.Balancer } - if jsnRaterCfg.Cdrstats != nil { - self.RaterCdrStats = *jsnRaterCfg.Cdrstats + if jsnRALsCfg.Cdrstats_conns != nil { + self.RALsCDRStatSConns = make([]*HaPoolConfig, len(*jsnRALsCfg.Cdrstats_conns)) + for idx, jsnHaCfg := range *jsnRALsCfg.Cdrstats_conns { + self.RALsCDRStatSConns[idx] = NewDfltHaPoolConfig() + self.RALsCDRStatSConns[idx].loadFromJsonCfg(jsnHaCfg) + } } - if jsnRaterCfg.Historys != nil { - self.RaterHistoryServer = *jsnRaterCfg.Historys + if jsnRALsCfg.Historys_conns != nil { + self.RALsHistorySConns = make([]*HaPoolConfig, len(*jsnRALsCfg.Historys_conns)) + for idx, jsnHaCfg := range *jsnRALsCfg.Historys_conns { + self.RALsHistorySConns[idx] = NewDfltHaPoolConfig() + self.RALsHistorySConns[idx].loadFromJsonCfg(jsnHaCfg) + } } - if jsnRaterCfg.Pubsubs != nil { - self.RaterPubSubServer = *jsnRaterCfg.Pubsubs + if jsnRALsCfg.Pubsubs_conns != nil { + self.RALsPubSubSConns = make([]*HaPoolConfig, len(*jsnRALsCfg.Pubsubs_conns)) + for idx, jsnHaCfg := range *jsnRALsCfg.Pubsubs_conns { + self.RALsPubSubSConns[idx] = NewDfltHaPoolConfig() + self.RALsPubSubSConns[idx].loadFromJsonCfg(jsnHaCfg) + } } - if jsnRaterCfg.Aliases != nil { - self.RaterAliasesServer = *jsnRaterCfg.Aliases + if jsnRALsCfg.Aliases_conns != nil { + self.RALsAliasSConns = make([]*HaPoolConfig, len(*jsnRALsCfg.Aliases_conns)) + for idx, jsnHaCfg := range *jsnRALsCfg.Aliases_conns { + self.RALsAliasSConns[idx] = NewDfltHaPoolConfig() + self.RALsAliasSConns[idx].loadFromJsonCfg(jsnHaCfg) + } } - if jsnRaterCfg.Users != nil { - self.RaterUserServer = *jsnRaterCfg.Users + if jsnRALsCfg.Users_conns != nil { + self.RALsUserSConns = make([]*HaPoolConfig, len(*jsnRALsCfg.Users_conns)) + for idx, 
jsnHaCfg := range *jsnRALsCfg.Users_conns { + self.RALsUserSConns[idx] = NewDfltHaPoolConfig() + self.RALsUserSConns[idx].loadFromJsonCfg(jsnHaCfg) + } } - if jsnRaterCfg.Rp_subject_prefix_matching != nil { - self.RpSubjectPrefixMatching = *jsnRaterCfg.Rp_subject_prefix_matching + if jsnRALsCfg.Rp_subject_prefix_matching != nil { + self.RpSubjectPrefixMatching = *jsnRALsCfg.Rp_subject_prefix_matching + } + if jsnRALsCfg.Lcr_subject_prefix_matching != nil { + self.LcrSubjectPrefixMatching = *jsnRALsCfg.Lcr_subject_prefix_matching } } @@ -692,20 +761,40 @@ func (self *CGRConfig) loadFromJsonCfg(jsnCfg *CgrJsonCfg) error { if jsnCdrsCfg.Store_cdrs != nil { self.CDRSStoreCdrs = *jsnCdrsCfg.Store_cdrs } - if jsnCdrsCfg.Rater != nil { - self.CDRSRater = *jsnCdrsCfg.Rater + if jsnCdrsCfg.Rals_conns != nil { + self.CDRSRaterConns = make([]*HaPoolConfig, len(*jsnCdrsCfg.Rals_conns)) + for idx, jsnHaCfg := range *jsnCdrsCfg.Rals_conns { + self.CDRSRaterConns[idx] = NewDfltHaPoolConfig() + self.CDRSRaterConns[idx].loadFromJsonCfg(jsnHaCfg) + } } - if jsnCdrsCfg.Pubsubs != nil { - self.CDRSPubSub = *jsnCdrsCfg.Pubsubs + if jsnCdrsCfg.Pubsubs_conns != nil { + self.CDRSPubSubSConns = make([]*HaPoolConfig, len(*jsnCdrsCfg.Pubsubs_conns)) + for idx, jsnHaCfg := range *jsnCdrsCfg.Pubsubs_conns { + self.CDRSPubSubSConns[idx] = NewDfltHaPoolConfig() + self.CDRSPubSubSConns[idx].loadFromJsonCfg(jsnHaCfg) + } } - if jsnCdrsCfg.Users != nil { - self.CDRSUsers = *jsnCdrsCfg.Users + if jsnCdrsCfg.Users_conns != nil { + self.CDRSUserSConns = make([]*HaPoolConfig, len(*jsnCdrsCfg.Users_conns)) + for idx, jsnHaCfg := range *jsnCdrsCfg.Users_conns { + self.CDRSUserSConns[idx] = NewDfltHaPoolConfig() + self.CDRSUserSConns[idx].loadFromJsonCfg(jsnHaCfg) + } } - if jsnCdrsCfg.Aliases != nil { - self.CDRSAliases = *jsnCdrsCfg.Aliases + if jsnCdrsCfg.Aliases_conns != nil { + self.CDRSAliaseSConns = make([]*HaPoolConfig, len(*jsnCdrsCfg.Aliases_conns)) + for idx, jsnHaCfg := range 
*jsnCdrsCfg.Aliases_conns { + self.CDRSAliaseSConns[idx] = NewDfltHaPoolConfig() + self.CDRSAliaseSConns[idx].loadFromJsonCfg(jsnHaCfg) + } } - if jsnCdrsCfg.Cdrstats != nil { - self.CDRSStats = *jsnCdrsCfg.Cdrstats + if jsnCdrsCfg.Cdrstats_conns != nil { + self.CDRSStatSConns = make([]*HaPoolConfig, len(*jsnCdrsCfg.Cdrstats_conns)) + for idx, jsnHaCfg := range *jsnCdrsCfg.Cdrstats_conns { + self.CDRSStatSConns[idx] = NewDfltHaPoolConfig() + self.CDRSStatSConns[idx].loadFromJsonCfg(jsnHaCfg) + } } if jsnCdrsCfg.Cdr_replication != nil { self.CDRSCdrReplication = make([]*CdrReplicationCfg, len(*jsnCdrsCfg.Cdr_replication)) @@ -714,8 +803,8 @@ func (self *CGRConfig) loadFromJsonCfg(jsnCfg *CgrJsonCfg) error { if rplJsonCfg.Transport != nil { self.CDRSCdrReplication[idx].Transport = *rplJsonCfg.Transport } - if rplJsonCfg.Server != nil { - self.CDRSCdrReplication[idx].Server = *rplJsonCfg.Server + if rplJsonCfg.Address != nil { + self.CDRSCdrReplication[idx].Address = *rplJsonCfg.Address } if rplJsonCfg.Synchronous != nil { self.CDRSCdrReplication[idx].Synchronous = *rplJsonCfg.Synchronous @@ -760,28 +849,26 @@ func (self *CGRConfig) loadFromJsonCfg(jsnCfg *CgrJsonCfg) error { } } } - if jsnCdrcCfg != nil { if self.CdrcProfiles == nil { - self.CdrcProfiles = make(map[string]map[string]*CdrcConfig) + self.CdrcProfiles = make(map[string][]*CdrcConfig) } - for profileName, jsnCrc1Cfg := range jsnCdrcCfg { + for _, jsnCrc1Cfg := range jsnCdrcCfg { if _, hasDir := self.CdrcProfiles[*jsnCrc1Cfg.Cdr_in_dir]; !hasDir { - self.CdrcProfiles[*jsnCrc1Cfg.Cdr_in_dir] = make(map[string]*CdrcConfig) + self.CdrcProfiles[*jsnCrc1Cfg.Cdr_in_dir] = make([]*CdrcConfig, 0) } - if _, hasProfile := self.CdrcProfiles[profileName]; !hasProfile { - if profileName == utils.META_DEFAULT { - self.CdrcProfiles[*jsnCrc1Cfg.Cdr_in_dir][profileName] = new(CdrcConfig) - } else { - self.CdrcProfiles[*jsnCrc1Cfg.Cdr_in_dir][profileName] = self.dfltCdrcProfile.Clone() // Clone default so we do not inherit 
pointers - } + var cdrcInstCfg *CdrcConfig + if *jsnCrc1Cfg.Id == utils.META_DEFAULT && self.dfltCdrcProfile == nil { + cdrcInstCfg = new(CdrcConfig) + } else { + cdrcInstCfg = self.dfltCdrcProfile.Clone() // Clone default so we do not inherit pointers } - if err = self.CdrcProfiles[*jsnCrc1Cfg.Cdr_in_dir][profileName].loadFromJsonCfg(jsnCrc1Cfg); err != nil { + if err := cdrcInstCfg.loadFromJsonCfg(jsnCrc1Cfg); err != nil { return err } + self.CdrcProfiles[*jsnCrc1Cfg.Cdr_in_dir] = append(self.CdrcProfiles[*jsnCrc1Cfg.Cdr_in_dir], cdrcInstCfg) } } - if jsnSmGenericCfg != nil { if err := self.SmGenericConfig.loadFromJsonCfg(jsnSmGenericCfg); err != nil { return err @@ -853,8 +940,8 @@ func (self *CGRConfig) loadFromJsonCfg(jsnCfg *CgrJsonCfg) error { if jsnMailerCfg.Auth_user != nil { self.MailerAuthUser = *jsnMailerCfg.Auth_user } - if jsnMailerCfg.Auth_passwd != nil { - self.MailerAuthPass = *jsnMailerCfg.Auth_passwd + if jsnMailerCfg.Auth_password != nil { + self.MailerAuthPass = *jsnMailerCfg.Auth_password } if jsnMailerCfg.From_address != nil { self.MailerFromAddr = *jsnMailerCfg.From_address diff --git a/config/config_defaults.go b/config/config_defaults.go index f79ef663b..c10d0d06c 100644 --- a/config/config_defaults.go +++ b/config/config_defaults.go @@ -29,18 +29,17 @@ const CGRATES_CFG_JSON = ` "general": { "http_skip_tls_verify": false, // if enabled Http Client will accept any TLS certificate - "rounding_decimals": 5, // system level precision for floats + "rounding_decimals": 5, // system level precision for floats "dbdata_encoding": "msgpack", // encoding used to store object data in strings: "tpexport_dir": "/var/log/cgrates/tpe", // path towards export folder for offline Tariff Plans "http_failed_dir": "/var/log/cgrates/http_failed", // directory path where we store failed http requests - "default_reqtype": "*rated", // default request type to consider when missing from requests: <""|*prepaid|*postpaid|*pseudoprepaid|*rated> - "default_category": 
"call", // default Type of Record to consider when missing from requests - "default_tenant": "cgrates.org", // default Tenant to consider when missing from requests - "default_subject": "cgrates", // default rating Subject to consider when missing from requests + "default_request_type": "*rated", // default request type to consider when missing from requests: <""|*prepaid|*postpaid|*pseudoprepaid|*rated> + "default_category": "call", // default category to consider when missing from requests + "default_tenant": "cgrates.org", // default tenant to consider when missing from requests "default_timezone": "Local", // default timezone for timestamps where not specified <""|UTC|Local|$IANA_TZ_DB> "connect_attempts": 3, // initial server connect attempts "reconnects": -1, // number of retries in case of connection lost - "response_cache_ttl": "3s", // the life span of a cached response + "response_cache_ttl": "0s", // the life span of a cached response "internal_ttl": "2m", // maximum duration to wait for internal connections before giving up }, @@ -58,7 +57,7 @@ const CGRATES_CFG_JSON = ` "db_port": 6379, // port to reach the tariffplan_db "db_name": "10", // tariffplan_db name to connect to "db_user": "", // sername to use when connecting to tariffplan_db - "db_passwd": "", // password to use when connecting to tariffplan_db + "db_password": "", // password to use when connecting to tariffplan_db }, @@ -68,7 +67,7 @@ const CGRATES_CFG_JSON = ` "db_port": 6379, // data_db port to reach the database "db_name": "11", // data_db database name to connect to "db_user": "", // username to use when connecting to data_db - "db_passwd": "", // password to use when connecting to data_db + "db_password": "", // password to use when connecting to data_db "load_history_size": 10, // Number of records in the load history }, @@ -79,7 +78,7 @@ const CGRATES_CFG_JSON = ` "db_port": 3306, // the port to reach the stordb "db_name": "cgrates", // stor database name "db_user": "cgrates", // 
username to use when connecting to stordb - "db_passwd": "CGRateS.org", // password to use when connecting to stordb + "db_password": "CGRateS.org", // password to use when connecting to stordb "max_open_conns": 100, // maximum database connections opened "max_idle_conns": 10, // maximum database connections idle "cdrs_indexes": [], // indexes on cdrs table to speed up queries, used only in case of mongo @@ -91,15 +90,16 @@ const CGRATES_CFG_JSON = ` }, -"rater": { +"rals": { "enabled": false, // enable Rater service: - "balancer": "", // register to balancer as worker: <""|internal|x.y.z.y:1234> - "cdrstats": "", // address where to reach the cdrstats service, empty to disable stats functionality: <""|internal|x.y.z.y:1234> - "historys": "", // address where to reach the history service, empty to disable history functionality: <""|internal|x.y.z.y:1234> - "pubsubs": "", // address where to reach the pubusb service, empty to disable pubsub functionality: <""|internal|x.y.z.y:1234> - "users": "", // address where to reach the user service, empty to disable user profile functionality: <""|internal|x.y.z.y:1234> - "aliases": "", // address where to reach the aliases service, empty to disable aliases functionality: <""|internal|x.y.z.y:1234> - "rp_subject_prefix_matching": false // enables prefix matching for the rating profile subject + "balancer": "", // register to balancer as worker: <""|*internal|x.y.z.y:1234> + "cdrstats_conns": [], // address where to reach the cdrstats service, empty to disable stats functionality: <""|*internal|x.y.z.y:1234> + "historys_conns": [], // address where to reach the history service, empty to disable history functionality: <""|*internal|x.y.z.y:1234> + "pubsubs_conns": [], // address where to reach the pubusb service, empty to disable pubsub functionality: <""|*internal|x.y.z.y:1234> + "users_conns": [], // address where to reach the user service, empty to disable user profile functionality: <""|*internal|x.y.z.y:1234> + 
"aliases_conns": [], // address where to reach the aliases service, empty to disable aliases functionality: <""|*internal|x.y.z.y:1234> + "rp_subject_prefix_matching": false, // enables prefix matching for the rating profile subject + "lcr_subject_prefix_matching": false // enables prefix matching for the lcr subject }, @@ -112,12 +112,14 @@ const CGRATES_CFG_JSON = ` "enabled": false, // start the CDR Server service: "extra_fields": [], // extra fields to store in CDRs for non-generic CDRs "store_cdrs": true, // store cdrs in storDb - "rater": "internal", // address where to reach the Rater for cost calculation, empty to disable functionality: <""|internal|x.y.z.y:1234> - "pubsubs": "", // address where to reach the pubusb service, empty to disable pubsub functionality: <""|internal|x.y.z.y:1234> - "users": "", // address where to reach the user service, empty to disable user profile functionality: <""|internal|x.y.z.y:1234> - "aliases": "", // address where to reach the aliases service, empty to disable aliases functionality: <""|internal|x.y.z.y:1234> - "cdrstats": "", // address where to reach the cdrstats service, empty to disable stats functionality<""|internal|x.y.z.y:1234> - "cdr_replication":[], // replicate the raw CDR to a number of servers + "rals_conns": [ + {"address": "*internal"} // address where to reach the Rater for cost calculation, empty to disable functionality: <""|*internal|x.y.z.y:1234> + ], + "pubsubs_conns": [], // address where to reach the pubusb service, empty to disable pubsub functionality: <""|*internal|x.y.z.y:1234> + "users_conns": [], // address where to reach the user service, empty to disable user profile functionality: <""|*internal|x.y.z.y:1234> + "aliases_conns": [], // address where to reach the aliases service, empty to disable aliases functionality: <""|*internal|x.y.z.y:1234> + "cdrstats_conns": [], // address where to reach the cdrstats service, empty to disable stats functionality<""|*internal|x.y.z.y:1234> + 
"cdr_replication":[] // replicate the raw CDR to a number of servers }, @@ -133,7 +135,7 @@ const CGRATES_CFG_JSON = ` "field_separator": ",", "data_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from KBytes to Bytes) "sms_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from SMS unit to call duration in some billing systems) - "mms_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from MMS unit to call duration in some billing systems) + "mms_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from MMS unit to call duration in some billing systems) "generic_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from GENERIC unit to call duration in some billing systems) "cost_multiply_factor": 1, // multiply cost before export, eg: add VAT "cost_rounding_decimals": -1, // rounding decimals for Cost values. -1 to disable rounding @@ -164,11 +166,14 @@ const CGRATES_CFG_JSON = ` }, -"cdrc": { - "*default": { +"cdrc": [ + { + "id": "*default", // identifier of the CDRC runner "enabled": false, // enable CDR client functionality "dry_run": false, // do not send the CDRs to CDRS, just parse them - "cdrs": "internal", // address where to reach CDR server. + "cdrs_conns": [ + {"address": "*internal"} // address where to reach CDR server. 
<*internal|x.y.z.y:1234> + ], "cdr_format": "csv", // CDR file format "field_separator": ",", // separator used in case of csv files "timezone": "", // timezone for timestamps where not specified <""|UTC|Local|$IANA_TZ_DB> @@ -198,52 +203,67 @@ const CGRATES_CFG_JSON = ` {"tag": "Usage", "field_id": "Usage", "type": "*composed", "value": "13", "mandatory": true}, ], "trailer_fields": [], // template of the import trailer fields - } -}, + }, +], "sm_generic": { "enabled": false, // starts SessionManager service: "listen_bijson": "127.0.0.1:2014", // address where to listen for bidirectional JSON-RPC requests - "rater": "internal", // address where to reach the Rater <""|internal|127.0.0.1:2013> - "cdrs": "internal", // address where to reach CDR Server <""|internal|x.y.z.y:1234> + "rals_conns": [ + {"address": "*internal"} // address where to reach the Rater <""|*internal|127.0.0.1:2013> + ], + "cdrs_conns": [ + {"address": "*internal"} // address where to reach CDR Server, empty to disable CDR capturing <*internal|x.y.z.y:1234> + ], "debit_interval": "0s", // interval to perform debits on. 
"min_call_duration": "0s", // only authorize calls with allowed duration higher than this "max_call_duration": "3h", // maximum call duration a prepaid call can last + "session_ttl": "0s", // time after a session with no updates is terminated, not defined by default + //"session_ttl_last_used": "", // tweak LastUsed for sessions timing-out, not defined by default + //"session_ttl_usage": "", // tweak Usage for sessions timing-out, not defined by default }, "sm_freeswitch": { "enabled": false, // starts SessionManager service: - "rater": "internal", // address where to reach the Rater <""|internal|127.0.0.1:2013> - "cdrs": "internal", // address where to reach CDR Server, empty to disable CDR capturing <""|internal|x.y.z.y:1234> - "create_cdr": false, // create CDR out of events and sends it to CDRS component - "extra_fields": [], // extra fields to store in auth/CDRs when creating them - "debit_interval": "10s", // interval to perform debits on. - "min_call_duration": "0s", // only authorize calls with allowed duration higher than this - "max_call_duration": "3h", // maximum call duration a prepaid call can last - "min_dur_low_balance": "5s", // threshold which will trigger low balance warnings for prepaid calls (needs to be lower than debit_interval) - "low_balance_ann_file": "", // file to be played when low balance is reached for prepaid calls - "empty_balance_context": "", // if defined, prepaid calls will be transfered to this context on empty balance - "empty_balance_ann_file": "", // file to be played before disconnecting prepaid calls on empty balance (applies only if no context defined) - "subscribe_park": true, // subscribe via fsock to receive park events - "channel_sync_interval": "5m", // sync channels with freeswitch regularly - "max_wait_connection": "2s", // maximum duration to wait for a connection to be retrieved from the pool - "connections":[ // instantiate connections to multiple FreeSWITCH servers - {"server": "127.0.0.1:8021", "password": 
"ClueCon", "reconnects": 5} + "rals_conns": [ + {"address": "*internal"} // address where to reach the Rater <""|*internal|127.0.0.1:2013> + ], + "cdrs_conns": [ + {"address": "*internal"} // address where to reach CDR Server, empty to disable CDR capturing <*internal|x.y.z.y:1234> + ], + "create_cdr": false, // create CDR out of events and sends them to CDRS component + "extra_fields": [], // extra fields to store in auth/CDRs when creating them + "debit_interval": "10s", // interval to perform debits on. + "min_call_duration": "0s", // only authorize calls with allowed duration higher than this + "max_call_duration": "3h", // maximum call duration a prepaid call can last + "min_dur_low_balance": "5s", // threshold which will trigger low balance warnings for prepaid calls (needs to be lower than debit_interval) + "low_balance_ann_file": "", // file to be played when low balance is reached for prepaid calls + "empty_balance_context": "", // if defined, prepaid calls will be transfered to this context on empty balance + "empty_balance_ann_file": "", // file to be played before disconnecting prepaid calls on empty balance (applies only if no context defined) + "subscribe_park": true, // subscribe via fsock to receive park events + "channel_sync_interval": "5m", // sync channels with freeswitch regularly + "max_wait_connection": "2s", // maximum duration to wait for a connection to be retrieved from the pool + "event_socket_conns":[ // instantiate connections to multiple FreeSWITCH servers + {"address": "127.0.0.1:8021", "password": "ClueCon", "reconnects": 5} ], }, "sm_kamailio": { - "enabled": false, // starts SessionManager service: - "rater": "internal", // address where to reach the Rater <""|internal|127.0.0.1:2013> - "cdrs": "internal", // address where to reach CDR Server, empty to disable CDR capturing <""|internal|x.y.z.y:1234> - "create_cdr": false, // create CDR out of events and sends it to CDRS component - "debit_interval": "10s", // interval to perform 
debits on. - "min_call_duration": "0s", // only authorize calls with allowed duration higher than this - "max_call_duration": "3h", // maximum call duration a prepaid call can last - "connections":[ // instantiate connections to multiple Kamailio servers - {"evapi_addr": "127.0.0.1:8448", "reconnects": 5} + "enabled": false, // starts SessionManager service: + "rals_conns": [ + {"address": "*internal"} // address where to reach the Rater <""|*internal|127.0.0.1:2013> + ], + "cdrs_conns": [ + {"address": "*internal"} // address where to reach CDR Server, empty to disable CDR capturing <*internal|x.y.z.y:1234> + ], + "create_cdr": false, // create CDR out of events and sends them to CDRS component + "debit_interval": "10s", // interval to perform debits on. + "min_call_duration": "0s", // only authorize calls with allowed duration higher than this + "max_call_duration": "3h", // maximum call duration a prepaid call can last + "evapi_conns":[ // instantiate connections to multiple Kamailio servers + {"address": "127.0.0.1:8448", "reconnects": 5} ], }, @@ -251,8 +271,12 @@ const CGRATES_CFG_JSON = ` "sm_opensips": { "enabled": false, // starts SessionManager service: "listen_udp": "127.0.0.1:2020", // address where to listen for datagram events coming from OpenSIPS - "rater": "internal", // address where to reach the Rater <""|internal|127.0.0.1:2013> - "cdrs": "internal", // address where to reach CDR Server, empty to disable CDR capturing <""|internal|x.y.z.y:1234> + "rals_conns": [ + {"address": "*internal"} // address where to reach the Rater <""|*internal|127.0.0.1:2013> + ], + "cdrs_conns": [ + {"address": "*internal"} // address where to reach CDR Server, empty to disable CDR capturing <*internal|x.y.z.y:1234> + ], "reconnects": 5, // number of reconnects if connection is lost "create_cdr": false, // create CDR out of events and sends it to CDRS component "debit_interval": "10s", // interval to perform debits on. 
@@ -267,8 +291,10 @@ const CGRATES_CFG_JSON = ` "enabled": false, // enables the diameter agent: "listen": "127.0.0.1:3868", // address where to listen for diameter requests "dictionaries_dir": "/usr/share/cgrates/diameter/dict/", // path towards directory holding additional dictionaries to load - "sm_generic": "internal", // connection towards SMG component for session management - "pubsubs": "", // address where to reach the pubusb service, empty to disable pubsub functionality: <""|internal|x.y.z.y:1234> + "sm_generic_conns": [ + {"address": "*internal"} // connection towards SMG component for session management + ], + "pubsubs_conns": [], // address where to reach the pubusb service, empty to disable pubsub functionality: <""|*internal|x.y.z.y:1234> "create_cdr": true, // create CDR out of CCR terminate and send it to SMG component "debit_interval": "5m", // interval for CCR updates "timezone": "", // timezone for timestamps where not specified, empty for general defaults <""|UTC|Local|$IANA_TZ_DB> @@ -335,7 +361,7 @@ const CGRATES_CFG_JSON = ` "mailer": { "server": "localhost", // the server to use when sending emails out "auth_user": "cgrates", // authenticate to email server using this user - "auth_passwd": "CGRateS.org", // authenticate to email server with this password + "auth_password": "CGRateS.org", // authenticate to email server with this password "from_address": "cgr-mailer@localhost.localdomain" // from address used when sending emails out }, diff --git a/config/config_json.go b/config/config_json.go index ff7dd366d..0336f5c71 100644 --- a/config/config_json.go +++ b/config/config_json.go @@ -33,7 +33,7 @@ const ( DATADB_JSN = "data_db" STORDB_JSN = "stor_db" BALANCER_JSN = "balancer" - RATER_JSN = "rater" + RALS_JSN = "rals" SCHEDULER_JSN = "scheduler" CDRS_JSN = "cdrs" MEDIATOR_JSN = "mediator" @@ -128,12 +128,12 @@ func (self CgrJsonCfg) BalancerJsonCfg() (*BalancerJsonCfg, error) { return cfg, nil } -func (self CgrJsonCfg) RaterJsonCfg() 
(*RaterJsonCfg, error) { - rawCfg, hasKey := self[RATER_JSN] +func (self CgrJsonCfg) RalsJsonCfg() (*RalsJsonCfg, error) { + rawCfg, hasKey := self[RALS_JSN] if !hasKey { return nil, nil } - cfg := new(RaterJsonCfg) + cfg := new(RalsJsonCfg) if err := json.Unmarshal(*rawCfg, cfg); err != nil { return nil, err } @@ -188,12 +188,12 @@ func (self CgrJsonCfg) CdreJsonCfgs() (map[string]*CdreJsonCfg, error) { return cfg, nil } -func (self CgrJsonCfg) CdrcJsonCfg() (map[string]*CdrcJsonCfg, error) { +func (self CgrJsonCfg) CdrcJsonCfg() ([]*CdrcJsonCfg, error) { rawCfg, hasKey := self[CDRC_JSN] if !hasKey { return nil, nil } - cfg := make(map[string]*CdrcJsonCfg) + cfg := make([]*CdrcJsonCfg, 0) if err := json.Unmarshal(*rawCfg, &cfg); err != nil { return nil, err } diff --git a/config/config_json_test.go b/config/config_json_test.go index 9238aae39..c879439cf 100644 --- a/config/config_json_test.go +++ b/config/config_json_test.go @@ -44,14 +44,13 @@ func TestDfGeneralJsonCfg(t *testing.T) { Dbdata_encoding: utils.StringPointer("msgpack"), Tpexport_dir: utils.StringPointer("/var/log/cgrates/tpe"), Http_failed_dir: utils.StringPointer("/var/log/cgrates/http_failed"), - Default_reqtype: utils.StringPointer(utils.META_RATED), + Default_request_type: utils.StringPointer(utils.META_RATED), Default_category: utils.StringPointer("call"), Default_tenant: utils.StringPointer("cgrates.org"), - Default_subject: utils.StringPointer("cgrates"), Default_timezone: utils.StringPointer("Local"), Connect_attempts: utils.IntPointer(3), Reconnects: utils.IntPointer(-1), - Response_cache_ttl: utils.StringPointer("3s"), + Response_cache_ttl: utils.StringPointer("0s"), Internal_ttl: utils.StringPointer("2m")} if gCfg, err := dfCgrJsonCfg.GeneralJsonCfg(); err != nil { t.Error(err) @@ -74,12 +73,12 @@ func TestDfListenJsonCfg(t *testing.T) { func TestDfDbJsonCfg(t *testing.T) { eCfg := &DbJsonCfg{ - Db_type: utils.StringPointer("redis"), - Db_host: utils.StringPointer("127.0.0.1"), - Db_port: 
utils.IntPointer(6379), - Db_name: utils.StringPointer("10"), - Db_user: utils.StringPointer(""), - Db_passwd: utils.StringPointer(""), + Db_type: utils.StringPointer("redis"), + Db_host: utils.StringPointer("127.0.0.1"), + Db_port: utils.IntPointer(6379), + Db_name: utils.StringPointer("10"), + Db_user: utils.StringPointer(""), + Db_password: utils.StringPointer(""), } if cfg, err := dfCgrJsonCfg.DbJsonCfg(TPDB_JSN); err != nil { t.Error(err) @@ -92,7 +91,7 @@ func TestDfDbJsonCfg(t *testing.T) { Db_port: utils.IntPointer(6379), Db_name: utils.StringPointer("11"), Db_user: utils.StringPointer(""), - Db_passwd: utils.StringPointer(""), + Db_password: utils.StringPointer(""), Load_history_size: utils.IntPointer(10), } if cfg, err := dfCgrJsonCfg.DbJsonCfg(DATADB_JSN); err != nil { @@ -106,7 +105,7 @@ func TestDfDbJsonCfg(t *testing.T) { Db_port: utils.IntPointer(3306), Db_name: utils.StringPointer("cgrates"), Db_user: utils.StringPointer("cgrates"), - Db_passwd: utils.StringPointer("CGRateS.org"), + Db_password: utils.StringPointer("CGRateS.org"), Max_open_conns: utils.IntPointer(100), Max_idle_conns: utils.IntPointer(10), Cdrs_indexes: utils.StringSlicePointer([]string{}), @@ -127,10 +126,11 @@ func TestDfBalancerJsonCfg(t *testing.T) { } } -func TestDfRaterJsonCfg(t *testing.T) { - eCfg := &RaterJsonCfg{Enabled: utils.BoolPointer(false), Balancer: utils.StringPointer(""), Cdrstats: utils.StringPointer(""), - Historys: utils.StringPointer(""), Pubsubs: utils.StringPointer(""), Users: utils.StringPointer(""), Aliases: utils.StringPointer(""), Rp_subject_prefix_matching: utils.BoolPointer(false)} - if cfg, err := dfCgrJsonCfg.RaterJsonCfg(); err != nil { +func TestDfRalsJsonCfg(t *testing.T) { + eCfg := &RalsJsonCfg{Enabled: utils.BoolPointer(false), Balancer: utils.StringPointer(""), Cdrstats_conns: &[]*HaPoolJsonCfg{}, + Historys_conns: &[]*HaPoolJsonCfg{}, Pubsubs_conns: &[]*HaPoolJsonCfg{}, Users_conns: &[]*HaPoolJsonCfg{}, Aliases_conns: &[]*HaPoolJsonCfg{}, + 
Rp_subject_prefix_matching: utils.BoolPointer(false), Lcr_subject_prefix_matching: utils.BoolPointer(false)} + if cfg, err := dfCgrJsonCfg.RalsJsonCfg(); err != nil { t.Error(err) } else if !reflect.DeepEqual(eCfg, cfg) { t.Errorf("Received: %+v", cfg) @@ -148,14 +148,17 @@ func TestDfSchedulerJsonCfg(t *testing.T) { func TestDfCdrsJsonCfg(t *testing.T) { eCfg := &CdrsJsonCfg{ - Enabled: utils.BoolPointer(false), - Extra_fields: utils.StringSlicePointer([]string{}), - Store_cdrs: utils.BoolPointer(true), - Rater: utils.StringPointer("internal"), - Pubsubs: utils.StringPointer(""), - Users: utils.StringPointer(""), - Aliases: utils.StringPointer(""), - Cdrstats: utils.StringPointer(""), + Enabled: utils.BoolPointer(false), + Extra_fields: utils.StringSlicePointer([]string{}), + Store_cdrs: utils.BoolPointer(true), + Rals_conns: &[]*HaPoolJsonCfg{ + &HaPoolJsonCfg{ + Address: utils.StringPointer("*internal"), + }}, + Pubsubs_conns: &[]*HaPoolJsonCfg{}, + Users_conns: &[]*HaPoolJsonCfg{}, + Aliases_conns: &[]*HaPoolJsonCfg{}, + Cdrstats_conns: &[]*HaPoolJsonCfg{}, Cdr_replication: &[]*CdrReplicationJsonCfg{}, } if cfg, err := dfCgrJsonCfg.CdrsJsonCfg(); err != nil { @@ -299,11 +302,14 @@ func TestDfCdrcJsonCfg(t *testing.T) { &CdrFieldJsonCfg{Tag: utils.StringPointer("Usage"), Field_id: utils.StringPointer(utils.USAGE), Type: utils.StringPointer(utils.META_COMPOSED), Value: utils.StringPointer("13"), Mandatory: utils.BoolPointer(true)}, } - eCfg := map[string]*CdrcJsonCfg{ - "*default": &CdrcJsonCfg{ - Enabled: utils.BoolPointer(false), - Dry_run: utils.BoolPointer(false), - Cdrs: utils.StringPointer("internal"), + eCfg := []*CdrcJsonCfg{ + &CdrcJsonCfg{ + Id: utils.StringPointer(utils.META_DEFAULT), + Enabled: utils.BoolPointer(false), + Dry_run: utils.BoolPointer(false), + Cdrs_conns: &[]*HaPoolJsonCfg{&HaPoolJsonCfg{ + Address: utils.StringPointer(utils.MetaInternal), + }}, Cdr_format: utils.StringPointer("csv"), Field_separator: utils.StringPointer(","), Timezone: 
utils.StringPointer(""), @@ -325,19 +331,26 @@ func TestDfCdrcJsonCfg(t *testing.T) { if cfg, err := dfCgrJsonCfg.CdrcJsonCfg(); err != nil { t.Error(err) } else if !reflect.DeepEqual(eCfg, cfg) { - t.Error("Received: ", cfg["*default"]) + t.Errorf("Expecting: \n%s\n, received: \n%s\n: ", utils.ToIJSON(eCfg), utils.ToIJSON(cfg)) } } func TestSmGenericJsonCfg(t *testing.T) { eCfg := &SmGenericJsonCfg{ - Enabled: utils.BoolPointer(false), - Listen_bijson: utils.StringPointer("127.0.0.1:2014"), - Rater: utils.StringPointer("internal"), - Cdrs: utils.StringPointer("internal"), + Enabled: utils.BoolPointer(false), + Listen_bijson: utils.StringPointer("127.0.0.1:2014"), + Rals_conns: &[]*HaPoolJsonCfg{ + &HaPoolJsonCfg{ + Address: utils.StringPointer(utils.MetaInternal), + }}, + Cdrs_conns: &[]*HaPoolJsonCfg{ + &HaPoolJsonCfg{ + Address: utils.StringPointer(utils.MetaInternal), + }}, Debit_interval: utils.StringPointer("0s"), Min_call_duration: utils.StringPointer("0s"), Max_call_duration: utils.StringPointer("3h"), + Session_ttl: utils.StringPointer("0s"), } if cfg, err := dfCgrJsonCfg.SmGenericJsonCfg(); err != nil { t.Error(err) @@ -348,9 +361,15 @@ func TestSmGenericJsonCfg(t *testing.T) { func TestSmFsJsonCfg(t *testing.T) { eCfg := &SmFsJsonCfg{ - Enabled: utils.BoolPointer(false), - Rater: utils.StringPointer("internal"), - Cdrs: utils.StringPointer("internal"), + Enabled: utils.BoolPointer(false), + Rals_conns: &[]*HaPoolJsonCfg{ + &HaPoolJsonCfg{ + Address: utils.StringPointer(utils.MetaInternal), + }}, + Cdrs_conns: &[]*HaPoolJsonCfg{ + &HaPoolJsonCfg{ + Address: utils.StringPointer(utils.MetaInternal), + }}, Create_cdr: utils.BoolPointer(false), Extra_fields: utils.StringSlicePointer([]string{}), Debit_interval: utils.StringPointer("10s"), @@ -363,9 +382,9 @@ func TestSmFsJsonCfg(t *testing.T) { Subscribe_park: utils.BoolPointer(true), Channel_sync_interval: utils.StringPointer("5m"), Max_wait_connection: utils.StringPointer("2s"), - Connections: 
&[]*FsConnJsonCfg{ + Event_socket_conns: &[]*FsConnJsonCfg{ &FsConnJsonCfg{ - Server: utils.StringPointer("127.0.0.1:8021"), + Address: utils.StringPointer("127.0.0.1:8021"), Password: utils.StringPointer("ClueCon"), Reconnects: utils.IntPointer(5), }}, @@ -379,16 +398,22 @@ func TestSmFsJsonCfg(t *testing.T) { func TestSmKamJsonCfg(t *testing.T) { eCfg := &SmKamJsonCfg{ - Enabled: utils.BoolPointer(false), - Rater: utils.StringPointer("internal"), - Cdrs: utils.StringPointer("internal"), + Enabled: utils.BoolPointer(false), + Rals_conns: &[]*HaPoolJsonCfg{ + &HaPoolJsonCfg{ + Address: utils.StringPointer(utils.MetaInternal), + }}, + Cdrs_conns: &[]*HaPoolJsonCfg{ + &HaPoolJsonCfg{ + Address: utils.StringPointer(utils.MetaInternal), + }}, Create_cdr: utils.BoolPointer(false), Debit_interval: utils.StringPointer("10s"), Min_call_duration: utils.StringPointer("0s"), Max_call_duration: utils.StringPointer("3h"), - Connections: &[]*KamConnJsonCfg{ + Evapi_conns: &[]*KamConnJsonCfg{ &KamConnJsonCfg{ - Evapi_addr: utils.StringPointer("127.0.0.1:8448"), + Address: utils.StringPointer("127.0.0.1:8448"), Reconnects: utils.IntPointer(5), }, }, @@ -402,10 +427,16 @@ func TestSmKamJsonCfg(t *testing.T) { func TestSmOsipsJsonCfg(t *testing.T) { eCfg := &SmOsipsJsonCfg{ - Enabled: utils.BoolPointer(false), - Listen_udp: utils.StringPointer("127.0.0.1:2020"), - Rater: utils.StringPointer("internal"), - Cdrs: utils.StringPointer("internal"), + Enabled: utils.BoolPointer(false), + Listen_udp: utils.StringPointer("127.0.0.1:2020"), + Rals_conns: &[]*HaPoolJsonCfg{ + &HaPoolJsonCfg{ + Address: utils.StringPointer(utils.MetaInternal), + }}, + Cdrs_conns: &[]*HaPoolJsonCfg{ + &HaPoolJsonCfg{ + Address: utils.StringPointer(utils.MetaInternal), + }}, Create_cdr: utils.BoolPointer(false), Debit_interval: utils.StringPointer("10s"), Min_call_duration: utils.StringPointer("0s"), @@ -425,16 +456,19 @@ func TestDiameterAgentJsonCfg(t *testing.T) { Enabled: utils.BoolPointer(false), Listen: 
utils.StringPointer("127.0.0.1:3868"), Dictionaries_dir: utils.StringPointer("/usr/share/cgrates/diameter/dict/"), - Sm_generic: utils.StringPointer("internal"), - Pubsubs: utils.StringPointer(""), - Create_cdr: utils.BoolPointer(true), - Debit_interval: utils.StringPointer("5m"), - Timezone: utils.StringPointer(""), - Dialect: utils.StringPointer("huawei"), - Origin_host: utils.StringPointer("CGR-DA"), - Origin_realm: utils.StringPointer("cgrates.org"), - Vendor_id: utils.IntPointer(0), - Product_name: utils.StringPointer("CGRateS"), + Sm_generic_conns: &[]*HaPoolJsonCfg{ + &HaPoolJsonCfg{ + Address: utils.StringPointer(utils.MetaInternal), + }}, + Pubsubs_conns: &[]*HaPoolJsonCfg{}, + Create_cdr: utils.BoolPointer(true), + Debit_interval: utils.StringPointer("5m"), + Timezone: utils.StringPointer(""), + Dialect: utils.StringPointer("huawei"), + Origin_host: utils.StringPointer("CGR-DA"), + Origin_realm: utils.StringPointer("cgrates.org"), + Vendor_id: utils.IntPointer(0), + Product_name: utils.StringPointer("CGRateS"), Request_processors: &[]*DARequestProcessorJsnCfg{ &DARequestProcessorJsnCfg{ Id: utils.StringPointer("*default"), @@ -536,10 +570,10 @@ func TestDfUserServJsonCfg(t *testing.T) { func TestDfMailerJsonCfg(t *testing.T) { eCfg := &MailerJsonCfg{ - Server: utils.StringPointer("localhost"), - Auth_user: utils.StringPointer("cgrates"), - Auth_passwd: utils.StringPointer("CGRateS.org"), - From_address: utils.StringPointer("cgr-mailer@localhost.localdomain"), + Server: utils.StringPointer("localhost"), + Auth_user: utils.StringPointer("cgrates"), + Auth_password: utils.StringPointer("CGRateS.org"), + From_address: utils.StringPointer("cgr-mailer@localhost.localdomain"), } if cfg, err := dfCgrJsonCfg.MailerJsonCfg(); err != nil { t.Error(err) @@ -589,25 +623,27 @@ func TestNewCgrJsonCfgFromFile(t *testing.T) { if err != nil { t.Error(err) } - eCfg := &GeneralJsonCfg{Default_reqtype: utils.StringPointer(utils.META_PSEUDOPREPAID)} + eCfg := 
&GeneralJsonCfg{Default_request_type: utils.StringPointer(utils.META_PSEUDOPREPAID)} if gCfg, err := cgrJsonCfg.GeneralJsonCfg(); err != nil { t.Error(err) } else if !reflect.DeepEqual(eCfg, gCfg) { - t.Error("Received: ", gCfg) + t.Errorf("Expecting: %+v, received: ", eCfg, gCfg) } cdrFields := []*CdrFieldJsonCfg{ &CdrFieldJsonCfg{Field_id: utils.StringPointer(utils.TOR), Value: utils.StringPointer("~7:s/^(voice|data|sms|mms|generic)$/*$1/")}, &CdrFieldJsonCfg{Field_id: utils.StringPointer(utils.ANSWER_TIME), Value: utils.StringPointer("1")}, &CdrFieldJsonCfg{Field_id: utils.StringPointer(utils.USAGE), Value: utils.StringPointer(`~9:s/^(\d+)$/${1}s/`)}, } - eCfgCdrc := map[string]*CdrcJsonCfg{ - "CDRC-CSV1": &CdrcJsonCfg{ + eCfgCdrc := []*CdrcJsonCfg{ + &CdrcJsonCfg{ + Id: utils.StringPointer("CDRC-CSV1"), Enabled: utils.BoolPointer(true), Cdr_in_dir: utils.StringPointer("/tmp/cgrates/cdrc1/in"), Cdr_out_dir: utils.StringPointer("/tmp/cgrates/cdrc1/out"), Cdr_source_id: utils.StringPointer("csv1"), }, - "CDRC-CSV2": &CdrcJsonCfg{ + &CdrcJsonCfg{ + Id: utils.StringPointer("CDRC-CSV2"), Enabled: utils.BoolPointer(true), Data_usage_multiply_factor: utils.Float64Pointer(0.000976563), Run_delay: utils.IntPointer(1), @@ -620,18 +656,18 @@ func TestNewCgrJsonCfgFromFile(t *testing.T) { if cfg, err := cgrJsonCfg.CdrcJsonCfg(); err != nil { t.Error(err) } else if !reflect.DeepEqual(eCfgCdrc, cfg) { - t.Error("Received: ", utils.ToIJSON(cfg["CDRC-CSV2"])) + t.Errorf("Expecting:\n %+v\n received:\n %+v\n", utils.ToIJSON(eCfgCdrc), utils.ToIJSON(cfg)) } eCfgSmFs := &SmFsJsonCfg{ Enabled: utils.BoolPointer(true), - Connections: &[]*FsConnJsonCfg{ + Event_socket_conns: &[]*FsConnJsonCfg{ &FsConnJsonCfg{ - Server: utils.StringPointer("1.2.3.4:8021"), + Address: utils.StringPointer("1.2.3.4:8021"), Password: utils.StringPointer("ClueCon"), Reconnects: utils.IntPointer(5), }, &FsConnJsonCfg{ - Server: utils.StringPointer("2.3.4.5:8021"), + Address: 
utils.StringPointer("2.3.4.5:8021"), Password: utils.StringPointer("ClueCon"), Reconnects: utils.IntPointer(5), }, diff --git a/config/config_test.go b/config/config_test.go index d9f3ff7ee..204aaa8b9 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -39,18 +39,18 @@ func TestLoadCgrCfgWithDefaults(t *testing.T) { { "sm_freeswitch": { "enabled": true, // starts SessionManager service: - "connections":[ // instantiate connections to multiple FreeSWITCH servers - {"server": "1.2.3.4:8021", "password": "ClueCon", "reconnects": 3}, - {"server": "1.2.3.5:8021", "password": "ClueCon", "reconnects": 5} + "event_socket_conns":[ // instantiate connections to multiple FreeSWITCH servers + {"address": "1.2.3.4:8021", "password": "ClueCon", "reconnects": 3}, + {"address": "1.2.3.5:8021", "password": "ClueCon", "reconnects": 5} ], }, }` eCgrCfg, _ := NewDefaultCGRConfig() eCgrCfg.SmFsConfig.Enabled = true - eCgrCfg.SmFsConfig.Connections = []*FsConnConfig{ - &FsConnConfig{Server: "1.2.3.4:8021", Password: "ClueCon", Reconnects: 3}, - &FsConnConfig{Server: "1.2.3.5:8021", Password: "ClueCon", Reconnects: 5}, + eCgrCfg.SmFsConfig.EventSocketConns = []*FsConnConfig{ + &FsConnConfig{Address: "1.2.3.4:8021", Password: "ClueCon", Reconnects: 3}, + &FsConnConfig{Address: "1.2.3.5:8021", Password: "ClueCon", Reconnects: 5}, } if cgrCfg, err := NewCGRConfigFromJsonStringWithDefaults(JSN_CFG); err != nil { t.Error(err) diff --git a/config/configcdrc_test.go b/config/configcdrc_test.go index 621a07b70..51d88f60c 100644 --- a/config/configcdrc_test.go +++ b/config/configcdrc_test.go @@ -32,12 +32,13 @@ func TestLoadCdrcConfigMultipleFiles(t *testing.T) { t.Error(err) } eCgrCfg, _ := NewDefaultCGRConfig() - eCgrCfg.CdrcProfiles = make(map[string]map[string]*CdrcConfig) + eCgrCfg.CdrcProfiles = make(map[string][]*CdrcConfig) // Default instance first - eCgrCfg.CdrcProfiles["/var/log/cgrates/cdrc/in"] = map[string]*CdrcConfig{ - "*default": &CdrcConfig{ + 
eCgrCfg.CdrcProfiles["/var/log/cgrates/cdrc/in"] = []*CdrcConfig{ + &CdrcConfig{ + ID: utils.META_DEFAULT, Enabled: false, - Cdrs: "internal", + CdrsConns: []*HaPoolConfig{&HaPoolConfig{Address: utils.MetaInternal}}, CdrFormat: "csv", FieldSeparator: ',', DataUsageMultiplyFactor: 1024, @@ -79,10 +80,11 @@ func TestLoadCdrcConfigMultipleFiles(t *testing.T) { TrailerFields: make([]*CfgCdrField, 0), }, } - eCgrCfg.CdrcProfiles["/tmp/cgrates/cdrc1/in"] = map[string]*CdrcConfig{ - "CDRC-CSV1": &CdrcConfig{ + eCgrCfg.CdrcProfiles["/tmp/cgrates/cdrc1/in"] = []*CdrcConfig{ + &CdrcConfig{ + ID: "CDRC-CSV1", Enabled: true, - Cdrs: "internal", + CdrsConns: []*HaPoolConfig{&HaPoolConfig{Address: utils.MetaInternal}}, CdrFormat: "csv", FieldSeparator: ',', DataUsageMultiplyFactor: 1024, @@ -122,14 +124,15 @@ func TestLoadCdrcConfigMultipleFiles(t *testing.T) { TrailerFields: make([]*CfgCdrField, 0), }, } - eCgrCfg.CdrcProfiles["/tmp/cgrates/cdrc2/in"] = map[string]*CdrcConfig{ - "CDRC-CSV2": &CdrcConfig{ + eCgrCfg.CdrcProfiles["/tmp/cgrates/cdrc2/in"] = []*CdrcConfig{ + &CdrcConfig{ + ID: "CDRC-CSV2", Enabled: true, - Cdrs: "internal", + CdrsConns: []*HaPoolConfig{&HaPoolConfig{Address: utils.MetaInternal}}, CdrFormat: "csv", FieldSeparator: ',', DataUsageMultiplyFactor: 0.000976563, - RunDelay: 0, + RunDelay: 1000000000, MaxOpenFiles: 1024, CdrInDir: "/tmp/cgrates/cdrc2/in", CdrOutDir: "/tmp/cgrates/cdrc2/out", @@ -137,18 +140,21 @@ func TestLoadCdrcConfigMultipleFiles(t *testing.T) { CdrFilter: utils.ParseRSRFieldsMustCompile("", utils.INFIELD_SEP), HeaderFields: make([]*CfgCdrField, 0), ContentFields: []*CfgCdrField{ - &CfgCdrField{Tag: "", Type: "", FieldId: utils.TOR, Value: utils.ParseRSRFieldsMustCompile("~7:s/^(voice|data|sms|generic)$/*$1/", utils.INFIELD_SEP), + &CfgCdrField{FieldId: utils.TOR, Value: utils.ParseRSRFieldsMustCompile("~7:s/^(voice|data|sms|mms|generic)$/*$1/", utils.INFIELD_SEP), FieldFilter: utils.ParseRSRFieldsMustCompile("", utils.INFIELD_SEP), 
Width: 0, Strip: "", Padding: "", Layout: "", Mandatory: false}, - &CfgCdrField{Tag: "", Type: "", FieldId: utils.ANSWER_TIME, Value: utils.ParseRSRFieldsMustCompile("2", utils.INFIELD_SEP), + &CfgCdrField{Tag: "", Type: "", FieldId: utils.ANSWER_TIME, Value: utils.ParseRSRFieldsMustCompile("1", utils.INFIELD_SEP), + FieldFilter: utils.ParseRSRFieldsMustCompile("", utils.INFIELD_SEP), Width: 0, Strip: "", Padding: "", Layout: "", Mandatory: false}, + &CfgCdrField{FieldId: utils.USAGE, Value: utils.ParseRSRFieldsMustCompile("~9:s/^(\\d+)$/${1}s/", utils.INFIELD_SEP), FieldFilter: utils.ParseRSRFieldsMustCompile("", utils.INFIELD_SEP), Width: 0, Strip: "", Padding: "", Layout: "", Mandatory: false}, }, TrailerFields: make([]*CfgCdrField, 0), }, } - eCgrCfg.CdrcProfiles["/tmp/cgrates/cdrc3/in"] = map[string]*CdrcConfig{ - "CDRC-CSV3": &CdrcConfig{ + eCgrCfg.CdrcProfiles["/tmp/cgrates/cdrc3/in"] = []*CdrcConfig{ + &CdrcConfig{ + ID: "CDRC-CSV3", Enabled: true, - Cdrs: "internal", + CdrsConns: []*HaPoolConfig{&HaPoolConfig{Address: utils.MetaInternal}}, CdrFormat: "csv", FieldSeparator: ',', DataUsageMultiplyFactor: 1024, @@ -189,6 +195,6 @@ func TestLoadCdrcConfigMultipleFiles(t *testing.T) { }, } if !reflect.DeepEqual(eCgrCfg.CdrcProfiles, cgrCfg.CdrcProfiles) { - t.Errorf("Expected: %+v, received: %+v", eCgrCfg.CdrcProfiles, cgrCfg.CdrcProfiles) + t.Errorf("Expected: \n%s\n, received: \n%s\n", utils.ToJSON(eCgrCfg.CdrcProfiles), utils.ToJSON(cgrCfg.CdrcProfiles)) } } diff --git a/config/daconfig.go b/config/daconfig.go index 6e465d6ae..d309a2290 100644 --- a/config/daconfig.go +++ b/config/daconfig.go @@ -28,8 +28,8 @@ type DiameterAgentCfg struct { Enabled bool // enables the diameter agent: Listen string // address where to listen for diameter requests DictionariesDir string - SMGeneric string // connection towards SMG component - PubSubS string // connection towards pubsubs + SMGenericConns []*HaPoolConfig // connections towards SMG component + PubSubConns 
[]*HaPoolConfig // connection towards pubsubs CreateCDR bool DebitInterval time.Duration Timezone string // timezone for timestamps where not specified <""|UTC|Local|$IANA_TZ_DB> @@ -54,11 +54,19 @@ func (self *DiameterAgentCfg) loadFromJsonCfg(jsnCfg *DiameterAgentJsonCfg) erro if jsnCfg.Dictionaries_dir != nil { self.DictionariesDir = *jsnCfg.Dictionaries_dir } - if jsnCfg.Sm_generic != nil { - self.SMGeneric = *jsnCfg.Sm_generic + if jsnCfg.Sm_generic_conns != nil { + self.SMGenericConns = make([]*HaPoolConfig, len(*jsnCfg.Sm_generic_conns)) + for idx, jsnHaCfg := range *jsnCfg.Sm_generic_conns { + self.SMGenericConns[idx] = NewDfltHaPoolConfig() + self.SMGenericConns[idx].loadFromJsonCfg(jsnHaCfg) + } } - if jsnCfg.Pubsubs != nil { - self.PubSubS = *jsnCfg.Pubsubs + if jsnCfg.Pubsubs_conns != nil { + self.PubSubConns = make([]*HaPoolConfig, len(*jsnCfg.Pubsubs_conns)) + for idx, jsnHaCfg := range *jsnCfg.Pubsubs_conns { + self.PubSubConns[idx] = NewDfltHaPoolConfig() + self.PubSubConns[idx].loadFromJsonCfg(jsnHaCfg) + } } if jsnCfg.Create_cdr != nil { self.CreateCDR = *jsnCfg.Create_cdr diff --git a/config/libconfig.go b/config/libconfig.go index af5dfb6c4..1471fdaa0 100644 --- a/config/libconfig.go +++ b/config/libconfig.go @@ -26,12 +26,12 @@ import ( type CdrReplicationCfg struct { Transport string - Server string + Address string Synchronous bool Attempts int // Number of attempts if not success CdrFilter utils.RSRFields // Only replicate if the filters here are matching } func (rplCfg CdrReplicationCfg) FallbackFileName() string { - return fmt.Sprintf("cdr_%s_%s_%s.form", rplCfg.Transport, url.QueryEscape(rplCfg.Server), utils.GenUUID()) + return fmt.Sprintf("cdr_%s_%s_%s.form", rplCfg.Transport, url.QueryEscape(rplCfg.Address), utils.GenUUID()) } diff --git a/config/libconfig_json.go b/config/libconfig_json.go index bc94bca2d..eb2bf969b 100644 --- a/config/libconfig_json.go +++ b/config/libconfig_json.go @@ -25,10 +25,9 @@ type GeneralJsonCfg struct { 
Dbdata_encoding *string Tpexport_dir *string Http_failed_dir *string - Default_reqtype *string + Default_request_type *string Default_category *string Default_tenant *string - Default_subject *string Default_timezone *string Connect_attempts *int Reconnects *int @@ -50,7 +49,7 @@ type DbJsonCfg struct { Db_port *int Db_name *string Db_user *string - Db_passwd *string + Db_password *string Max_open_conns *int // Used only in case of storDb Max_idle_conns *int Load_history_size *int // Used in case of dataDb to limit the length of the loads history @@ -63,15 +62,16 @@ type BalancerJsonCfg struct { } // Rater config section -type RaterJsonCfg struct { - Enabled *bool - Balancer *string - Cdrstats *string - Historys *string - Pubsubs *string - Aliases *string - Users *string - Rp_subject_prefix_matching *bool +type RalsJsonCfg struct { + Enabled *bool + Balancer *string + Cdrstats_conns *[]*HaPoolJsonCfg + Historys_conns *[]*HaPoolJsonCfg + Pubsubs_conns *[]*HaPoolJsonCfg + Aliases_conns *[]*HaPoolJsonCfg + Users_conns *[]*HaPoolJsonCfg + Rp_subject_prefix_matching *bool + Lcr_subject_prefix_matching *bool } // Scheduler config section @@ -84,17 +84,17 @@ type CdrsJsonCfg struct { Enabled *bool Extra_fields *[]string Store_cdrs *bool - Rater *string - Pubsubs *string - Users *string - Aliases *string - Cdrstats *string + Rals_conns *[]*HaPoolJsonCfg + Pubsubs_conns *[]*HaPoolJsonCfg + Users_conns *[]*HaPoolJsonCfg + Aliases_conns *[]*HaPoolJsonCfg + Cdrstats_conns *[]*HaPoolJsonCfg Cdr_replication *[]*CdrReplicationJsonCfg } type CdrReplicationJsonCfg struct { Transport *string - Server *string + Address *string Synchronous *bool Attempts *int Cdr_filter *string @@ -143,9 +143,10 @@ type CdreJsonCfg struct { // Cdrc config section type CdrcJsonCfg struct { + Id *string Enabled *bool Dry_run *bool - Cdrs *string + Cdrs_conns *[]*HaPoolJsonCfg Cdr_format *string Field_separator *string Timezone *string @@ -166,20 +167,23 @@ type CdrcJsonCfg struct { // SM-Generic config 
section type SmGenericJsonCfg struct { - Enabled *bool - Listen_bijson *string - Rater *string - Cdrs *string - Debit_interval *string - Min_call_duration *string - Max_call_duration *string + Enabled *bool + Listen_bijson *string + Rals_conns *[]*HaPoolJsonCfg + Cdrs_conns *[]*HaPoolJsonCfg + Debit_interval *string + Min_call_duration *string + Max_call_duration *string + Session_ttl *string + Session_ttl_last_used *string + Session_ttl_usage *string } // SM-FreeSWITCH config section type SmFsJsonCfg struct { Enabled *bool - Rater *string - Cdrs *string + Rals_conns *[]*HaPoolJsonCfg + Cdrs_conns *[]*HaPoolJsonCfg Create_cdr *bool Extra_fields *[]string Debit_interval *string @@ -192,12 +196,17 @@ type SmFsJsonCfg struct { Subscribe_park *bool Channel_sync_interval *string Max_wait_connection *string - Connections *[]*FsConnJsonCfg + Event_socket_conns *[]*FsConnJsonCfg +} + +// Represents one connection instance towards a rater/cdrs server +type HaPoolJsonCfg struct { + Address *string } // Represents one connection instance towards FreeSWITCH type FsConnJsonCfg struct { - Server *string + Address *string Password *string Reconnects *int } @@ -205,18 +214,18 @@ type FsConnJsonCfg struct { // SM-Kamailio config section type SmKamJsonCfg struct { Enabled *bool - Rater *string - Cdrs *string + Rals_conns *[]*HaPoolJsonCfg + Cdrs_conns *[]*HaPoolJsonCfg Create_cdr *bool Debit_interval *string Min_call_duration *string Max_call_duration *string - Connections *[]*KamConnJsonCfg + Evapi_conns *[]*KamConnJsonCfg } // Represents one connection instance towards Kamailio type KamConnJsonCfg struct { - Evapi_addr *string + Address *string Reconnects *int } @@ -224,8 +233,8 @@ type KamConnJsonCfg struct { type SmOsipsJsonCfg struct { Enabled *bool Listen_udp *string - Rater *string - Cdrs *string + Rals_conns *[]*HaPoolJsonCfg + Cdrs_conns *[]*HaPoolJsonCfg Create_cdr *bool Debit_interval *string Min_call_duration *string @@ -242,11 +251,11 @@ type OsipsConnJsonCfg struct { 
// DiameterAgent configuration type DiameterAgentJsonCfg struct { - Enabled *bool // enables the diameter agent: - Listen *string // address where to listen for diameter requests - Dictionaries_dir *string // path towards additional dictionaries - Sm_generic *string // connection towards generic SM - Pubsubs *string // connection towards pubsubs + Enabled *bool // enables the diameter agent: + Listen *string // address where to listen for diameter requests + Dictionaries_dir *string // path towards additional dictionaries + Sm_generic_conns *[]*HaPoolJsonCfg // Connections towards generic SM + Pubsubs_conns *[]*HaPoolJsonCfg // connection towards pubsubs Create_cdr *bool Debit_interval *string Timezone *string // timezone for timestamps where not specified <""|UTC|Local|$IANA_TZ_DB> @@ -296,10 +305,10 @@ type UserServJsonCfg struct { // Mailer config section type MailerJsonCfg struct { - Server *string - Auth_user *string - Auth_passwd *string - From_address *string + Server *string + Auth_user *string + Auth_password *string + From_address *string } // SureTax config section diff --git a/config/smconfig.go b/config/smconfig.go index 50e503bce..a1113d7a2 100644 --- a/config/smconfig.go +++ b/config/smconfig.go @@ -35,8 +35,17 @@ func NewDfltHaPoolConfig() *HaPoolConfig { // One connection to Rater type HaPoolConfig struct { - Server string - Timeout time.Duration + Address string +} + +func (self *HaPoolConfig) loadFromJsonCfg(jsnCfg *HaPoolJsonCfg) error { + if jsnCfg == nil { + return nil + } + if jsnCfg.Address != nil { + self.Address = *jsnCfg.Address + } + return nil } // Returns the first cached default value for a SM-FreeSWITCH connection @@ -50,7 +59,7 @@ func NewDfltFsConnConfig() *FsConnConfig { // One connection to FreeSWITCH server type FsConnConfig struct { - Server string + Address string Password string Reconnects int } @@ -59,8 +68,8 @@ func (self *FsConnConfig) loadFromJsonCfg(jsnCfg *FsConnJsonCfg) error { if jsnCfg == nil { return nil } - if 
jsnCfg.Server != nil { - self.Server = *jsnCfg.Server + if jsnCfg.Address != nil { + self.Address = *jsnCfg.Address } if jsnCfg.Password != nil { self.Password = *jsnCfg.Password @@ -72,13 +81,16 @@ func (self *FsConnConfig) loadFromJsonCfg(jsnCfg *FsConnJsonCfg) error { } type SmGenericConfig struct { - Enabled bool - ListenBijson string - HaRater []*HaPoolConfig - HaCdrs []*HaPoolConfig - DebitInterval time.Duration - MinCallDuration time.Duration - MaxCallDuration time.Duration + Enabled bool + ListenBijson string + RALsConns []*HaPoolConfig + CDRsConns []*HaPoolConfig + DebitInterval time.Duration + MinCallDuration time.Duration + MaxCallDuration time.Duration + SessionTTL time.Duration + SessionTTLLastUsed *time.Duration + SessionTTLUsage *time.Duration } func (self *SmGenericConfig) loadFromJsonCfg(jsnCfg *SmGenericJsonCfg) error { @@ -92,11 +104,19 @@ func (self *SmGenericConfig) loadFromJsonCfg(jsnCfg *SmGenericJsonCfg) error { if jsnCfg.Listen_bijson != nil { self.ListenBijson = *jsnCfg.Listen_bijson } - if jsnCfg.Rater != nil { - self.HaRater = []*HaPoolConfig{&HaPoolConfig{Server: *jsnCfg.Rater, Timeout: time.Duration(1) * time.Second}} + if jsnCfg.Rals_conns != nil { + self.RALsConns = make([]*HaPoolConfig, len(*jsnCfg.Rals_conns)) + for idx, jsnHaCfg := range *jsnCfg.Rals_conns { + self.RALsConns[idx] = NewDfltHaPoolConfig() + self.RALsConns[idx].loadFromJsonCfg(jsnHaCfg) + } } - if jsnCfg.Cdrs != nil { - self.HaCdrs = []*HaPoolConfig{&HaPoolConfig{Server: *jsnCfg.Cdrs, Timeout: time.Duration(1) * time.Second}} + if jsnCfg.Cdrs_conns != nil { + self.CDRsConns = make([]*HaPoolConfig, len(*jsnCfg.Cdrs_conns)) + for idx, jsnHaCfg := range *jsnCfg.Cdrs_conns { + self.CDRsConns[idx] = NewDfltHaPoolConfig() + self.CDRsConns[idx].loadFromJsonCfg(jsnHaCfg) + } } if jsnCfg.Debit_interval != nil { if self.DebitInterval, err = utils.ParseDurationWithSecs(*jsnCfg.Debit_interval); err != nil { @@ -113,13 +133,32 @@ func (self *SmGenericConfig) 
loadFromJsonCfg(jsnCfg *SmGenericJsonCfg) error { return err } } + if jsnCfg.Session_ttl != nil { + if self.SessionTTL, err = utils.ParseDurationWithSecs(*jsnCfg.Session_ttl); err != nil { + return err + } + } + if jsnCfg.Session_ttl_last_used != nil { + if sessionTTLLastUsed, err := utils.ParseDurationWithSecs(*jsnCfg.Session_ttl_last_used); err != nil { + return err + } else { + self.SessionTTLLastUsed = &sessionTTLLastUsed + } + } + if jsnCfg.Session_ttl_usage != nil { + if sessionTTLUsage, err := utils.ParseDurationWithSecs(*jsnCfg.Session_ttl_usage); err != nil { + return err + } else { + self.SessionTTLUsage = &sessionTTLUsage + } + } return nil } type SmFsConfig struct { Enabled bool - HaRater []*HaPoolConfig - HaCdrs []*HaPoolConfig + RALsConns []*HaPoolConfig + CDRsConns []*HaPoolConfig CreateCdr bool ExtraFields []*utils.RSRField DebitInterval time.Duration @@ -132,7 +171,7 @@ type SmFsConfig struct { SubscribePark bool ChannelSyncInterval time.Duration MaxWaitConnection time.Duration - Connections []*FsConnConfig + EventSocketConns []*FsConnConfig } func (self *SmFsConfig) loadFromJsonCfg(jsnCfg *SmFsJsonCfg) error { @@ -143,11 +182,19 @@ func (self *SmFsConfig) loadFromJsonCfg(jsnCfg *SmFsJsonCfg) error { if jsnCfg.Enabled != nil { self.Enabled = *jsnCfg.Enabled } - if jsnCfg.Rater != nil { - self.HaRater = []*HaPoolConfig{&HaPoolConfig{Server: *jsnCfg.Rater, Timeout: time.Duration(1) * time.Second}} + if jsnCfg.Rals_conns != nil { + self.RALsConns = make([]*HaPoolConfig, len(*jsnCfg.Rals_conns)) + for idx, jsnHaCfg := range *jsnCfg.Rals_conns { + self.RALsConns[idx] = NewDfltHaPoolConfig() + self.RALsConns[idx].loadFromJsonCfg(jsnHaCfg) + } } - if jsnCfg.Cdrs != nil { - self.HaCdrs = []*HaPoolConfig{&HaPoolConfig{Server: *jsnCfg.Cdrs, Timeout: time.Duration(1) * time.Second}} + if jsnCfg.Cdrs_conns != nil { + self.CDRsConns = make([]*HaPoolConfig, len(*jsnCfg.Cdrs_conns)) + for idx, jsnHaCfg := range *jsnCfg.Cdrs_conns { + self.CDRsConns[idx] = 
NewDfltHaPoolConfig() + self.CDRsConns[idx].loadFromJsonCfg(jsnHaCfg) + } } if jsnCfg.Create_cdr != nil { self.CreateCdr = *jsnCfg.Create_cdr @@ -199,11 +246,11 @@ func (self *SmFsConfig) loadFromJsonCfg(jsnCfg *SmFsJsonCfg) error { return err } } - if jsnCfg.Connections != nil { - self.Connections = make([]*FsConnConfig, len(*jsnCfg.Connections)) - for idx, jsnConnCfg := range *jsnCfg.Connections { - self.Connections[idx] = NewDfltFsConnConfig() - self.Connections[idx].loadFromJsonCfg(jsnConnCfg) + if jsnCfg.Event_socket_conns != nil { + self.EventSocketConns = make([]*FsConnConfig, len(*jsnCfg.Event_socket_conns)) + for idx, jsnConnCfg := range *jsnCfg.Event_socket_conns { + self.EventSocketConns[idx] = NewDfltFsConnConfig() + self.EventSocketConns[idx].loadFromJsonCfg(jsnConnCfg) } } return nil @@ -220,7 +267,7 @@ func NewDfltKamConnConfig() *KamConnConfig { // Represents one connection instance towards Kamailio type KamConnConfig struct { - EvapiAddr string + Address string Reconnects int } @@ -228,8 +275,8 @@ func (self *KamConnConfig) loadFromJsonCfg(jsnCfg *KamConnJsonCfg) error { if jsnCfg == nil { return nil } - if jsnCfg.Evapi_addr != nil { - self.EvapiAddr = *jsnCfg.Evapi_addr + if jsnCfg.Address != nil { + self.Address = *jsnCfg.Address } if jsnCfg.Reconnects != nil { self.Reconnects = *jsnCfg.Reconnects @@ -240,13 +287,13 @@ func (self *KamConnConfig) loadFromJsonCfg(jsnCfg *KamConnJsonCfg) error { // SM-Kamailio config section type SmKamConfig struct { Enabled bool - HaRater []*HaPoolConfig - HaCdrs []*HaPoolConfig + RALsConns []*HaPoolConfig + CDRsConns []*HaPoolConfig CreateCdr bool DebitInterval time.Duration MinCallDuration time.Duration MaxCallDuration time.Duration - Connections []*KamConnConfig + EvapiConns []*KamConnConfig } func (self *SmKamConfig) loadFromJsonCfg(jsnCfg *SmKamJsonCfg) error { @@ -257,11 +304,19 @@ func (self *SmKamConfig) loadFromJsonCfg(jsnCfg *SmKamJsonCfg) error { if jsnCfg.Enabled != nil { self.Enabled = *jsnCfg.Enabled 
} - if jsnCfg.Rater != nil { - self.HaRater = []*HaPoolConfig{&HaPoolConfig{Server: *jsnCfg.Rater, Timeout: time.Duration(1) * time.Second}} + if jsnCfg.Rals_conns != nil { + self.RALsConns = make([]*HaPoolConfig, len(*jsnCfg.Rals_conns)) + for idx, jsnHaCfg := range *jsnCfg.Rals_conns { + self.RALsConns[idx] = NewDfltHaPoolConfig() + self.RALsConns[idx].loadFromJsonCfg(jsnHaCfg) + } } - if jsnCfg.Cdrs != nil { - self.HaCdrs = []*HaPoolConfig{&HaPoolConfig{Server: *jsnCfg.Cdrs, Timeout: time.Duration(1) * time.Second}} + if jsnCfg.Cdrs_conns != nil { + self.CDRsConns = make([]*HaPoolConfig, len(*jsnCfg.Cdrs_conns)) + for idx, jsnHaCfg := range *jsnCfg.Cdrs_conns { + self.CDRsConns[idx] = NewDfltHaPoolConfig() + self.CDRsConns[idx].loadFromJsonCfg(jsnHaCfg) + } } if jsnCfg.Create_cdr != nil { self.CreateCdr = *jsnCfg.Create_cdr @@ -281,11 +336,11 @@ func (self *SmKamConfig) loadFromJsonCfg(jsnCfg *SmKamJsonCfg) error { return err } } - if jsnCfg.Connections != nil { - self.Connections = make([]*KamConnConfig, len(*jsnCfg.Connections)) - for idx, jsnConnCfg := range *jsnCfg.Connections { - self.Connections[idx] = NewDfltKamConnConfig() - self.Connections[idx].loadFromJsonCfg(jsnConnCfg) + if jsnCfg.Evapi_conns != nil { + self.EvapiConns = make([]*KamConnConfig, len(*jsnCfg.Evapi_conns)) + for idx, jsnConnCfg := range *jsnCfg.Evapi_conns { + self.EvapiConns[idx] = NewDfltKamConnConfig() + self.EvapiConns[idx].loadFromJsonCfg(jsnConnCfg) } } return nil @@ -311,8 +366,8 @@ func (self *OsipsConnConfig) loadFromJsonCfg(jsnCfg *OsipsConnJsonCfg) error { type SmOsipsConfig struct { Enabled bool ListenUdp string - HaRater []*HaPoolConfig - HaCdrs []*HaPoolConfig + RALsConns []*HaPoolConfig + CDRsConns []*HaPoolConfig CreateCdr bool DebitInterval time.Duration MinCallDuration time.Duration @@ -329,11 +384,19 @@ func (self *SmOsipsConfig) loadFromJsonCfg(jsnCfg *SmOsipsJsonCfg) error { if jsnCfg.Listen_udp != nil { self.ListenUdp = *jsnCfg.Listen_udp } - if jsnCfg.Rater != nil 
{ - self.HaRater = []*HaPoolConfig{&HaPoolConfig{Server: *jsnCfg.Rater, Timeout: time.Duration(1) * time.Second}} + if jsnCfg.Rals_conns != nil { + self.RALsConns = make([]*HaPoolConfig, len(*jsnCfg.Rals_conns)) + for idx, jsnHaCfg := range *jsnCfg.Rals_conns { + self.RALsConns[idx] = NewDfltHaPoolConfig() + self.RALsConns[idx].loadFromJsonCfg(jsnHaCfg) + } } - if jsnCfg.Cdrs != nil { - self.HaCdrs = []*HaPoolConfig{&HaPoolConfig{Server: *jsnCfg.Cdrs, Timeout: time.Duration(1) * time.Second}} + if jsnCfg.Cdrs_conns != nil { + self.CDRsConns = make([]*HaPoolConfig, len(*jsnCfg.Cdrs_conns)) + for idx, jsnHaCfg := range *jsnCfg.Cdrs_conns { + self.CDRsConns[idx] = NewDfltHaPoolConfig() + self.CDRsConns[idx].loadFromJsonCfg(jsnHaCfg) + } } if jsnCfg.Create_cdr != nil { self.CreateCdr = *jsnCfg.Create_cdr diff --git a/config/smconfig_test.go b/config/smconfig_test.go index 4276f9089..c279de16f 100644 --- a/config/smconfig_test.go +++ b/config/smconfig_test.go @@ -29,14 +29,14 @@ func TesSmFsConfigLoadFromJsonCfg(t *testing.T) { Enabled: utils.BoolPointer(true), Create_cdr: utils.BoolPointer(true), Subscribe_park: utils.BoolPointer(true), - Connections: &[]*FsConnJsonCfg{ + Event_socket_conns: &[]*FsConnJsonCfg{ &FsConnJsonCfg{ - Server: utils.StringPointer("1.2.3.4:8021"), + Address: utils.StringPointer("1.2.3.4:8021"), Password: utils.StringPointer("ClueCon"), Reconnects: utils.IntPointer(5), }, &FsConnJsonCfg{ - Server: utils.StringPointer("2.3.4.5:8021"), + Address: utils.StringPointer("2.3.4.5:8021"), Password: utils.StringPointer("ClueCon"), Reconnects: utils.IntPointer(5), }, @@ -45,9 +45,9 @@ func TesSmFsConfigLoadFromJsonCfg(t *testing.T) { eSmFsConfig := &SmFsConfig{Enabled: true, CreateCdr: true, SubscribePark: true, - Connections: []*FsConnConfig{ - &FsConnConfig{Server: "1.2.3.4:8021", Password: "ClueCon", Reconnects: 5}, - &FsConnConfig{Server: "1.2.3.4:8021", Password: "ClueCon", Reconnects: 5}, + EventSocketConns: []*FsConnConfig{ + &FsConnConfig{Address: 
"1.2.3.4:8021", Password: "ClueCon", Reconnects: 5}, + &FsConnConfig{Address: "1.2.3.4:8021", Password: "ClueCon", Reconnects: 5}, }, } smFsCfg := new(SmFsConfig) diff --git a/console/trigger_add.go b/console/account_trigger_add.go similarity index 76% rename from console/trigger_add.go rename to console/account_trigger_add.go index bef41f910..3577aee9f 100644 --- a/console/trigger_add.go +++ b/console/account_trigger_add.go @@ -21,8 +21,8 @@ package console import "github.com/cgrates/cgrates/apier/v1" func init() { - c := &CmdAddTriggers{ - name: "triggers_add", + c := &CmdAccountAddTriggers{ + name: "account_triggers_add", rpcMethod: "ApierV1.AddAccountActionTriggers", rpcParams: &v1.AttrAddAccountActionTriggers{}, } @@ -31,33 +31,33 @@ func init() { } // Commander implementation -type CmdAddTriggers struct { +type CmdAccountAddTriggers struct { name string rpcMethod string rpcParams *v1.AttrAddAccountActionTriggers *CommandExecuter } -func (self *CmdAddTriggers) Name() string { +func (self *CmdAccountAddTriggers) Name() string { return self.name } -func (self *CmdAddTriggers) RpcMethod() string { +func (self *CmdAccountAddTriggers) RpcMethod() string { return self.rpcMethod } -func (self *CmdAddTriggers) RpcParams(reset bool) interface{} { +func (self *CmdAccountAddTriggers) RpcParams(reset bool) interface{} { if reset || self.rpcParams == nil { self.rpcParams = &v1.AttrAddAccountActionTriggers{} } return self.rpcParams } -func (self *CmdAddTriggers) PostprocessRpcParams() error { +func (self *CmdAccountAddTriggers) PostprocessRpcParams() error { return nil } -func (self *CmdAddTriggers) RpcResult() interface{} { +func (self *CmdAccountAddTriggers) RpcResult() interface{} { var s string return &s } diff --git a/console/trigger_remove.go b/console/account_trigger_remove.go similarity index 75% rename from console/trigger_remove.go rename to console/account_trigger_remove.go index a460d8528..5d2739692 100644 --- a/console/trigger_remove.go +++ 
b/console/account_trigger_remove.go @@ -21,8 +21,8 @@ package console import "github.com/cgrates/cgrates/apier/v1" func init() { - c := &CmdRemoveTriggers{ - name: "triggers_remove", + c := &CmdAccountRemoveTriggers{ + name: "account_triggers_remove", rpcMethod: "ApierV1.RemoveAccountActionTriggers", rpcParams: &v1.AttrRemoveAccountActionTriggers{}, } @@ -31,33 +31,33 @@ func init() { } // Commander implementation -type CmdRemoveTriggers struct { +type CmdAccountRemoveTriggers struct { name string rpcMethod string rpcParams *v1.AttrRemoveAccountActionTriggers *CommandExecuter } -func (self *CmdRemoveTriggers) Name() string { +func (self *CmdAccountRemoveTriggers) Name() string { return self.name } -func (self *CmdRemoveTriggers) RpcMethod() string { +func (self *CmdAccountRemoveTriggers) RpcMethod() string { return self.rpcMethod } -func (self *CmdRemoveTriggers) RpcParams(reset bool) interface{} { +func (self *CmdAccountRemoveTriggers) RpcParams(reset bool) interface{} { if reset || self.rpcParams == nil { self.rpcParams = &v1.AttrRemoveAccountActionTriggers{} } return self.rpcParams } -func (self *CmdRemoveTriggers) PostprocessRpcParams() error { +func (self *CmdAccountRemoveTriggers) PostprocessRpcParams() error { return nil } -func (self *CmdRemoveTriggers) RpcResult() interface{} { +func (self *CmdAccountRemoveTriggers) RpcResult() interface{} { var s string return &s } diff --git a/console/trigger_reset.go b/console/account_trigger_reset.go similarity index 75% rename from console/trigger_reset.go rename to console/account_trigger_reset.go index 0d5be9f24..d545361db 100644 --- a/console/trigger_reset.go +++ b/console/account_trigger_reset.go @@ -21,8 +21,8 @@ package console import "github.com/cgrates/cgrates/apier/v1" func init() { - c := &CmdResetTriggers{ - name: "triggers_reset", + c := &CmdAccountResetTriggers{ + name: "account_triggers_reset", rpcMethod: "ApierV1.ResetAccountActionTriggers", rpcParams: &v1.AttrRemoveAccountActionTriggers{}, } @@ -31,33 
+31,33 @@ func init() { } // Commander implementation -type CmdResetTriggers struct { +type CmdAccountResetTriggers struct { name string rpcMethod string rpcParams *v1.AttrRemoveAccountActionTriggers *CommandExecuter } -func (self *CmdResetTriggers) Name() string { +func (self *CmdAccountResetTriggers) Name() string { return self.name } -func (self *CmdResetTriggers) RpcMethod() string { +func (self *CmdAccountResetTriggers) RpcMethod() string { return self.rpcMethod } -func (self *CmdResetTriggers) RpcParams(reset bool) interface{} { +func (self *CmdAccountResetTriggers) RpcParams(reset bool) interface{} { if reset || self.rpcParams == nil { self.rpcParams = &v1.AttrRemoveAccountActionTriggers{} } return self.rpcParams } -func (self *CmdResetTriggers) PostprocessRpcParams() error { +func (self *CmdAccountResetTriggers) PostprocessRpcParams() error { return nil } -func (self *CmdResetTriggers) RpcResult() interface{} { +func (self *CmdAccountResetTriggers) RpcResult() interface{} { var s string return &s } diff --git a/console/account_trigger_set.go b/console/account_trigger_set.go new file mode 100644 index 000000000..84ed47079 --- /dev/null +++ b/console/account_trigger_set.go @@ -0,0 +1,63 @@ +/* +Rating system designed to be used in VoIP Carriers World +Copyright (C) 2012-2015 ITsysCOM + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. 
If not, see +*/ + +package console + +import "github.com/cgrates/cgrates/apier/v1" + +func init() { + c := &CmdAccountSetTriggers{ + name: "account_triggers_set", + rpcMethod: "ApierV1.SetAccountActionTriggers", + rpcParams: &v1.AttrSetAccountActionTriggers{}, + } + commands[c.Name()] = c + c.CommandExecuter = &CommandExecuter{c} +} + +// Commander implementation +type CmdAccountSetTriggers struct { + name string + rpcMethod string + rpcParams *v1.AttrSetAccountActionTriggers + *CommandExecuter +} + +func (self *CmdAccountSetTriggers) Name() string { + return self.name +} + +func (self *CmdAccountSetTriggers) RpcMethod() string { + return self.rpcMethod +} + +func (self *CmdAccountSetTriggers) RpcParams(reset bool) interface{} { + if reset || self.rpcParams == nil { + self.rpcParams = &v1.AttrSetAccountActionTriggers{} + } + return self.rpcParams +} + +func (self *CmdAccountSetTriggers) PostprocessRpcParams() error { + return nil +} + +func (self *CmdAccountSetTriggers) RpcResult() interface{} { + var s string + return &s +} diff --git a/console/actions.go b/console/actions.go index 25e9af0e5..1d2504029 100644 --- a/console/actions.go +++ b/console/actions.go @@ -18,7 +18,10 @@ along with this program. 
If not, see package console -import "github.com/cgrates/cgrates/utils" +import ( + "github.com/cgrates/cgrates/apier/v2" + "github.com/cgrates/cgrates/engine" +) func init() { c := &CmdGetActions{ @@ -33,7 +36,7 @@ func init() { type CmdGetActions struct { name string rpcMethod string - rpcParams *StringWrapper + rpcParams *v2.AttrGetActions *CommandExecuter } @@ -47,7 +50,7 @@ func (self *CmdGetActions) RpcMethod() string { func (self *CmdGetActions) RpcParams(reset bool) interface{} { if reset || self.rpcParams == nil { - self.rpcParams = &StringWrapper{} + self.rpcParams = &v2.AttrGetActions{} } return self.rpcParams } @@ -57,6 +60,6 @@ func (self *CmdGetActions) PostprocessRpcParams() error { } func (self *CmdGetActions) RpcResult() interface{} { - a := make([]*utils.TPAction, 0) + a := make(map[string]engine.Actions, 0) return &a } diff --git a/console/actions_remove.go b/console/actions_remove.go new file mode 100644 index 000000000..c7d5f54db --- /dev/null +++ b/console/actions_remove.go @@ -0,0 +1,62 @@ +/* +Real-time Charging System for Telecom & ISP environments +Copyright (C) 2012-2015 ITsysCOM GmbH + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. 
If not, see +*/ + +package console + +import "github.com/cgrates/cgrates/apier/v1" + +func init() { + c := &CmdRemoveActions{ + name: "actions_remove", + rpcMethod: "ApierV1.RemActions", + } + commands[c.Name()] = c + c.CommandExecuter = &CommandExecuter{c} +} + +// Commander implementation +type CmdRemoveActions struct { + name string + rpcMethod string + rpcParams *v1.AttrRemActions + *CommandExecuter +} + +func (self *CmdRemoveActions) Name() string { + return self.name +} + +func (self *CmdRemoveActions) RpcMethod() string { + return self.rpcMethod +} + +func (self *CmdRemoveActions) RpcParams(reset bool) interface{} { + if reset || self.rpcParams == nil { + self.rpcParams = &v1.AttrRemActions{} + } + return self.rpcParams +} + +func (self *CmdRemoveActions) PostprocessRpcParams() error { + return nil +} + +func (self *CmdRemoveActions) RpcResult() interface{} { + var s string + return &s +} diff --git a/console/aliases_set.go b/console/aliases_set.go index 991836167..e091be6d3 100644 --- a/console/aliases_set.go +++ b/console/aliases_set.go @@ -27,7 +27,7 @@ func init() { c := &CmdSetAliases{ name: "aliases_set", rpcMethod: "AliasesV1.SetAlias", - rpcParams: &engine.Alias{Direction: utils.OUT}, + rpcParams: &engine.AttrAddAlias{Alias: &engine.Alias{Direction: utils.OUT}}, } commands[c.Name()] = c c.CommandExecuter = &CommandExecuter{c} @@ -37,7 +37,7 @@ func init() { type CmdSetAliases struct { name string rpcMethod string - rpcParams *engine.Alias + rpcParams *engine.AttrAddAlias *CommandExecuter } @@ -51,7 +51,7 @@ func (self *CmdSetAliases) RpcMethod() string { func (self *CmdSetAliases) RpcParams(reset bool) interface{} { if reset || self.rpcParams == nil { - self.rpcParams = &engine.Alias{Direction: utils.OUT} + self.rpcParams = &engine.AttrAddAlias{Alias: &engine.Alias{Direction: utils.OUT}} } return self.rpcParams } diff --git a/console/trigger_set.go b/console/trigger_set.go index 2e9095d95..e1a72dd67 100644 --- a/console/trigger_set.go +++ 
b/console/trigger_set.go @@ -23,8 +23,8 @@ import "github.com/cgrates/cgrates/apier/v1" func init() { c := &CmdSetTriggers{ name: "triggers_set", - rpcMethod: "ApierV1.SetAccountActionTriggers", - rpcParams: &v1.AttrSetAccountActionTriggers{}, + rpcMethod: "ApierV1.SetActionTrigger", + rpcParams: &v1.AttrSetActionTrigger{}, } commands[c.Name()] = c c.CommandExecuter = &CommandExecuter{c} @@ -34,7 +34,7 @@ func init() { type CmdSetTriggers struct { name string rpcMethod string - rpcParams *v1.AttrSetAccountActionTriggers + rpcParams *v1.AttrSetActionTrigger *CommandExecuter } @@ -48,7 +48,7 @@ func (self *CmdSetTriggers) RpcMethod() string { func (self *CmdSetTriggers) RpcParams(reset bool) interface{} { if reset || self.rpcParams == nil { - self.rpcParams = &v1.AttrSetAccountActionTriggers{} + self.rpcParams = &v1.AttrSetActionTrigger{} } return self.rpcParams } diff --git a/console/aliases_update.go b/console/triggers.go similarity index 64% rename from console/aliases_update.go rename to console/triggers.go index 7063141be..454d4994c 100644 --- a/console/aliases_update.go +++ b/console/triggers.go @@ -19,48 +19,48 @@ along with this program. 
If not, see package console import ( + "github.com/cgrates/cgrates/apier/v1" "github.com/cgrates/cgrates/engine" - "github.com/cgrates/cgrates/utils" ) func init() { - c := &CmdUpdateAliases{ - name: "aliases_update", - rpcMethod: "AliasesV1.UpdateAlias", - rpcParams: &engine.Alias{Direction: utils.OUT}, + c := &CmdGetTriggers{ + name: "triggers", + rpcMethod: "ApierV1.GetActionTriggers", + rpcParams: &v1.AttrGetActionTriggers{}, } commands[c.Name()] = c c.CommandExecuter = &CommandExecuter{c} } // Commander implementation -type CmdUpdateAliases struct { +type CmdGetTriggers struct { name string rpcMethod string - rpcParams *engine.Alias + rpcParams *v1.AttrGetActionTriggers *CommandExecuter } -func (self *CmdUpdateAliases) Name() string { +func (self *CmdGetTriggers) Name() string { return self.name } -func (self *CmdUpdateAliases) RpcMethod() string { +func (self *CmdGetTriggers) RpcMethod() string { return self.rpcMethod } -func (self *CmdUpdateAliases) RpcParams(reset bool) interface{} { +func (self *CmdGetTriggers) RpcParams(reset bool) interface{} { if reset || self.rpcParams == nil { - self.rpcParams = &engine.Alias{Direction: utils.OUT} + self.rpcParams = &v1.AttrGetActionTriggers{} } return self.rpcParams } -func (self *CmdUpdateAliases) PostprocessRpcParams() error { +func (self *CmdGetTriggers) PostprocessRpcParams() error { return nil } -func (self *CmdUpdateAliases) RpcResult() interface{} { - var s string - return &s +func (self *CmdGetTriggers) RpcResult() interface{} { + atr := engine.ActionTriggers{} + return &atr } diff --git a/data/conf/cgrates/cgrates.json b/data/conf/cgrates/cgrates.json index bc740467c..b08f4d751 100644 --- a/data/conf/cgrates/cgrates.json +++ b/data/conf/cgrates/cgrates.json @@ -6,333 +6,373 @@ // This file contains the default configuration hardcoded into CGRateS. // This is what you get when you load CGRateS with an empty configuration file. 
-//"general": { -// "http_skip_tls_verify": false, // if enabled Http Client will accept any TLS certificate -// "rounding_decimals": 5, // system level precision for floats -// "dbdata_encoding": "msgpack", // encoding used to store object data in strings: -// "tpexport_dir": "/var/log/cgrates/tpe", // path towards export folder for offline Tariff Plans -// "http_failed_dir": "/var/log/cgrates/http_failed", // directory path where we store failed http requests -// "default_reqtype": "*rated", // default request type to consider when missing from requests: <""|*prepaid|*postpaid|*pseudoprepaid|*rated> -// "default_category": "call", // default Type of Record to consider when missing from requests -// "default_tenant": "cgrates.org", // default Tenant to consider when missing from requests -// "default_subject": "cgrates", // default rating Subject to consider when missing from requests -// "default_timezone": "Local", // default timezone for timestamps where not specified <""|UTC|Local|$IANA_TZ_DB> -// "connect_attempts": 3, // initial server connect attempts -// "reconnects": -1, // number of retries in case of connection lost -// "response_cache_ttl": "3s", // the life span of a cached response -// "internal_ttl": "2m", // maximum duration to wait for internal connections before giving up -//}, +// "general": { +// "http_skip_tls_verify": false, // if enabled Http Client will accept any TLS certificate +// "rounding_decimals": 5, // system level precision for floats +// "dbdata_encoding": "msgpack", // encoding used to store object data in strings: +// "tpexport_dir": "/var/log/cgrates/tpe", // path towards export folder for offline Tariff Plans +// "http_failed_dir": "/var/log/cgrates/http_failed", // directory path where we store failed http requests +// "default_request_type": "*rated", // default request type to consider when missing from requests: <""|*prepaid|*postpaid|*pseudoprepaid|*rated> +// "default_category": "call", // default category to consider 
when missing from requests +// "default_tenant": "cgrates.org", // default tenant to consider when missing from requests +// "default_timezone": "Local", // default timezone for timestamps where not specified <""|UTC|Local|$IANA_TZ_DB> +// "connect_attempts": 3, // initial server connect attempts +// "reconnects": -1, // number of retries in case of connection lost +// "response_cache_ttl": "0s", // the life span of a cached response +// "internal_ttl": "2m", // maximum duration to wait for internal connections before giving up +// }, -//"listen": { -// "rpc_json": "127.0.0.1:2012", // RPC JSON listening address -// "rpc_gob": "127.0.0.1:2013", // RPC GOB listening address -// "http": "127.0.0.1:2080", // HTTP listening address -//}, +// "listen": { +// "rpc_json": "127.0.0.1:2012", // RPC JSON listening address +// "rpc_gob": "127.0.0.1:2013", // RPC GOB listening address +// "http": "127.0.0.1:2080", // HTTP listening address +// }, -//"tariffplan_db": { // database used to store active tariff plan configuration -// "db_type": "redis", // tariffplan_db type: -// "db_host": "127.0.0.1", // tariffplan_db host address -// "db_port": 6379, // port to reach the tariffplan_db -// "db_name": "10", // tariffplan_db name to connect to -// "db_user": "", // sername to use when connecting to tariffplan_db -// "db_passwd": "", // password to use when connecting to tariffplan_db -//}, +// "tariffplan_db": { // database used to store active tariff plan configuration +// "db_type": "redis", // tariffplan_db type: +// "db_host": "127.0.0.1", // tariffplan_db host address +// "db_port": 6379, // port to reach the tariffplan_db +// "db_name": "10", // tariffplan_db name to connect to +// "db_user": "", // sername to use when connecting to tariffplan_db +// "db_password": "", // password to use when connecting to tariffplan_db +// }, -//"data_db": { // database used to store runtime data (eg: accounts, cdr stats) -// "db_type": "redis", // data_db type: -// "db_host": "127.0.0.1", 
// data_db host address -// "db_port": 6379, // data_db port to reach the database -// "db_name": "11", // data_db database name to connect to -// "db_user": "", // username to use when connecting to data_db -// "db_passwd": "", // password to use when connecting to data_db -// "load_history_size": 10, // Number of records in the load history -//}, +// "data_db": { // database used to store runtime data (eg: accounts, cdr stats) +// "db_type": "redis", // data_db type: +// "db_host": "127.0.0.1", // data_db host address +// "db_port": 6379, // data_db port to reach the database +// "db_name": "11", // data_db database name to connect to +// "db_user": "", // username to use when connecting to data_db +// "db_password": "", // password to use when connecting to data_db +// "load_history_size": 10, // Number of records in the load history +// }, -//"stor_db": { // database used to store offline tariff plans and CDRs -// "db_type": "mysql", // stor database type to use: -// "db_host": "127.0.0.1", // the host to connect to -// "db_port": 3306, // the port to reach the stordb -// "db_name": "cgrates", // stor database name -// "db_user": "cgrates", // username to use when connecting to stordb -// "db_passwd": "CGRateS.org", // password to use when connecting to stordb -// "max_open_conns": 100, // maximum database connections opened -// "max_idle_conns": 10, // maximum database connections idle -//}, +// "stor_db": { // database used to store offline tariff plans and CDRs +// "db_type": "mysql", // stor database type to use: +// "db_host": "127.0.0.1", // the host to connect to +// "db_port": 3306, // the port to reach the stordb +// "db_name": "cgrates", // stor database name +// "db_user": "cgrates", // username to use when connecting to stordb +// "db_password": "CGRateS.org", // password to use when connecting to stordb +// "max_open_conns": 100, // maximum database connections opened +// "max_idle_conns": 10, // maximum database connections idle +// 
"cdrs_indexes": [], // indexes on cdrs table to speed up queries, used only in case of mongo +// }, -//"balancer": { -// "enabled": false, // start Balancer service: -//}, +// "balancer": { +// "enabled": false, // start Balancer service: +// }, -//"rater": { -// "enabled": false, // enable Rater service: -// "balancer": "", // register to balancer as worker: <""|internal|x.y.z.y:1234> -// "cdrstats": "", // address where to reach the cdrstats service, empty to disable stats functionality: <""|internal|x.y.z.y:1234> -// "historys": "", // address where to reach the history service, empty to disable history functionality: <""|internal|x.y.z.y:1234> -// "pubsubs": "", // address where to reach the pubusb service, empty to disable pubsub functionality: <""|internal|x.y.z.y:1234> -// "users": "", // address where to reach the user service, empty to disable user profile functionality: <""|internal|x.y.z.y:1234> -// "aliases": "", // address where to reach the aliases service, empty to disable aliases functionality: <""|internal|x.y.z.y:1234> -// "rp_subject_prefix_matching": false, // enables prefix matching for the rating profile subject -//}, +// "rals": { +// "enabled": false, // enable Rater service: +// "balancer": "", // register to balancer as worker: <""|*internal|x.y.z.y:1234> +// "cdrstats_conns": [], // address where to reach the cdrstats service, empty to disable stats functionality: <""|*internal|x.y.z.y:1234> +// "historys_conns": [], // address where to reach the history service, empty to disable history functionality: <""|*internal|x.y.z.y:1234> +// "pubsubs_conns": [], // address where to reach the pubusb service, empty to disable pubsub functionality: <""|*internal|x.y.z.y:1234> +// "users_conns": [], // address where to reach the user service, empty to disable user profile functionality: <""|*internal|x.y.z.y:1234> +// "aliases_conns": [], // address where to reach the aliases service, empty to disable aliases functionality: 
<""|*internal|x.y.z.y:1234> +// "rp_subject_prefix_matching": false, // enables prefix matching for the rating profile subject +// "lcr_subject_prefix_matching": false // enables prefix matching for the lcr subject +// }, -//"scheduler": { -// "enabled": false, // start Scheduler service: -//}, +// "scheduler": { +// "enabled": false, // start Scheduler service: +// }, -//"cdrs": { -// "enabled": false, // start the CDR Server service: -// "extra_fields": [], // extra fields to store in CDRs for non-generic CDRs -// "store_cdrs": true, // store cdrs in storDb -// "rater": "internal", // address where to reach the Rater for cost calculation, empty to disable functionality: <""|internal|x.y.z.y:1234> -// "pubsubs": "", // address where to reach the pubusb service, empty to disable pubsub functionality: <""|internal|x.y.z.y:1234> -// "users": "", // address where to reach the user service, empty to disable user profile functionality: <""|internal|x.y.z.y:1234> -// "aliases": "", // address where to reach the aliases service, empty to disable aliases functionality: <""|internal|x.y.z.y:1234> -// "cdrstats": "", // address where to reach the cdrstats service, empty to disable stats functionality<""|internal|x.y.z.y:1234> -// "cdr_replication":[], // replicate the raw CDR to a number of servers -//}, +// "cdrs": { +// "enabled": false, // start the CDR Server service: +// "extra_fields": [], // extra fields to store in CDRs for non-generic CDRs +// "store_cdrs": true, // store cdrs in storDb +// "rals_conns": [ +// {"address": "*internal"} // address where to reach the Rater for cost calculation, empty to disable functionality: <""|*internal|x.y.z.y:1234> +// ], +// "pubsubs_conns": [], // address where to reach the pubusb service, empty to disable pubsub functionality: <""|*internal|x.y.z.y:1234> +// "users_conns": [], // address where to reach the user service, empty to disable user profile functionality: <""|*internal|x.y.z.y:1234> +// "aliases_conns": [], // address 
where to reach the aliases service, empty to disable aliases functionality: <""|*internal|x.y.z.y:1234> +// "cdrstats_conns": [], // address where to reach the cdrstats service, empty to disable stats functionality<""|*internal|x.y.z.y:1234> +// "cdr_replication":[] // replicate the raw CDR to a number of servers +// }, -//"cdrstats": { -// "enabled": false, // starts the cdrstats service: -// "save_interval": "1m", // interval to save changed stats into dataDb storage -//}, +// "cdrstats": { +// "enabled": false, // starts the cdrstats service: +// "save_interval": "1m", // interval to save changed stats into dataDb storage +// }, -//"cdre": { -// "*default": { -// "cdr_format": "csv", // exported CDRs format -// "field_separator": ",", -// "data_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from KBytes to Bytes) -// "sms_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from SMS unit to call duration in some billing systems) -// "generic_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from GENERIC unit to call duration in some billing systems) -// "cost_multiply_factor": 1, // multiply cost before export, eg: add VAT -// "cost_rounding_decimals": -1, // rounding decimals for Cost values. 
-1 to disable rounding -// "cost_shift_digits": 0, // shift digits in the cost on export (eg: convert from EUR to cents) -// "mask_destination_id": "MASKED_DESTINATIONS", // destination id containing called addresses to be masked on export -// "mask_length": 0, // length of the destination suffix to be masked -// "export_dir": "/var/log/cgrates/cdre", // path where the exported CDRs will be placed -// "header_fields": [], // template of the exported header fields -// "content_fields": [ // template of the exported content fields -// {"tag": "CgrId", "field_id": "CgrId", "type": "*composed", "value": "CgrId"}, -// {"tag":"RunId", "field_id": "MediationRunId", "type": "*composed", "value": "MediationRunId"}, -// {"tag":"Tor", "field_id": "TOR", "type": "*composed", "value": "TOR"}, -// {"tag":"AccId", "field_id": "AccId", "type": "*composed", "value": "AccId"}, -// {"tag":"ReqType", "field_id": "ReqType", "type": "*composed", "value": "ReqType"}, -// {"tag":"Direction", "field_id": "Direction", "type": "*composed", "value": "Direction"}, -// {"tag":"Tenant", "field_id": "Tenant", "type": "*composed", "value": "Tenant"}, -// {"tag":"Category", "field_id": "Category", "type": "*composed", "value": "Category"}, -// {"tag":"Account", "field_id": "Account", "type": "*composed", "value": "Account"}, -// {"tag":"Subject", "field_id": "Subject", "type": "*composed", "value": "Subject"}, -// {"tag":"Destination", "field_id": "Destination", "type": "*composed", "value": "Destination"}, -// {"tag":"SetupTime", "field_id": "SetupTime", "type": "*composed", "value": "SetupTime", "layout": "2006-01-02T15:04:05Z07:00"}, -// {"tag":"AnswerTime", "field_id": "AnswerTime", "type": "*composed", "value": "AnswerTime", "layout": "2006-01-02T15:04:05Z07:00"}, -// {"tag":"Usage", "field_id": "Usage", "type": "*composed", "value": "Usage"}, -// {"tag":"Cost", "field_id": "Cost", "type": "*composed", "value": "Cost"}, -// ], -// "trailer_fields": [], // template of the exported trailer 
fields -// } -//}, +// "cdre": { +// "*default": { +// "cdr_format": "csv", // exported CDRs format +// "field_separator": ",", +// "data_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from KBytes to Bytes) +// "sms_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from SMS unit to call duration in some billing systems) +// "mms_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from MMS unit to call duration in some billing systems) +// "generic_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from GENERIC unit to call duration in some billing systems) +// "cost_multiply_factor": 1, // multiply cost before export, eg: add VAT +// "cost_rounding_decimals": -1, // rounding decimals for Cost values. -1 to disable rounding +// "cost_shift_digits": 0, // shift digits in the cost on export (eg: convert from EUR to cents) +// "mask_destination_id": "MASKED_DESTINATIONS", // destination id containing called addresses to be masked on export +// "mask_length": 0, // length of the destination suffix to be masked +// "export_folder": "/var/log/cgrates/cdre", // path where the exported CDRs will be placed +// "header_fields": [], // template of the exported header fields +// "content_fields": [ // template of the exported content fields +// {"tag": "CGRID", "field_id": "CGRID", "type": "*composed", "value": "CGRID"}, +// {"tag":"RunID", "field_id": "RunID", "type": "*composed", "value": "RunID"}, +// {"tag":"TOR", "field_id": "ToR", "type": "*composed", "value": "ToR"}, +// {"tag":"OriginID", "field_id": "OriginID", "type": "*composed", "value": "OriginID"}, +// {"tag":"RequestType", "field_id": "RequestType", "type": "*composed", "value": "RequestType"}, +// {"tag":"Direction", "field_id": "Direction", "type": "*composed", "value": "Direction"}, +// {"tag":"Tenant", "field_id": "Tenant", "type": "*composed", "value": "Tenant"}, +// {"tag":"Category", "field_id": 
"Category", "type": "*composed", "value": "Category"}, +// {"tag":"Account", "field_id": "Account", "type": "*composed", "value": "Account"}, +// {"tag":"Subject", "field_id": "Subject", "type": "*composed", "value": "Subject"}, +// {"tag":"Destination", "field_id": "Destination", "type": "*composed", "value": "Destination"}, +// {"tag":"SetupTime", "field_id": "SetupTime", "type": "*composed", "value": "SetupTime", "layout": "2006-01-02T15:04:05Z07:00"}, +// {"tag":"AnswerTime", "field_id": "AnswerTime", "type": "*composed", "value": "AnswerTime", "layout": "2006-01-02T15:04:05Z07:00"}, +// {"tag":"Usage", "field_id": "Usage", "type": "*composed", "value": "Usage"}, +// {"tag":"Cost", "field_id": "Cost", "type": "*composed", "value": "Cost"}, +// ], +// "trailer_fields": [], // template of the exported trailer fields +// } +// }, -//"cdrc": { -// "*default": { -// "enabled": false, // enable CDR client functionality -// "dry_run": false, // do not send the CDRs to CDRS, just parse them -// "cdrs": "internal", // address where to reach CDR server. 
-// "cdr_format": "csv", // CDR file format -// "field_separator": ",", // separator used in case of csv files -// "timezone": "", // timezone for timestamps where not specified <""|UTC|Local|$IANA_TZ_DB> -// "run_delay": 0, // sleep interval in seconds between consecutive runs, 0 to use automation via inotify -// "max_open_files": 1024, // maximum simultaneous files to process, 0 for unlimited -// "data_usage_multiply_factor": 1024, // conversion factor for data usage -// "cdr_in_dir": "/var/log/cgrates/cdrc/in", // absolute path towards the directory where the CDRs are stored -// "cdr_out_dir": "/var/log/cgrates/cdrc/out", // absolute path towards the directory where processed CDRs will be moved -// "failed_calls_prefix": "missed_calls", // used in case of flatstore CDRs to avoid searching for BYE records -// "cdr_source_id": "freeswitch_csv", // free form field, tag identifying the source of the CDRs within CDRS database -// "cdr_filter": "", // filter CDR records to import -// "continue_on_success": false, // continue to the next template if executed -// "partial_record_cache": "10s", // duration to cache partial records when not pairing -// "header_fields": [], // template of the import header fields -// "content_fields":[ // import content_fields template, tag will match internally CDR field, in case of .csv value will be represented by index of the field value -// {"tag": "tor", "field_id": "TOR", "type": "*composed", "value": "2", "mandatory": true}, -// {"tag": "accid", "field_id": "AccId", "type": "*composed", "value": "3", "mandatory": true}, -// {"tag": "reqtype", "field_id": "ReqType", "type": "*composed", "value": "4", "mandatory": true}, -// {"tag": "direction", "field_id": "Direction", "type": "*composed", "value": "5", "mandatory": true}, -// {"tag": "tenant", "field_id": "Tenant", "type": "*composed", "value": "6", "mandatory": true}, -// {"tag": "category", "field_id": "Category", "type": "*composed", "value": "7", "mandatory": true}, -// {"tag": 
"account", "field_id": "Account", "type": "*composed", "value": "8", "mandatory": true}, -// {"tag": "subject", "field_id": "Subject", "type": "*composed", "value": "9", "mandatory": true}, -// {"tag": "destination", "field_id": "Destination", "type": "*composed", "value": "10", "mandatory": true}, -// {"tag": "setup_time", "field_id": "SetupTime", "type": "*composed", "value": "11", "mandatory": true}, -// {"tag": "answer_time", "field_id": "AnswerTime", "type": "*composed", "value": "12", "mandatory": true}, -// {"tag": "usage", "field_id": "Usage", "type": "*composed", "value": "13", "mandatory": true}, -// ], -// "trailer_fields": [], // template of the import trailer fields -// } -//}, - -//"sm_generic": { -// "enabled": false, // starts SessionManager service: -// "listen_bijson": "127.0.0.1:2014", // address where to listen for bidirectional JSON-RPC requests -// "rater": "internal", // address where to reach the Rater <""|internal|127.0.0.1:2013> -// "cdrs": "internal", // address where to reach CDR Server <""|internal|x.y.z.y:1234> -// "debit_interval": "0s", // interval to perform debits on. -// "min_call_duration": "0s", // only authorize calls with allowed duration higher than this -// "max_call_duration": "3h", // maximum call duration a prepaid call can last -//}, +// "cdrc": [ +// { +// "id": "*default", // identifier of the CDRC runner +// "enabled": false, // enable CDR client functionality +// "dry_run": false, // do not send the CDRs to CDRS, just parse them +// "cdrs_conns": [ +// {"address": "*internal"} // address where to reach CDR server. 
<*internal|x.y.z.y:1234> +// ], +// "cdr_format": "csv", // CDR file format +// "field_separator": ",", // separator used in case of csv files +// "timezone": "", // timezone for timestamps where not specified <""|UTC|Local|$IANA_TZ_DB> +// "run_delay": 0, // sleep interval in seconds between consecutive runs, 0 to use automation via inotify +// "max_open_files": 1024, // maximum simultaneous files to process, 0 for unlimited +// "data_usage_multiply_factor": 1024, // conversion factor for data usage +// "cdr_in_dir": "/var/log/cgrates/cdrc/in", // absolute path towards the directory where the CDRs are stored +// "cdr_out_dir": "/var/log/cgrates/cdrc/out", // absolute path towards the directory where processed CDRs will be moved +// "failed_calls_prefix": "missed_calls", // used in case of flatstore CDRs to avoid searching for BYE records +// "cdr_source_id": "freeswitch_csv", // free form field, tag identifying the source of the CDRs within CDRS database +// "cdr_filter": "", // filter CDR records to import +// "continue_on_success": false, // continue to the next template if executed +// "partial_record_cache": "10s", // duration to cache partial records when not pairing +// "header_fields": [], // template of the import header fields +// "content_fields":[ // import content_fields template, tag will match internally CDR field, in case of .csv value will be represented by index of the field value +// {"tag": "TOR", "field_id": "ToR", "type": "*composed", "value": "2", "mandatory": true}, +// {"tag": "OriginID", "field_id": "OriginID", "type": "*composed", "value": "3", "mandatory": true}, +// {"tag": "RequestType", "field_id": "RequestType", "type": "*composed", "value": "4", "mandatory": true}, +// {"tag": "Direction", "field_id": "Direction", "type": "*composed", "value": "5", "mandatory": true}, +// {"tag": "Tenant", "field_id": "Tenant", "type": "*composed", "value": "6", "mandatory": true}, +// {"tag": "Category", "field_id": "Category", "type": "*composed", 
"value": "7", "mandatory": true}, +// {"tag": "Account", "field_id": "Account", "type": "*composed", "value": "8", "mandatory": true}, +// {"tag": "Subject", "field_id": "Subject", "type": "*composed", "value": "9", "mandatory": true}, +// {"tag": "Destination", "field_id": "Destination", "type": "*composed", "value": "10", "mandatory": true}, +// {"tag": "SetupTime", "field_id": "SetupTime", "type": "*composed", "value": "11", "mandatory": true}, +// {"tag": "AnswerTime", "field_id": "AnswerTime", "type": "*composed", "value": "12", "mandatory": true}, +// {"tag": "Usage", "field_id": "Usage", "type": "*composed", "value": "13", "mandatory": true}, +// ], +// "trailer_fields": [], // template of the import trailer fields +// }, +// ], -//"sm_freeswitch": { -// "enabled": false, // starts SessionManager service: -// "rater": "internal", // address where to reach the Rater <""|internal|127.0.0.1:2013> -// "cdrs": "internal", // address where to reach CDR Server, empty to disable CDR capturing <""|internal|x.y.z.y:1234> -// "create_cdr": false, // create CDR out of events and sends them to CDRS component -// "extra_fields": [], // extra fields to store in auth/CDRs when creating them -// "debit_interval": "10s", // interval to perform debits on. 
-// "min_call_duration": "0s", // only authorize calls with allowed duration higher than this -// "max_call_duration": "3h", // maximum call duration a prepaid call can last -// "min_dur_low_balance": "5s", // threshold which will trigger low balance warnings for prepaid calls (needs to be lower than debit_interval) -// "low_balance_ann_file": "", // file to be played when low balance is reached for prepaid calls -// "empty_balance_context": "", // if defined, prepaid calls will be transfered to this context on empty balance -// "empty_balance_ann_file": "", // file to be played before disconnecting prepaid calls on empty balance (applies only if no context defined) -// "subscribe_park": true, // subscribe via fsock to receive park events -// "channel_sync_interval": "5m", // sync channels with freeswitch regularly -// "connections":[ // instantiate connections to multiple FreeSWITCH servers -// {"server": "127.0.0.1:8021", "password": "ClueCon", "reconnects": 5} -// ], -//}, +// "sm_generic": { +// "enabled": false, // starts SessionManager service: +// "listen_bijson": "127.0.0.1:2014", // address where to listen for bidirectional JSON-RPC requests +// "rals_conns": [ +// {"address": "*internal"} // address where to reach the Rater <""|*internal|127.0.0.1:2013> +// ], +// "cdrs_conns": [ +// {"address": "*internal"} // address where to reach CDR Server, empty to disable CDR capturing <*internal|x.y.z.y:1234> +// ], +// "debit_interval": "0s", // interval to perform debits on. 
+// "min_call_duration": "0s", // only authorize calls with allowed duration higher than this +// "max_call_duration": "3h", // maximum call duration a prepaid call can last +// "session_ttl": "0s", // time after a session with no updates is terminated, not defined by default + //"session_ttl_last_used": "", // tweak LastUsed for sessions timing-out, not defined by default + //"session_ttl_usage": "", // tweak Usage for sessions timing-out, not defined by default +// }, -//"sm_kamailio": { -// "enabled": false, // starts SessionManager service: -// "rater": "internal", // address where to reach the Rater <""|internal|127.0.0.1:2013> -// "cdrs": "internal", // address where to reach CDR Server, empty to disable CDR capturing <""|internal|x.y.z.y:1234> -// "create_cdr": false, // create CDR out of events and sends them to CDRS component -// "debit_interval": "10s", // interval to perform debits on. -// "min_call_duration": "0s", // only authorize calls with allowed duration higher than this -// "max_call_duration": "3h", // maximum call duration a prepaid call can last -// "connections":[ // instantiate connections to multiple Kamailio servers -// {"evapi_addr": "127.0.0.1:8448", "reconnects": 5} -// ], -//}, +// "sm_freeswitch": { +// "enabled": false, // starts SessionManager service: +// "rals_conns": [ +// {"address": "*internal"} // address where to reach the Rater <""|*internal|127.0.0.1:2013> +// ], +// "cdrs_conns": [ +// {"address": "*internal"} // address where to reach CDR Server, empty to disable CDR capturing <*internal|x.y.z.y:1234> +// ], +// "create_cdr": false, // create CDR out of events and sends them to CDRS component +// "extra_fields": [], // extra fields to store in auth/CDRs when creating them +// "debit_interval": "10s", // interval to perform debits on. 
+// "min_call_duration": "0s", // only authorize calls with allowed duration higher than this +// "max_call_duration": "3h", // maximum call duration a prepaid call can last +// "min_dur_low_balance": "5s", // threshold which will trigger low balance warnings for prepaid calls (needs to be lower than debit_interval) +// "low_balance_ann_file": "", // file to be played when low balance is reached for prepaid calls +// "empty_balance_context": "", // if defined, prepaid calls will be transferred to this context on empty balance +// "empty_balance_ann_file": "", // file to be played before disconnecting prepaid calls on empty balance (applies only if no context defined) +// "subscribe_park": true, // subscribe via fsock to receive park events +// "channel_sync_interval": "5m", // sync channels with freeswitch regularly +// "max_wait_connection": "2s", // maximum duration to wait for a connection to be retrieved from the pool +// "event_socket_conns":[ // instantiate connections to multiple FreeSWITCH servers +// {"address": "127.0.0.1:8021", "password": "ClueCon", "reconnects": 5} +// ], +// }, -//"sm_opensips": { -// "enabled": false, // starts SessionManager service: -// "listen_udp": "127.0.0.1:2020", // address where to listen for datagram events coming from OpenSIPS -// "rater": "internal", // address where to reach the Rater <""|internal|127.0.0.1:2013> -// "cdrs": "internal", // address where to reach CDR Server, empty to disable CDR capturing <""|internal|x.y.z.y:1234> -// "reconnects": 5, // number of reconnects if connection is lost -// "create_cdr": false, // create CDR out of events and sends them to CDRS component -// "debit_interval": "10s", // interval to perform debits on. 
-// "min_call_duration": "0s", // only authorize calls with allowed duration higher than this -// "max_call_duration": "3h", // maximum call duration a prepaid call can last -// "events_subscribe_interval": "60s", // automatic events subscription to OpenSIPS, 0 to disable it -// "mi_addr": "127.0.0.1:8020", // address where to reach OpenSIPS MI to send session disconnects -//}, +// "sm_kamailio": { +// "enabled": false, // starts SessionManager service: +// "rals_conns": [ +// {"address": "*internal"} // address where to reach the Rater <""|*internal|127.0.0.1:2013> +// ], +// "cdrs_conns": [ +// {"address": "*internal"} // address where to reach CDR Server, empty to disable CDR capturing <*internal|x.y.z.y:1234> +// ], +// "create_cdr": false, // create CDR out of events and sends them to CDRS component +// "debit_interval": "10s", // interval to perform debits on. +// "min_call_duration": "0s", // only authorize calls with allowed duration higher than this +// "max_call_duration": "3h", // maximum call duration a prepaid call can last +// "evapi_conns":[ // instantiate connections to multiple Kamailio servers +// {"address": "127.0.0.1:8448", "reconnects": 5} +// ], +// }, -//"diameter_agent": { -// "enabled": false, // enables the diameter agent: -// "listen": "127.0.0.1:3868", // address where to listen for diameter requests -// "dictionaries_dir": "/usr/share/cgrates/diameter/dict/", // path towards directory holding additional dictionaries to load -// "sm_generic": "internal", // connection towards SMG component for session management -// "timezone": "", // timezone for timestamps where not specified, empty for general defaults <""|UTC|Local|$IANA_TZ_DB> -// "origin_host": "CGR-DA", // diameter Origin-Host AVP used in replies -// "origin_realm": "cgrates.org", // diameter Origin-Realm AVP used in replies -// "vendor_id": 0, // diameter Vendor-Id AVP used in replies -// "product_name": "CGRateS", // diameter Product-Name AVP used in replies -// 
"request_processors": [ -// { -// "id": "*default", // formal identifier of this processor -// "dry_run": false, // do not send the CDRs to CDRS, just parse them -// "request_filter": "Subscription-Id>Subscription-Id-Type(0)", // filter requests processed by this processor -// "continue_on_success": false, // continue to the next template if executed -// "content_fields":[ // import content_fields template, tag will match internally CDR field, in case of .csv value will be represented by index of the field value -// {"tag": "tor", "field_id": "TOR", "type": "*composed", "value": "^*voice", "mandatory": true}, -// {"tag": "accid", "field_id": "AccId", "type": "*composed", "value": "Session-Id", "mandatory": true}, -// {"tag": "reqtype", "field_id": "ReqType", "type": "*composed", "value": "^*users", "mandatory": true}, -// {"tag": "direction", "field_id": "Direction", "type": "*composed", "value": "^*out", "mandatory": true}, -// {"tag": "tenant", "field_id": "Tenant", "type": "*composed", "value": "^*users", "mandatory": true}, -// {"tag": "category", "field_id": "Category", "type": "*composed", "value": "^call_;~Service-Information>IN-Information>Calling-Vlr-Number:s/^$/33000/;~Service-Information>IN-Information>Calling-Vlr-Number:s/^(\\d{5})/${1}/", "mandatory": true}, -// {"tag": "account", "field_id": "Account", "type": "*composed", "value": "^*users", "mandatory": true}, -// {"tag": "subject", "field_id": "Subject", "type": "*composed", "value": "^*users", "mandatory": true}, -// {"tag": "destination", "field_id": "Destination", "type": "*composed", "value": "Service-Information>IN-Information>Real-Called-Number", "mandatory": true}, -// {"tag": "setup_time", "field_id": "SetupTime", "type": "*composed", "value": "Event-Timestamp", "mandatory": true}, -// {"tag": "answer_time", "field_id": "AnswerTime", "type": "*composed", "value": "Event-Timestamp", "mandatory": true}, -// {"tag": "usage", "field_id": "Usage", "type": "*composed", "value": 
"Requested-Service-Unit>CC-Time", "mandatory": true}, -// {"tag": "subscriber_id", "field_id": "SubscriberId", "type": "*composed", "value": "Subscription-Id>Subscription-Id-Data", "mandatory": true}, -// ], -// }, -// ], -//}, +// "sm_opensips": { +// "enabled": false, // starts SessionManager service: +// "listen_udp": "127.0.0.1:2020", // address where to listen for datagram events coming from OpenSIPS +// "rals_conns": [ +// {"address": "*internal"} // address where to reach the Rater <""|*internal|127.0.0.1:2013> +// ], +// "cdrs_conns": [ +// {"address": "*internal"} // address where to reach CDR Server, empty to disable CDR capturing <*internal|x.y.z.y:1234> +// ], +// "reconnects": 5, // number of reconnects if connection is lost +// "create_cdr": false, // create CDR out of events and sends it to CDRS component +// "debit_interval": "10s", // interval to perform debits on. +// "min_call_duration": "0s", // only authorize calls with allowed duration higher than this +// "max_call_duration": "3h", // maximum call duration a prepaid call can last +// "events_subscribe_interval": "60s", // automatic events subscription to OpenSIPS, 0 to disable it +// "mi_addr": "127.0.0.1:8020", // address where to reach OpenSIPS MI to send session disconnects +// }, -//"historys": { -// "enabled": false, // starts History service: . -// "history_dir": "/var/log/cgrates/history", // location on disk where to store history files. 
-// "save_interval": "1s", // interval to save changed cache into .git archive -//}, +// "diameter_agent": { +// "enabled": false, // enables the diameter agent: +// "listen": "127.0.0.1:3868", // address where to listen for diameter requests +// "dictionaries_dir": "/usr/share/cgrates/diameter/dict/", // path towards directory holding additional dictionaries to load +// "sm_generic_conns": [ +// {"address": "*internal"} // connection towards SMG component for session management +// ], +// "pubsubs_conns": [], // address where to reach the pubusb service, empty to disable pubsub functionality: <""|*internal|x.y.z.y:1234> +// "create_cdr": true, // create CDR out of CCR terminate and send it to SMG component +// "debit_interval": "5m", // interval for CCR updates +// "timezone": "", // timezone for timestamps where not specified, empty for general defaults <""|UTC|Local|$IANA_TZ_DB> +// "dialect": "huawei", // the diameter dialect used in the communication, supported: +// "origin_host": "CGR-DA", // diameter Origin-Host AVP used in replies +// "origin_realm": "cgrates.org", // diameter Origin-Realm AVP used in replies +// "vendor_id": 0, // diameter Vendor-Id AVP used in replies +// "product_name": "CGRateS", // diameter Product-Name AVP used in replies +// "request_processors": [ +// { +// "id": "*default", // formal identifier of this processor +// "dry_run": false, // do not send the events to SMG, just log them +// "publish_event": false, // if enabled, it will publish internal event to pubsub +// "request_filter": "Subscription-Id>Subscription-Id-Type(0)", // filter requests processed by this processor +// "flags": [], // flags to influence processing behavior +// "continue_on_success": false, // continue to the next template if executed +// "append_cca": true, // when continuing will append cca fields to the previous ones +// "ccr_fields":[ // import content_fields template, tag will match internally CDR field, in case of .csv value will be represented by 
index of the field value +// {"tag": "TOR", "field_id": "ToR", "type": "*composed", "value": "^*voice", "mandatory": true}, +// {"tag": "OriginID", "field_id": "OriginID", "type": "*composed", "value": "Session-Id", "mandatory": true}, +// {"tag": "RequestType", "field_id": "RequestType", "type": "*composed", "value": "^*users", "mandatory": true}, +// {"tag": "Direction", "field_id": "Direction", "type": "*composed", "value": "^*out", "mandatory": true}, +// {"tag": "Tenant", "field_id": "Tenant", "type": "*composed", "value": "^*users", "mandatory": true}, +// {"tag": "Category", "field_id": "Category", "type": "*composed", "value": "^call", "mandatory": true}, +// {"tag": "Account", "field_id": "Account", "type": "*composed", "value": "^*users", "mandatory": true}, +// {"tag": "Subject", "field_id": "Subject", "type": "*composed", "value": "^*users", "mandatory": true}, +// {"tag": "Destination", "field_id": "Destination", "type": "*composed", "value": "Service-Information>IN-Information>Real-Called-Number", "mandatory": true}, +// {"tag": "SetupTime", "field_id": "SetupTime", "type": "*composed", "value": "Event-Timestamp", "mandatory": true}, +// {"tag": "AnswerTime", "field_id": "AnswerTime", "type": "*composed", "value": "Event-Timestamp", "mandatory": true}, +// {"tag": "Usage", "field_id": "Usage", "type": "*handler", "handler_id": "*ccr_usage", "mandatory": true}, +// {"tag": "SubscriberID", "field_id": "SubscriberId", "type": "*composed", "value": "Subscription-Id>Subscription-Id-Data", "mandatory": true}, +// ], +// "cca_fields":[ // fields returned in CCA +// {"tag": "GrantedUnits", "field_id": "Granted-Service-Unit>CC-Time", "type": "*handler", "handler_id": "*cca_usage", "mandatory": true}, +// ], +// }, +// ], +// }, -//"pubsubs": { -// "enabled": false, // starts PubSub service: . -//}, +// "historys": { +// "enabled": false, // starts History service: . 
+// "history_dir": "/var/log/cgrates/history", // location on disk where to store history files. +// "save_interval": "1s", // interval to save changed cache into .git archive +// }, -//"aliases": { -// "enabled": false, // starts Aliases service: . -//}, +// "pubsubs": { +// "enabled": false, // starts PubSub service: . +// }, -//"users": { -// "enabled": false, // starts User service: . -// "indexes": [], // user profile field indexes -//}, +// "aliases": { +// "enabled": false, // starts Aliases service: . +// }, -//"mailer": { -// "server": "localhost", // the server to use when sending emails out -// "auth_user": "cgrates", // authenticate to email server using this user -// "auth_passwd": "CGRateS.org", // authenticate to email server with this password -// "from_address": "cgr-mailer@localhost.localdomain" // from address used when sending emails out -//}, +// "users": { +// "enabled": false, // starts User service: . +// "indexes": [], // user profile field indexes +// }, -//"suretax": { -// "url": "", // API url -// "client_number": "", // client number, provided by SureTax -// "validation_key": "", // validation key provided by SureTax -// "business_unit": "", // client’s Business Unit -// "timezone": "Local", // convert the time of the events to this timezone before sending request out -// "include_local_cost": false, // sum local calculated cost with tax one in final cost -// "return_file_code": "0", // default or Quote purposes <0|Q> -// "response_group": "03", // determines how taxes are grouped for the response <03|13> -// "response_type": "D4", // determines the granularity of taxes and (optionally) the decimal precision for the tax calculations and amounts in the response -// "regulatory_code": "03", // provider type -// "client_tracking": "CgrId", // template extracting client information out of StoredCdr; <$RSRFields> -// "customer_number": "Subject", // template extracting customer number out of StoredCdr; <$RSRFields> -// "orig_number": 
"Subject", // template extracting origination number out of StoredCdr; <$RSRFields> -// "term_number": "Destination", // template extracting termination number out of StoredCdr; <$RSRFields> -// "bill_to_number": "", // template extracting billed to number out of StoredCdr; <$RSRFields> -// "zipcode": "", // template extracting billing zip code out of StoredCdr; <$RSRFields> -// "plus4": "", // template extracting billing zip code extension out of StoredCdr; <$RSRFields> -// "p2pzipcode": "", // template extracting secondary zip code out of StoredCdr; <$RSRFields> -// "p2pplus4": "", // template extracting secondary zip code extension out of StoredCdr; <$RSRFields> -// "units": "^1", // template extracting number of “lines” or unique charges contained within the revenue out of StoredCdr; <$RSRFields> -// "unit_type": "^00", // template extracting number of unique access lines out of StoredCdr; <$RSRFields> -// "tax_included": "^0", // template extracting tax included in revenue out of StoredCdr; <$RSRFields> -// "tax_situs_rule": "^04", // template extracting tax situs rule out of StoredCdr; <$RSRFields> -// "trans_type_code": "^010101", // template extracting transaction type indicator out of StoredCdr; <$RSRFields> -// "sales_type_code": "^R", // template extracting sales type code out of StoredCdr; <$RSRFields> -// "tax_exemption_code_list": "", // template extracting tax exemption code list out of StoredCdr; <$RSRFields> -//}, +// "mailer": { +// "server": "localhost", // the server to use when sending emails out +// "auth_user": "cgrates", // authenticate to email server using this user +// "auth_password": "CGRateS.org", // authenticate to email server with this password +// "from_address": "cgr-mailer@localhost.localdomain" // from address used when sending emails out +// }, -} + +// "suretax": { +// "url": "", // API url +// "client_number": "", // client number, provided by SureTax +// "validation_key": "", // validation key provided by SureTax +// 
"business_unit": "", // client’s Business Unit +// "timezone": "Local", // convert the time of the events to this timezone before sending request out +// "include_local_cost": false, // sum local calculated cost with tax one in final cost +// "return_file_code": "0", // default or Quote purposes <0|Q> +// "response_group": "03", // determines how taxes are grouped for the response <03|13> +// "response_type": "D4", // determines the granularity of taxes and (optionally) the decimal precision for the tax calculations and amounts in the response +// "regulatory_code": "03", // provider type +// "client_tracking": "CGRID", // template extracting client information out of StoredCdr; <$RSRFields> +// "customer_number": "Subject", // template extracting customer number out of StoredCdr; <$RSRFields> +// "orig_number": "Subject", // template extracting origination number out of StoredCdr; <$RSRFields> +// "term_number": "Destination", // template extracting termination number out of StoredCdr; <$RSRFields> +// "bill_to_number": "", // template extracting billed to number out of StoredCdr; <$RSRFields> +// "zipcode": "", // template extracting billing zip code out of StoredCdr; <$RSRFields> +// "plus4": "", // template extracting billing zip code extension out of StoredCdr; <$RSRFields> +// "p2pzipcode": "", // template extracting secondary zip code out of StoredCdr; <$RSRFields> +// "p2pplus4": "", // template extracting secondary zip code extension out of StoredCdr; <$RSRFields> +// "units": "^1", // template extracting number of “lines” or unique charges contained within the revenue out of StoredCdr; <$RSRFields> +// "unit_type": "^00", // template extracting number of unique access lines out of StoredCdr; <$RSRFields> +// "tax_included": "^0", // template extracting tax included in revenue out of StoredCdr; <$RSRFields> +// "tax_situs_rule": "^04", // template extracting tax situs rule out of StoredCdr; <$RSRFields> +// "trans_type_code": "^010101", // template 
extracting transaction type indicator out of StoredCdr; <$RSRFields> +// "sales_type_code": "^R", // template extracting sales type code out of StoredCdr; <$RSRFields> +// "tax_exemption_code_list": "", // template extracting tax exemption code list out of StoredCdr; <$RSRFields> +// }, + +} \ No newline at end of file diff --git a/data/conf/samples/actions/cgradmin.json b/data/conf/samples/actions/cgradmin.json index 4ea1f0eef..b9d6b75e2 100644 --- a/data/conf/samples/actions/cgradmin.json +++ b/data/conf/samples/actions/cgradmin.json @@ -10,11 +10,17 @@ "http": ":2080", // HTTP listening address }, -"rater": { +"rals": { "enabled": true, // enable Rater service: - "pubsubs": "internal", - "users": "internal", - "aliases": "internal", + "pubsubs_conns": [ + {"address": "*internal"} + ], + "users_conns": [ + {"address": "*internal"} + ], + "aliases_conns": [ + {"address": "*internal"} + ], }, "scheduler": { diff --git a/data/conf/samples/apier/apier.json b/data/conf/samples/apier/apier.json index 6b80a486b..8234d67b0 100644 --- a/data/conf/samples/apier/apier.json +++ b/data/conf/samples/apier/apier.json @@ -10,9 +10,11 @@ "http": ":2080", // HTTP listening address }, -"rater": { +"rals": { "enabled": true, // enable Rater service: - "aliases": "internal" + "aliases_conns": [ + {"address": "*internal"} + ], }, "scheduler": { @@ -21,7 +23,9 @@ "cdrs": { "enabled": true, // start the CDR Server service: - "rater": "internal", // address where to reach the Rater for cost calculation, empty to disable functionality: <""|internal|x.y.z.y:1234> + "rals_conns": [ + {"address": "*internal"} // address where to reach the Rater <""|*internal|127.0.0.1:2013> + ], }, "aliases": { @@ -34,4 +38,9 @@ } }, +"cdrstats": { + "enabled": true, // starts the cdrstats service: + "save_interval": "0s", // interval to save changed stats into dataDb storage +}, + } diff --git a/data/conf/samples/cdrccsv/cgrates.json b/data/conf/samples/cdrccsv/cgrates.json new file mode 100644 index 
000000000..06768551f --- /dev/null +++ b/data/conf/samples/cdrccsv/cgrates.json @@ -0,0 +1,57 @@ +{ + +// Real-time Charging System for Telecom & ISP environments +// Copyright (C) ITsysCOM GmbH +// +// This file contains the default configuration hardcoded into CGRateS. +// This is what you get when you load CGRateS with an empty configuration file. + + + "rals": { + "enabled": true // so we can query CDRs + }, + + "cdrs": { + "enabled": true, + "rals_conns": [], // no rating support, just *raw CDR testing +}, + + + + "cdrc": [ + { + "id": "*default", + "enabled": true, + "cdr_in_dir": "/tmp/cdrctests/csvit1/in", + "cdr_out_dir": "/tmp/cdrctests/csvit1/out", + "cdr_source_id": "csvit1", + }, + { + "id": "*CSVit2", // identifier of the CDRC runner + "enabled": true, // enable CDR client functionality + "field_separator": ";", + "cdr_in_dir": "/tmp/cdrctests/csvit2/in", // absolute path towards the directory where the CDRs are stored + "cdr_out_dir": "/tmp/cdrctests/csvit2/out", // absolute path towards the directory where processed CDRs will be moved + "cdr_source_id": "csvit2", // free form field, tag identifying the source of the CDRs within CDRS database + "content_fields":[ // import content_fields template, tag will match internally CDR field, in case of .csv value will be represented by index of the field value + {"tag": "TOR", "field_id": "ToR", "type": "*composed", "value": "^*voice", "mandatory": true}, + {"tag": "OriginID", "field_id": "OriginID", "type": "*composed", "value": "0", "mandatory": true}, + {"tag": "RequestType", "field_id": "RequestType", "type": "*composed", "value": "1", "mandatory": true}, + {"tag": "Direction", "field_id": "Direction", "type": "*composed", "value": "^*out", "mandatory": true}, + {"tag": "Tenant", "field_id": "Tenant", "type": "*composed", "value": "2", "mandatory": true}, + {"tag": "Category", "field_id": "Category", "type": "*composed", "value": "^call", "mandatory": true}, + {"tag": "Account", "field_id": "Account", 
"type": "*composed", "value": "3", "mandatory": true}, + {"tag": "Subject", "field_id": "Subject", "type": "*composed", "value": "3", "mandatory": true}, + {"tag": "Destination", "field_id": "Destination", "type": "*composed", "value": "~4:s/0([1-9]\\d+)/+49${1}/", "mandatory": true}, + {"tag": "SetupTime", "field_id": "SetupTime", "type": "*composed", "value": "5", "mandatory": true}, + {"tag": "AnswerTime", "field_id": "AnswerTime", "type": "*composed", "value": "5", "mandatory": true}, + {"tag": "Usage", "field_id": "Usage", "type": "*composed", "value": "6", "mandatory": true}, + {"tag": "HDRExtra3", "field_id": "HDRExtra3", "type": "*composed", "value": "6", "mandatory": true}, + {"tag": "HDRExtra2", "field_id": "HDRExtra2", "type": "*composed", "value": "6", "mandatory": true}, + {"tag": "HDRExtra1", "field_id": "HDRExtra1", "type": "*composed", "value": "6", "mandatory": true}, + ], + }, +], + + +} \ No newline at end of file diff --git a/data/conf/samples/cdrcflatstore/cgrates.json b/data/conf/samples/cdrcflatstore/cgrates.json index 454a5db8a..5b637fa20 100644 --- a/data/conf/samples/cdrcflatstore/cgrates.json +++ b/data/conf/samples/cdrcflatstore/cgrates.json @@ -7,7 +7,7 @@ // This is what you get when you load CGRateS with an empty configuration file. -"rater": { +"rals": { "enabled": true, // enable Rater service: }, @@ -22,10 +22,13 @@ }, -"cdrc": { - "FLATSTORE": { +"cdrc": [ + { + "id": "FLATSTORE", "enabled": true, // enable CDR client functionality - "cdrs": "internal", // address where to reach CDR server. + "cdrs_conns": [ + {"address": "*internal"} // address where to reach CDR server. 
<*internal|x.y.z.y:1234> + ], "cdr_format": "opensips_flatstore", // CDR file format "field_separator": "|", // separator used in case of csv files "run_delay": 0, // sleep interval in seconds between consecutive runs, 0 to use automation via inotify @@ -54,6 +57,6 @@ {"tag": "DialogId", "cdr_field_id": "DialogId", "type": "cdrfield", "value": "11"}, ], }, -}, +], } diff --git a/data/conf/samples/cdrcfwv/cgrates.json b/data/conf/samples/cdrcfwv/cgrates.json index c9b30158e..6a3a3f3ab 100644 --- a/data/conf/samples/cdrcfwv/cgrates.json +++ b/data/conf/samples/cdrcfwv/cgrates.json @@ -7,7 +7,7 @@ // This is what you get when you load CGRateS with an empty configuration file. -"rater": { +"rals": { "enabled": true, // enable Rater service: }, @@ -22,11 +22,14 @@ }, -"cdrc": { - "FWV1": { +"cdrc": [ + { + "id": "FWV1", "enabled": true, // enable CDR client functionality "dry_run": true, - "cdrs": "internal", // address where to reach CDR server. + "cdrs_conns": [ + {"address": "*internal"} // address where to reach CDR server. 
<*internal|x.y.z.y:1234> + ], "cdr_format": "fwv", // CDR file format "cdr_in_dir": "/tmp/cgr_fwv/cdrc/in", // absolute path towards the directory where the CDRs are stored "cdr_out_dir": "/tmp/cgr_fwv/cdrc/out", // absolute path towards the directory where processed CDRs will be moved @@ -60,6 +63,6 @@ {"tag": "TotalDuration", "type": "metatag", "metatag_id":"total_duration", "value": "150", "width": 12}, ], }, -}, +], } diff --git a/data/conf/samples/cdrsreplicationmaster/cdrsreplicationmaster.json b/data/conf/samples/cdrsreplicationmaster/cdrsreplicationmaster.json index 20b8605d6..030824c84 100644 --- a/data/conf/samples/cdrsreplicationmaster/cdrsreplicationmaster.json +++ b/data/conf/samples/cdrsreplicationmaster/cdrsreplicationmaster.json @@ -4,7 +4,7 @@ // Used in apier_local_tests // Starts rater, cdrs and mediator connecting over internal channel -"rater": { +"rals": { "enabled": true, // enable Rater service: }, @@ -12,8 +12,8 @@ "enabled": true, // start the CDR Server service: "store_cdrs": false, // store cdrs in storDb "cdr_replication":[ // replicate the rated CDR to a number of servers - {"transport": "*http_post", "server": "http://127.0.0.1:12080/cdr_http", "attempts": 1}, - //{"transport": "*http_post", "server": "http://127.0.0.1:8000/mycdr"}, + {"transport": "*http_post", "address": "http://127.0.0.1:12080/cdr_http", "attempts": 1}, + //{"transport": "*http_post", "address": "http://127.0.0.1:8000/mycdr"}, ], }, diff --git a/data/conf/samples/cdrsreplicationslave/cdrsreplicationslave.json b/data/conf/samples/cdrsreplicationslave/cdrsreplicationslave.json index 2d06fb5a8..a9d04ca13 100644 --- a/data/conf/samples/cdrsreplicationslave/cdrsreplicationslave.json +++ b/data/conf/samples/cdrsreplicationslave/cdrsreplicationslave.json @@ -10,7 +10,7 @@ "http": "127.0.0.1:12080", // HTTP listening address }, -"rater": { +"rals": { "enabled": true, // enable Rater service: }, diff --git a/data/conf/samples/cdrstats/cdrstats.json 
b/data/conf/samples/cdrstats/cdrstats.json index 0c32dd32b..fc4d074b0 100644 --- a/data/conf/samples/cdrstats/cdrstats.json +++ b/data/conf/samples/cdrstats/cdrstats.json @@ -5,24 +5,31 @@ // Starts rater, cdrs and mediator connecting over internal channel "listen": { - "rpc_json": ":2012", // RPC JSON listening address - "rpc_gob": ":2013", // RPC GOB listening address - "http": ":2080", // HTTP listening address + "rpc_json": ":2012", + "rpc_gob": ":2013", + "http": ":2080", }, -"rater": { - "enabled": true, // enable Rater service: - "cdrstats": "internal", +"rals": { + "enabled": true, + "cdrstats_conns": [ + {"address": "*internal"} + ], }, + "cdrs": { - "enabled": true, // start the CDR Server service: - "store_cdrs": false, // store cdrs in storDb - "rater": "internal", // address where to reach the Rater for cost calculation, empty to disable functionality: <""|internal|x.y.z.y:1234> - "cdrstats": "internal", // address where to reach the cdrstats service. Empty to disable stats gathering out of mediated CDRs <""|internal|x.y.z.y:1234> + "enabled": true, + "store_cdrs": false, + "rals_conns": [ + {"address": "*internal"} + ], + "cdrstats_conns": [ + {"address": "*internal"} + ] }, "cdrstats": { - "enabled": true, // starts the cdrstats service: + "enabled": true, "save_interval": "1s", }, diff --git a/data/conf/samples/cdrsv2mongo/cdrsv2psql.json b/data/conf/samples/cdrsv2mongo/cdrsv2psql.json index e62a46f95..8b7600efb 100644 --- a/data/conf/samples/cdrsv2mongo/cdrsv2psql.json +++ b/data/conf/samples/cdrsv2mongo/cdrsv2psql.json @@ -10,13 +10,15 @@ }, -"rater": { +"rals": { "enabled": true, // enable Rater service: }, "cdrs": { "enabled": true, // start the CDR Server service: - "rater": "internal", // address where to reach the Rater for cost calculation, empty to disable functionality: <""|internal|x.y.z.y:1234> + "rals_conns": [ + {"address": "*internal"} // address where to reach the Rater for cost calculation, empty to disable functionality: 
<""|*internal|x.y.z.y:1234> + ], }, -} \ No newline at end of file +} diff --git a/data/conf/samples/cdrsv2mysql/cdrsv2mysql.json b/data/conf/samples/cdrsv2mysql/cdrsv2mysql.json index e05ac0535..4f424a0f8 100644 --- a/data/conf/samples/cdrsv2mysql/cdrsv2mysql.json +++ b/data/conf/samples/cdrsv2mysql/cdrsv2mysql.json @@ -4,13 +4,15 @@ // Used in apier_local_tests // Starts rater, cdrs and mediator connecting over internal channel -"rater": { +"rals": { "enabled": true, // enable Rater service: }, "cdrs": { "enabled": true, // start the CDR Server service: - "rater": "internal", // address where to reach the Rater for cost calculation, empty to disable functionality: <""|internal|x.y.z.y:1234> + "rals_conns": [ + {"address": "*internal"} // address where to reach the Rater for cost calculation, empty to disable functionality: <""|*internal|x.y.z.y:1234> + ], }, -} \ No newline at end of file +} diff --git a/data/conf/samples/cdrsv2psql/cdrsv2psql.json b/data/conf/samples/cdrsv2psql/cdrsv2psql.json index 9f78c6df8..0a715bc07 100644 --- a/data/conf/samples/cdrsv2psql/cdrsv2psql.json +++ b/data/conf/samples/cdrsv2psql/cdrsv2psql.json @@ -10,13 +10,15 @@ }, -"rater": { +"rals": { "enabled": true, // enable Rater service: }, "cdrs": { "enabled": true, // start the CDR Server service: - "rater": "internal", // address where to reach the Rater for cost calculation, empty to disable functionality: <""|internal|x.y.z.y:1234> + "rals_conns": [ + {"address": "*internal"} // address where to reach the Rater for cost calculation, empty to disable functionality: <""|*internal|x.y.z.y:1234> + ], }, -} \ No newline at end of file +} diff --git a/data/conf/samples/cgradmin/cgradmin.json b/data/conf/samples/cgradmin/cgradmin.json index 2f6f9e4a1..431ad027d 100644 --- a/data/conf/samples/cgradmin/cgradmin.json +++ b/data/conf/samples/cgradmin/cgradmin.json @@ -10,32 +10,38 @@ "http": ":2080", // HTTP listening address }, -//"tariffplan_db": { // database used to store offline tariff 
plans and CDRs -// "db_type": "mongo", // stor database type to use: -// "db_host": "127.0.0.1", // the host to connect to -// "db_port": 27017, // the port to reach the stordb -// "db_name": "tpdb", -//}, -// -//"data_db": { // database used to store offline tariff plans and CDRs -// "db_type": "mongo", // stor database type to use: -// "db_host": "127.0.0.1", // the host to connect to -// "db_port": 27017, // the port to reach the stordb -// "db_name": "datadb", -//}, +"tariffplan_db": { // database used to store offline tariff plans and CDRs + "db_type": "mongo", // stor database type to use: + "db_host": "127.0.0.1", // the host to connect to + "db_port": 27017, // the port to reach the stordb + "db_name": "tpdb", +}, + +"data_db": { // database used to store offline tariff plans and CDRs + "db_type": "mongo", // stor database type to use: + "db_host": "127.0.0.1", // the host to connect to + "db_port": 27017, // the port to reach the stordb + "db_name": "datadb", +}, "stor_db": { // database used to store offline tariff plans and CDRs "db_type": "mongo", // stor database type to use: "db_host": "127.0.0.1", // the host to connect to "db_port": 27017, // the port to reach the stordb - "db_name": "stordb", + "db_name": "stordb", }, -"rater": { +"rals": { "enabled": true, // enable Rater service: - "pubsubs": "internal", - "users": "internal", - "aliases": "internal", + "pubsubs_conns": [ + {"address": "*internal"} + ], + "users_conns": [ + {"address": "*internal"} + ], + "aliases_conns": [ + {"address": "*internal"} + ], }, "scheduler": { diff --git a/data/conf/samples/dmtagent/cgrates.json b/data/conf/samples/dmtagent/cgrates.json index 93fe0df45..9599f11d2 100644 --- a/data/conf/samples/dmtagent/cgrates.json +++ b/data/conf/samples/dmtagent/cgrates.json @@ -10,26 +10,35 @@ "http": ":2080", // HTTP listening address }, -"rater": { - "enabled": true, // enable Rater service: - "cdrstats": "internal", // address where to reach the cdrstats service, empty to 
disable stats functionality<""|internal|x.y.z.y:1234> - "pubsubs": "internal", // address where to reach the pubusb service, empty to disable pubsub functionality: <""|internal|x.y.z.y:1234> - "users": "internal", // address where to reach the user service, empty to disable user profile functionality: <""|internal|x.y.z.y:1234> - "aliases": "internal", +"rals": { + "enabled": true, + "cdrstats_conns": [ + {"address": "*internal"} + ], + "pubsubs_conns": [ + {"address": "*internal"} + ], + "users_conns": [ + {"address": "*internal"} + ], + "aliases_conns": [ + {"address": "*internal"} + ], }, "scheduler": { - "enabled": true, // start Scheduler service: + "enabled": true, }, "cdrs": { - "enabled": true, // start the CDR Server service: - "rater": "internal", // address where to reach the Rater for cost calculation, empty to disable functionality: <""|internal|x.y.z.y:1234> - "cdrstats": "internal", // address where to reach the cdrstats service, empty to disable stats functionality<""|internal|x.y.z.y:1234> + "enabled": true, + "cdrstats_conns": [ + {"address": "*internal"} + ], }, "cdrstats": { - "enabled": true, // starts the cdrstats service: + "enabled": true, }, "pubsubs": { @@ -47,13 +56,13 @@ "sm_generic": { "enabled": true, - "rater": "internal", - "cdrs": "internal", }, "diameter_agent": { "enabled": true, - "pubsubs": "internal", + "pubsubs_conns": [ + {"address": "*internal"} + ], }, } diff --git a/data/conf/samples/dmtagent/data.json b/data/conf/samples/dmtagent/data.json new file mode 100644 index 000000000..c10765409 --- /dev/null +++ b/data/conf/samples/dmtagent/data.json @@ -0,0 +1,110 @@ + +{ + +"diameter_agent": { + "request_processors": [ + { + "id": "data_init", // formal identifier of this processor + "dry_run": false, // do not send the events to SMG, just log them + "request_filter": "Service-Context-Id(^gprs);CC-Request-Type(1)", // filter requests processed by this processor + "continue_on_success": false, // continue to the next template if 
executed + "ccr_fields":[ // import content_fields template, tag will match internally CDR field, in case of .csv value will be represented by index of the field value + {"tag": "TOR", "field_id": "ToR", "type": "*composed", "value": "^*data", "mandatory": true}, + {"tag": "OriginID", "field_id": "OriginID", "type": "*composed", "value": "Session-Id", "mandatory": true}, + {"tag": "RequestType", "field_id": "RequestType", "type": "*composed", "value": "^*prepaid", "mandatory": true}, + {"tag": "Direction", "field_id": "Direction", "type": "*composed", "value": "^*out", "mandatory": true}, + {"tag": "Tenant", "field_id": "Tenant", "type": "*composed", "value": "^cgrates.org", "mandatory": true}, + {"tag": "Category", "field_id": "Category", "type": "*composed", "value": "^generic", "mandatory": true}, + {"tag": "Account", "field_id": "Account", "type": "*grouped", "value": "Subscription-Id>Subscription-Id-Data", "field_filter":"Subscription-Id>Subscription-Id-Type(0)", "mandatory": true}, + {"tag": "Destination", "field_id": "Destination", "type": "*constant", "value": "data"}, + {"tag": "SetupTime", "field_id": "SetupTime", "type": "*composed", "value": "Event-Timestamp", "mandatory": true}, + {"tag": "AnswerTime", "field_id": "AnswerTime", "type": "*composed", "value": "Event-Timestamp", "mandatory": true}, + {"tag": "Usage", "field_id": "Usage", "type": "*constant", "value": "2048"}, + ], + "cca_fields": [ + {"tag": "ResultCode", "field_id": "Result-Code", "type": "*constant", "value": "^2001"}, + {"tag": "ResultCode", "field_filter": "CGRMaxUsage(0)", "field_id": "Result-Code", "type": "*constant", "value": "4010"}, + ], + }, + { + "id": "data_update_grp1", // formal identifier of this processor + "dry_run": false, // do not send the events to SMG, just log them + "request_filter": "Service-Context-Id(^gprs);CC-Request-Type(2);Multiple-Services-Credit-Control>Rating-Group(1)", // filter requests processed by this processor + "continue_on_success": true, // 
continue to the next template if executed + "ccr_fields":[ // import content_fields template, tag will match internally CDR field, in case of .csv value will be represented by index of the field value + {"tag": "TOR", "field_id": "ToR", "type": "*composed", "value": "^*data", "mandatory": true}, + {"tag": "InitialOriginID", "field_id": "InitialOriginID", "type": "*composed", "value": "Session-Id", "mandatory": true}, + {"tag": "OriginID", "field_id": "OriginID", "type": "*composed", "value": "Session-Id", "mandatory": true}, + {"tag": "OriginID", "field_id": "OriginID", "type": "*composed", "value": "^_grp1", "append": true}, + {"tag": "RequestType", "field_id": "RequestType", "type": "*composed", "value": "^*prepaid", "mandatory": true}, + {"tag": "Direction", "field_id": "Direction", "type": "*composed", "value": "^*out", "mandatory": true}, + {"tag": "Tenant", "field_id": "Tenant", "type": "*composed", "value": "^cgrates.org", "mandatory": true}, + {"tag": "Category", "field_id": "Category", "type": "*composed", "value": "^generic", "mandatory": true}, + {"tag": "Account", "field_id": "Account", "type": "*grouped", "value": "Subscription-Id>Subscription-Id-Data", "field_filter":"Subscription-Id>Subscription-Id-Type(0)", "mandatory": true}, + {"tag": "Destination", "field_id": "Destination", "type": "*constant", "value": "data"}, + {"tag": "SetupTime", "field_id": "SetupTime", "type": "*composed", "value": "Event-Timestamp", "mandatory": true}, + {"tag": "AnswerTime", "field_id": "AnswerTime", "type": "*composed", "value": "Event-Timestamp", "mandatory": true}, + {"tag": "Usage", "field_id": "Usage", "type": "*constant", "value": "2048"}, + {"tag": "LastUsed", "field_id": "LastUsed", "field_filter":"Multiple-Services-Credit-Control>Rating-Group(1)", "type": "*handler", "handler_id": "*sum", + "value": "Multiple-Services-Credit-Control>Used-Service-Unit>CC-Input-Octets;^|;Multiple-Services-Credit-Control>Used-Service-Unit>CC-Output-Octets"}, + ], + "cca_fields": [ 
+ {"tag": "ResultCode", "field_id": "Result-Code", "type": "*constant", "value": "^2001"}, + {"tag": "ResultCode", "field_filter": "CGRMaxUsage(0)", "field_id": "Result-Code", "type": "*constant", "value": "4010"}, + ], + }, + { + "id": "data_update_grp2", // formal identifier of this processor + "dry_run": false, // do not send the events to SMG, just log them + "request_filter": "Service-Context-Id(^gprs);CC-Request-Type(2);Multiple-Services-Credit-Control>Rating-Group(2)", // filter requests processed by this processor + "continue_on_success": true, // continue to the next template if executed + "ccr_fields":[ // import content_fields template, tag will match internally CDR field, in case of .csv value will be represented by index of the field value + {"tag": "TOR", "field_id": "ToR", "type": "*composed", "value": "^*data", "mandatory": true}, + {"tag": "InitialOriginID", "field_id": "InitialOriginID", "type": "*composed", "value": "Session-Id", "mandatory": true}, + {"tag": "OriginID", "field_id": "OriginID", "type": "*composed", "value": "Session-Id", "mandatory": true}, + {"tag": "OriginID", "field_id": "OriginID", "type": "*composed", "value": "^_grp2", "append": true}, + {"tag": "RequestType", "field_id": "RequestType", "type": "*composed", "value": "^*prepaid", "mandatory": true}, + {"tag": "Direction", "field_id": "Direction", "type": "*composed", "value": "^*out", "mandatory": true}, + {"tag": "Tenant", "field_id": "Tenant", "type": "*composed", "value": "^cgrates.org", "mandatory": true}, + {"tag": "Category", "field_id": "Category", "type": "*composed", "value": "^generic", "mandatory": true}, + {"tag": "Account", "field_id": "Account", "type": "*grouped", "value": "Subscription-Id>Subscription-Id-Data", "field_filter":"Subscription-Id>Subscription-Id-Type(0)", "mandatory": true}, + {"tag": "Destination", "field_id": "Destination", "type": "*constant", "value": "data"}, + {"tag": "SetupTime", "field_id": "SetupTime", "type": "*composed", "value": 
"Event-Timestamp", "mandatory": true}, + {"tag": "AnswerTime", "field_id": "AnswerTime", "type": "*composed", "value": "Event-Timestamp", "mandatory": true}, + {"tag": "Usage", "field_id": "Usage", "type": "*constant", "value": "2048"}, + {"tag": "LastUsed", "field_id": "LastUsed", "field_filter":"Multiple-Services-Credit-Control>Rating-Group(2)", "type": "*handler", "handler_id": "*sum", + "value": "Multiple-Services-Credit-Control>Used-Service-Unit>CC-Input-Octets;^|;Multiple-Services-Credit-Control>Used-Service-Unit>CC-Output-Octets"}, + ], + "cca_fields": [ + {"tag": "ResultCode", "field_id": "Result-Code", "type": "*constant", "value": "^2001"}, + {"tag": "ResultCode", "field_filter": "CGRMaxUsage(0)", "field_id": "Result-Code", "type": "*constant", "value": "4010"}, + ], + }, + { + "id": "data_terminate", // formal identifier of this processor + "dry_run": false, // do not send the events to SMG, just log them + "request_filter": "Service-Context-Id(^gprs);CC-Request-Type(3)", // filter requests processed by this processor + "continue_on_success": false, // continue to the next template if executed + "ccr_fields":[ // import content_fields template, tag will match internally CDR field, in case of .csv value will be represented by index of the field value + {"tag": "TOR", "field_id": "ToR", "type": "*composed", "value": "^*data", "mandatory": true}, + {"tag": "OriginID", "field_id": "OriginID", "type": "*composed", "value": "Session-Id", "mandatory": true}, + {"tag": "OriginIDPrefix", "field_id": "OriginIDPrefix", "type": "*composed", "value": "Session-Id", "mandatory": true}, + {"tag": "RequestType", "field_id": "RequestType", "type": "*composed", "value": "^*prepaid", "mandatory": true}, + {"tag": "Direction", "field_id": "Direction", "type": "*composed", "value": "^*out", "mandatory": true}, + {"tag": "Tenant", "field_id": "Tenant", "type": "*composed", "value": "^cgrates.org", "mandatory": true}, + {"tag": "Category", "field_id": "Category", "type": 
"*composed", "value": "^generic", "mandatory": true}, + {"tag": "Account", "field_id": "Account", "type": "*grouped", "value": "Subscription-Id>Subscription-Id-Data", "field_filter":"Subscription-Id>Subscription-Id-Type(0)", "mandatory": true}, + {"tag": "Destination", "field_id": "Destination", "type": "*constant", "value": "data"}, + {"tag": "SetupTime", "field_id": "SetupTime", "type": "*composed", "value": "Event-Timestamp", "mandatory": true}, + {"tag": "AnswerTime", "field_id": "AnswerTime", "type": "*composed", "value": "Event-Timestamp", "mandatory": true}, + {"tag": "LastUsed", "field_id": "LastUsed", "type": "*handler", "handler_id": "*sum", + "value": "Multiple-Services-Credit-Control>Used-Service-Unit>CC-Input-Octets;^|;Multiple-Services-Credit-Control>Used-Service-Unit>CC-Output-Octets"}, + ], + "cca_fields": [ + {"tag": "ResultCode", "field_id": "Result-Code", "type": "*constant", "value": "^2001"} + ], + }, + ] +} + +} \ No newline at end of file diff --git a/data/conf/samples/fscsv/cgrates.json b/data/conf/samples/fscsv/cgrates.json index c7c8abba9..233fb6f1f 100644 --- a/data/conf/samples/fscsv/cgrates.json +++ b/data/conf/samples/fscsv/cgrates.json @@ -6,9 +6,11 @@ "http": ":2080", // HTTP listening address }, -"rater": { +"rals": { "enabled": true, // enable Rater service: - "cdrstats": "internal", // address where to reach the cdrstats service, empty to disable stats functionality<""|internal|x.y.z.y:1234> + "cdrstats_conns": [ + {"address": "*internal"} + ], }, "scheduler": { @@ -17,13 +19,14 @@ "cdrs": { "enabled": true, // start the CDR Server service: - "rater": "internal", // address where to reach the Rater for cost calculation, empty to disable functionality: <""|internal|x.y.z.y:1234> - "cdrstats": "internal", // address where to reach the cdrstats service, empty to disable stats functionality<""|internal|x.y.z.y:1234> + "cdrstats_conns": [ + {"address": "*internal"} + ], }, "cdrstats": { "enabled": true, // starts the cdrstats service: 
- "save_interval": "5s" + "save_interval": "5s" }, } diff --git a/data/conf/samples/fscsv/freeswitch_csvcdr.json b/data/conf/samples/fscsv/freeswitch_csvcdr.json index b20f3c3f4..27c9932b9 100644 --- a/data/conf/samples/fscsv/freeswitch_csvcdr.json +++ b/data/conf/samples/fscsv/freeswitch_csvcdr.json @@ -1,8 +1,9 @@ { // Contains CDRC template for FreeSWITCH CDR -"cdrc": { - "CDRC-CSV2": { +"cdrc": [ + { + "id": "CDRC-CSV2", "enabled": true, // enable CDR client functionality "cdr_in_dir": "/tmp/cgrates/cdrc_fs/in", // absolute path towards the directory where the CDRs are stored "cdr_out_dir": "/tmp/cgrates/cdrc_fs/out", // absolute path towards the directory where processed CDRs will be moved @@ -22,6 +23,6 @@ {"tag": "usage", "cdr_field_id": "usage", "type": "cdrfield", "value": "~8:s/^(\\d+)$/${1}s/", "mandatory": true}, ], }, -}, +], } diff --git a/data/conf/samples/hapool/cgrrater1/cgr.json b/data/conf/samples/hapool/cgrrater1/cgr.json new file mode 100644 index 000000000..d33b19f7b --- /dev/null +++ b/data/conf/samples/hapool/cgrrater1/cgr.json @@ -0,0 +1,45 @@ +{ +"listen": { + "rpc_json": ":2014", + "rpc_gob": ":2015", + "http": ":2081", +}, + +"rals": { + "enabled": true, + "cdrstats_conns": [ + {"address": "*internal"} + ], + "pubsubs_conns": [ + {"address": "*internal"} + ], + "users_conns": [ + {"address": "*internal"} + ], + "aliases_conns": [ + {"address": "*internal"} + ], +}, + +"scheduler": { + "enabled": true, +}, + +"cdrstats": { + "enabled": true, +}, + +"pubsubs": { + "enabled": true, +}, + +"aliases": { + "enabled": true, +}, + +"users": { + "enabled": true, + "indexes": ["SubscriberId"], +}, + +} diff --git a/data/conf/samples/hapool/cgrrater2/cgr.json b/data/conf/samples/hapool/cgrrater2/cgr.json new file mode 100644 index 000000000..604f5b9ee --- /dev/null +++ b/data/conf/samples/hapool/cgrrater2/cgr.json @@ -0,0 +1,46 @@ +{ + +"listen": { + "rpc_json": ":2016", + "rpc_gob": ":2017", + "http": ":2082", +}, + +"rals": { + "enabled": true, + 
"cdrstats_conns": [ + {"address": "*internal"} + ], + "pubsubs_conns": [ + {"address": "*internal"} + ], + "users_conns": [ + {"address": "*internal"} + ], + "aliases_conns": [ + {"address": "*internal"} + ], +}, + +"scheduler": { + "enabled": true, +}, + +"cdrstats": { + "enabled": true, +}, + +"pubsubs": { + "enabled": true, +}, + +"aliases": { + "enabled": true, +}, + +"users": { + "enabled": true, + "indexes": ["SubscriberId"], +}, + +} diff --git a/data/conf/samples/hapool/cgrsmg1/cgr.json b/data/conf/samples/hapool/cgrsmg1/cgr.json new file mode 100644 index 000000000..0473fb2bc --- /dev/null +++ b/data/conf/samples/hapool/cgrsmg1/cgr.json @@ -0,0 +1,27 @@ +{ +"listen": { + "rpc_json": "127.0.0.1:2018", // RPC JSON listening address + "rpc_gob": "127.0.0.1:2019", // RPC GOB listening address + "http": "127.0.0.1:2083", // HTTP listening address +}, + +"cdrs": { + "enabled": true, // start the CDR Server service: + "rals_conns": [ + {"address": "127.0.0.1:2014"}, + {"address": "127.0.0.1:2016"} + ], + "cdrstats": "", // address where to reach the cdrstats service, empty to disable stats functionality<""|internal|x.y.z.y:1234> +}, + +"sm_generic": { + "enabled": true, + "rals_conns": [ + {"address": "127.0.0.1:2014"}, + {"address": "127.0.0.1:2016"} + ], + "cdrs_conns": [ + {"address": "*internal"} // address where to reach CDR Server, empty to disable CDR capturing + ], +}, +} diff --git a/data/conf/samples/hapool/cgrsmg2/cgr.json b/data/conf/samples/hapool/cgrsmg2/cgr.json new file mode 100644 index 000000000..645a8ccf3 --- /dev/null +++ b/data/conf/samples/hapool/cgrsmg2/cgr.json @@ -0,0 +1,28 @@ +{ +"listen": { + "rpc_json": "127.0.0.1:2020", // RPC JSON listening address + "rpc_gob": "127.0.0.1:2021", // RPC GOB listening address + "http": "127.0.0.1:2084", // HTTP listening address +}, + +"cdrs": { + "enabled": true, // start the CDR Server service: + "rals_conns": [ + {"address": "127.0.0.1:2014"}, + {"address": "127.0.0.1:2016"} + ], + "cdrstats": "", 
// address where to reach the cdrstats service, empty to disable stats functionality<""|internal|x.y.z.y:1234> +}, + +"sm_generic": { + "enabled": true, + "rals_conns": [ + {"address": "127.0.0.1:2014"}, + {"address": "127.0.0.1:2016"} + ], + "cdrs_conns": [ + {"address": "*internal"} // address where to reach CDR Server, empty to disable CDR capturing + ], +}, + +} diff --git a/data/conf/samples/hapool/dagent/cgr.json b/data/conf/samples/hapool/dagent/cgr.json new file mode 100644 index 000000000..bf06a164c --- /dev/null +++ b/data/conf/samples/hapool/dagent/cgr.json @@ -0,0 +1,10 @@ +{ +"diameter_agent": { + "enabled": true, + "listen": "127.0.0.1:3868", + "sm_generic_conns": [ + {"address": "127.0.0.1:2018"}, + {"address": "127.0.0.1:2020"} + ], +}, +} diff --git a/data/conf/samples/multifiles/b/b.json b/data/conf/samples/multifiles/b/b.json index 2187cae01..84e72f697 100644 --- a/data/conf/samples/multifiles/b/b.json +++ b/data/conf/samples/multifiles/b/b.json @@ -4,7 +4,7 @@ // Should be the second file loaded "general": { - "default_reqtype": "*pseudoprepaid", // default request type to consider when missing from requests: <""|*prepaid|*postpaid|*pseudoprepaid|*rated> + "default_request_type": "*pseudoprepaid", // default request type to consider when missing from requests: <""|*prepaid|*postpaid|*pseudoprepaid|*rated> }, "cdre": { diff --git a/data/conf/samples/multiplecdrc/multiplecdrc_fwexport.json b/data/conf/samples/multiplecdrc/multiplecdrc_fwexport.json index d6285e750..a9b18ad40 100644 --- a/data/conf/samples/multiplecdrc/multiplecdrc_fwexport.json +++ b/data/conf/samples/multiplecdrc/multiplecdrc_fwexport.json @@ -4,7 +4,7 @@ // Used in mediator_local_test // Starts rater, cdrs and mediator connecting over internal channel -"rater": { +"rals": { "enabled": true, // enable Rater service: }, @@ -14,17 +14,18 @@ "cdrs": { "enabled": true, // start the CDR Server service: - "mediator": "internal", // address where to reach the Mediator. 
Empty for disabling mediation. <""|internal> }, -"cdrc": { - "CDRC-CSV1": { +"cdrc": [ + { + "id": "CDRC-CSV1", "enabled": true, // enable CDR client functionality "cdr_in_dir": "/tmp/cgrates/cdrc1/in", // absolute path towards the directory where the CDRs are stored "cdr_out_dir": "/tmp/cgrates/cdrc1/out", // absolute path towards the directory where processed CDRs will be moved "cdr_source_id": "csv1", // free form field, tag identifying the source of the CDRs within CDRS database }, - "CDRC-CSV2": { + { + "id": "CDRC-CSV2", "enabled": true, // enable CDR client functionality "cdr_in_dir": "/tmp/cgrates/cdrc2/in", // absolute path towards the directory where the CDRs are stored "cdr_out_dir": "/tmp/cgrates/cdrc2/out", // absolute path towards the directory where processed CDRs will be moved @@ -44,7 +45,8 @@ {"cdr_field_id": "usage", "value": "~9:s/^(\\d+)$/${1}s/"}, ], }, - "CDRC-CSV3": { + { + "id": "CDRC-CSV3", "enabled": true, // enable CDR client functionality "field_separator": ";", // separator used in case of csv files "cdr_in_dir": "/tmp/cgrates/cdrc3/in", // absolute path towards the directory where the CDRs are stored @@ -65,11 +67,7 @@ {"cdr_field_id": "usage", "value": "~6:s/^(\\d+)$/${1}s/"}, ], } -}, - -"mediator": { - "enabled": true, // starts Mediator service: . 
-}, +], "cdre": { "CDRE-FW1": { diff --git a/data/conf/samples/multiral1/cgrates.json b/data/conf/samples/multiral1/cgrates.json new file mode 100644 index 000000000..db96253dc --- /dev/null +++ b/data/conf/samples/multiral1/cgrates.json @@ -0,0 +1,17 @@ +{ +// CGRateS Configuration file +// +// Used for multiple RAL configuration tests +// Starts rater, scheduler + +"listen": { + "rpc_json": ":2012", // RPC JSON listening address + "rpc_gob": ":2013", // RPC GOB listening address + "http": ":2080", // HTTP listening address +}, + +"rals": { + "enabled": true, // enable Rater service: +}, + +} \ No newline at end of file diff --git a/data/conf/samples/multiral2/cgrates.json b/data/conf/samples/multiral2/cgrates.json new file mode 100644 index 000000000..7bc2d4e8f --- /dev/null +++ b/data/conf/samples/multiral2/cgrates.json @@ -0,0 +1,17 @@ +{ +// CGRateS Configuration file +// +// Used for multiple RAL configuration tests +// Starts RAL + +"listen": { + "rpc_json": ":12012", // RPC JSON listening address + "rpc_gob": ":12013", // RPC GOB listening address + "http": ":12080", // HTTP listening address +}, + +"rals": { + "enabled": true, // enable Rater service: +}, + +} \ No newline at end of file diff --git a/data/conf/samples/osips_cdrs_cdrstats.cfg b/data/conf/samples/osips_cdrs_cdrstats.cfg deleted file mode 100644 index fbdeb4dba..000000000 --- a/data/conf/samples/osips_cdrs_cdrstats.cfg +++ /dev/null @@ -1,61 +0,0 @@ -# Real-time Charging System for Telecom & ISP environments -# Copyright (C) ITsysCOM GmbH -# -# This file contains the default configuration hardcoded into CGRateS. -# This is what you get when you load CGRateS with an empty configuration file. - -[global] -rpc_json_listen = :2012 # RPC JSON listening address - -[rater] -enabled = true # Enable RaterCDRSExportPath service: . - -[scheduler] -enabled = true # Starts Scheduler service: . - -[cdrs] -enabled = true # Start the CDR Server service: . 
-mediator = internal # Address where to reach the Mediator. Empty for disabling mediation. <""|internal> -# cdrstats = # Address where to reach the cdrstats service: - -[mediator] -enabled = true # Starts Mediator service: . -# rater = internal # Address where to reach the Rater: -# cdrstats = internal # Address where to reach the cdrstats service: - -[cdrstats] -enabled = true # Starts the cdrstats service: -#queue_length = 50 # Number of items in the stats buffer -time_window = 1h # Will only keep the CDRs who's call setup time is not older than time.Now()-TimeWindow -save_interval = 5s -# metrics = ASR, ACD, ACC # Stat metric ids to build -# setup_interval = # Filter on CDR SetupTime -# tors = # Filter on CDR TOR fields -# cdr_hosts= # Filter on CDR CdrHost fields -# cdr_sources = # Filter on CDR CdrSource fields -# req_types = # Filter on CDR ReqType fields -# directions = # Filter on CDR Direction fields -# tenants = # Filter on CDR Tenant fields -# categories = # Filter on CDR Category fields -# accounts = # Filter on CDR Account fields -# subjects = # Filter on CDR Subject fields -# destination_prefixes = # Filter on CDR Destination prefixes -# usage_interval = # Filter on CDR Usage -# mediation_run_ids = # Filter on CDR MediationRunId fields -# rated_accounts = # Filter on CDR RatedAccount fields -# rated_subjects = # Filter on CDR RatedSubject fields -# cost_intervals = # Filter on CDR Cost - -[session_manager] -enabled = true # Starts SessionManager service: -switch_type = opensips # Defines the type of switch behind: - -[opensips] -listen_udp = :2020 # Address where to listen for datagram events coming from OpenSIPS -mi_addr = 172.16.254.77:8020 # Adress where to reach OpenSIPS mi_datagram module - -[mailer] -# server = localhost # The server to use when sending emails out -# auth_user = cgrates # Authenticate to email server using this user -# auth_passwd = CGRateS.org # Authenticate to email server with this password -# from_address = 
cgr-mailer@localhost.localdomain # From address used when sending emails out diff --git a/data/conf/samples/smfs/smfs.json b/data/conf/samples/smfs/smfs.json new file mode 100644 index 000000000..094e7eecf --- /dev/null +++ b/data/conf/samples/smfs/smfs.json @@ -0,0 +1,25 @@ +{ + +// Real-time Charging System for Telecom & ISP environments +// Copyright (C) ITsysCOM GmbH +// +// This file contains the default configuration hardcoded into CGRateS. +// This is what you get when you load CGRateS with an empty configuration file. + + +"sm_freeswitch": { + "enabled": true, + "rals_conns": [ + {"address": "127.0.0.1:2013"} + ], + "cdrs_conns": [ + {"address": "127.0.0.1:2013"} + ], + "debit_interval": "5s", + "channel_sync_interval": "10s", + "event_socket_conns":[ + {"address": "127.0.0.1:8021", "password": "ClueCon", "reconnects": 5} + ], +}, + +} \ No newline at end of file diff --git a/data/conf/samples/smg/cgrates.json b/data/conf/samples/smg/cgrates.json index 0cbfc28c2..d49f06d2b 100644 --- a/data/conf/samples/smg/cgrates.json +++ b/data/conf/samples/smg/cgrates.json @@ -4,13 +4,17 @@ // Used for cgradmin // Starts rater, scheduler +"general": { + "response_cache_ttl": "1s", +}, + "listen": { "rpc_json": ":2012", // RPC JSON listening address "rpc_gob": ":2013", // RPC GOB listening address "http": ":2080", // HTTP listening address }, -"rater": { +"rals": { "enabled": true, // enable Rater service: }, @@ -20,13 +24,11 @@ "cdrs": { "enabled": true, // start the CDR Server service: - "rater": "internal", // address where to reach the Rater for cost calculation, empty to disable functionality: <""|internal|x.y.z.y:1234> }, "sm_generic": { "enabled": true, - "rater": "internal", - "cdrs": "internal", + "session_ttl": "50ms", }, } diff --git a/data/conf/samples/smgeneric/cgrates.json b/data/conf/samples/smgeneric/cgrates.json index 7907e7a47..02d95e649 100644 --- a/data/conf/samples/smgeneric/cgrates.json +++ b/data/conf/samples/smgeneric/cgrates.json @@ -6,77 +6,18 
@@ // This file contains the default configuration hardcoded into CGRateS. // This is what you get when you load CGRateS with an empty configuration file. -//"general": { -// "http_skip_tls_verify": false, // if enabled Http Client will accept any TLS certificate -// "rounding_decimals": 5, // system level precision for floats -// "dbdata_encoding": "msgpack", // encoding used to store object data in strings: -// "tpexport_dir": "/var/log/cgrates/tpe", // path towards export folder for offline Tariff Plans -// "http_failed_dir": "/var/log/cgrates/http_failed", // directory path where we store failed http requests -// "default_reqtype": "*rated", // default request type to consider when missing from requests: <""|*prepaid|*postpaid|*pseudoprepaid|*rated> -// "default_category": "call", // default Type of Record to consider when missing from requests -// "default_tenant": "cgrates.org", // default Tenant to consider when missing from requests -// "default_subject": "cgrates", // default rating Subject to consider when missing from requests -// "default_timezone": "Local", // default timezone for timestamps where not specified <""|UTC|Local|$IANA_TZ_DB> -// "connect_attempts": 3, // initial server connect attempts -// "reconnects": -1, // number of retries in case of connection lost -// "response_cache_ttl": "3s", // the life span of a cached response -// "internal_ttl": "2m", // maximum duration to wait for internal connections before giving up -//}, - -//"listen": { -// "rpc_json": "127.0.0.1:2012", // RPC JSON listening address -// "rpc_gob": "127.0.0.1:2013", // RPC GOB listening address -// "http": "127.0.0.1:2080", // HTTP listening address -//}, - - -//"tariffplan_db": { // database used to store active tariff plan configuration -// "db_type": "redis", // tariffplan_db type: -// "db_host": "127.0.0.1", // tariffplan_db host address -// "db_port": 6379, // port to reach the tariffplan_db -// "db_name": "10", // tariffplan_db name to connect to -// "db_user": "", 
// sername to use when connecting to tariffplan_db -// "db_passwd": "", // password to use when connecting to tariffplan_db -//}, - - -//"data_db": { // database used to store runtime data (eg: accounts, cdr stats) -// "db_type": "redis", // data_db type: -// "db_host": "127.0.0.1", // data_db host address -// "db_port": 6379, // data_db port to reach the database -// "db_name": "11", // data_db database name to connect to -// "db_user": "", // username to use when connecting to data_db -// "db_passwd": "", // password to use when connecting to data_db -// "load_history_size": 10, // Number of records in the load history -//}, - - -//"stor_db": { // database used to store offline tariff plans and CDRs -// "db_type": "mysql", // stor database type to use: -// "db_host": "127.0.0.1", // the host to connect to -// "db_port": 3306, // the port to reach the stordb -// "db_name": "cgrates", // stor database name -// "db_user": "cgrates", // username to use when connecting to stordb -// "db_passwd": "CGRateS.org", // password to use when connecting to stordb -// "max_open_conns": 100, // maximum database connections opened -// "max_idle_conns": 10, // maximum database connections idle -//}, - - -//"balancer": { -// "enabled": false, // start Balancer service: -//}, - - -"rater": { +"rals": { "enabled": true, // enable Rater service: -// "balancer": "", // register to balancer as worker: <""|internal|x.y.z.y:1234> - "cdrstats": "internal", // address where to reach the cdrstats service, empty to disable stats functionality: <""|internal|x.y.z.y:1234> -// "historys": "", // address where to reach the history service, empty to disable history functionality: <""|internal|x.y.z.y:1234> -// "pubsubs": "", // address where to reach the pubusb service, empty to disable pubsub functionality: <""|internal|x.y.z.y:1234> - "users": "internal", // address where to reach the user service, empty to disable user profile functionality: <""|internal|x.y.z.y:1234> - "aliases": "internal", 
// address where to reach the aliases service, empty to disable aliases functionality: <""|internal|x.y.z.y:1234> + "cdrstats_conns": [ + {"address": "*internal"} + ], + "users_conns": [ + {"address": "*internal"} + ], + "aliases_conns": [ + {"address": "*internal"} + ], }, @@ -87,168 +28,19 @@ "cdrs": { "enabled": true, // start the CDR Server service: -// "extra_fields": [], // extra fields to store in CDRs for non-generic CDRs -// "store_cdrs": true, // store cdrs in storDb -// "rater": "internal", // address where to reach the Rater for cost calculation, empty to disable functionality: <""|internal|x.y.z.y:1234> -// "pubsubs": "", // address where to reach the pubusb service, empty to disable pubsub functionality: <""|internal|x.y.z.y:1234> -// "users": "", // address where to reach the user service, empty to disable user profile functionality: <""|internal|x.y.z.y:1234> -// "aliases": "", // address where to reach the aliases service, empty to disable aliases functionality: <""|internal|x.y.z.y:1234> -// "cdrstats": "", // address where to reach the cdrstats service, empty to disable stats functionality<""|internal|x.y.z.y:1234> -// "cdr_replication":[], // replicate the raw CDR to a number of servers }, "cdrstats": { "enabled": true, // starts the cdrstats service: -// "save_interval": "1m", // interval to save changed stats into dataDb storage }, -//"cdre": { -// "*default": { -// "cdr_format": "csv", // exported CDRs format -// "field_separator": ",", -// "data_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from KBytes to Bytes) -// "sms_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from SMS unit to call duration in some billing systems) -// "generic_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from GENERIC unit to call duration in some billing systems) -// "cost_multiply_factor": 1, // multiply cost before export, eg: add VAT -// "cost_rounding_decimals": -1, // 
rounding decimals for Cost values. -1 to disable rounding -// "cost_shift_digits": 0, // shift digits in the cost on export (eg: convert from EUR to cents) -// "mask_destination_id": "MASKED_DESTINATIONS", // destination id containing called addresses to be masked on export -// "mask_length": 0, // length of the destination suffix to be masked -// "export_dir": "/var/log/cgrates/cdre", // path where the exported CDRs will be placed -// "header_fields": [], // template of the exported header fields -// "content_fields": [ // template of the exported content fields -// {"tag": "CgrId", "cdr_field_id": "CgrId", "type": "cdrfield", "value": "CgrId"}, -// {"tag":"RunId", "cdr_field_id": "MediationRunId", "type": "cdrfield", "value": "MediationRunId"}, -// {"tag":"Tor", "cdr_field_id": "TOR", "type": "cdrfield", "value": "TOR"}, -// {"tag":"AccId", "cdr_field_id": "AccId", "type": "cdrfield", "value": "AccId"}, -// {"tag":"ReqType", "cdr_field_id": "ReqType", "type": "cdrfield", "value": "ReqType"}, -// {"tag":"Direction", "cdr_field_id": "Direction", "type": "cdrfield", "value": "Direction"}, -// {"tag":"Tenant", "cdr_field_id": "Tenant", "type": "cdrfield", "value": "Tenant"}, -// {"tag":"Category", "cdr_field_id": "Category", "type": "cdrfield", "value": "Category"}, -// {"tag":"Account", "cdr_field_id": "Account", "type": "cdrfield", "value": "Account"}, -// {"tag":"Subject", "cdr_field_id": "Subject", "type": "cdrfield", "value": "Subject"}, -// {"tag":"Destination", "cdr_field_id": "Destination", "type": "cdrfield", "value": "Destination"}, -// {"tag":"SetupTime", "cdr_field_id": "SetupTime", "type": "cdrfield", "value": "SetupTime", "layout": "2006-01-02T15:04:05Z07:00"}, -// {"tag":"AnswerTime", "cdr_field_id": "AnswerTime", "type": "cdrfield", "value": "AnswerTime", "layout": "2006-01-02T15:04:05Z07:00"}, -// {"tag":"Usage", "cdr_field_id": "Usage", "type": "cdrfield", "value": "Usage"}, -// {"tag":"Cost", "cdr_field_id": "Cost", "type": "cdrfield", "value": 
"Cost"}, -// ], -// "trailer_fields": [], // template of the exported trailer fields -// } -//}, - - -//"cdrc": { -// "*default": { -// "enabled": false, // enable CDR client functionality -// "dry_run": false, // do not send the CDRs to CDRS, just parse them -// "cdrs": "internal", // address where to reach CDR server. -// "cdr_format": "csv", // CDR file format -// "field_separator": ",", // separator used in case of csv files -// "timezone": "", // timezone for timestamps where not specified <""|UTC|Local|$IANA_TZ_DB> -// "run_delay": 0, // sleep interval in seconds between consecutive runs, 0 to use automation via inotify -// "max_open_files": 1024, // maximum simultaneous files to process, 0 for unlimited -// "data_usage_multiply_factor": 1024, // conversion factor for data usage -// "cdr_in_dir": "/var/log/cgrates/cdrc/in", // absolute path towards the directory where the CDRs are stored -// "cdr_out_dir": "/var/log/cgrates/cdrc/out", // absolute path towards the directory where processed CDRs will be moved -// "failed_calls_prefix": "missed_calls", // used in case of flatstore CDRs to avoid searching for BYE records -// "cdr_source_id": "freeswitch_csv", // free form field, tag identifying the source of the CDRs within CDRS database -// "cdr_filter": "", // filter CDR records to import -// "partial_record_cache": "10s", // duration to cache partial records when not pairing -// "header_fields": [], // template of the import header fields -// "content_fields":[ // import content_fields template, tag will match internally CDR field, in case of .csv value will be represented by index of the field value -// {"tag": "tor", "cdr_field_id": "TOR", "type": "cdrfield", "value": "2", "mandatory": true}, -// {"tag": "accid", "cdr_field_id": "AccId", "type": "cdrfield", "value": "3", "mandatory": true}, -// {"tag": "reqtype", "cdr_field_id": "ReqType", "type": "cdrfield", "value": "4", "mandatory": true}, -// {"tag": "direction", "cdr_field_id": "Direction", "type": 
"cdrfield", "value": "5", "mandatory": true}, -// {"tag": "tenant", "cdr_field_id": "Tenant", "type": "cdrfield", "value": "6", "mandatory": true}, -// {"tag": "category", "cdr_field_id": "Category", "type": "cdrfield", "value": "7", "mandatory": true}, -// {"tag": "account", "cdr_field_id": "Account", "type": "cdrfield", "value": "8", "mandatory": true}, -// {"tag": "subject", "cdr_field_id": "Subject", "type": "cdrfield", "value": "9", "mandatory": true}, -// {"tag": "destination", "cdr_field_id": "Destination", "type": "cdrfield", "value": "10", "mandatory": true}, -// {"tag": "setup_time", "cdr_field_id": "SetupTime", "type": "cdrfield", "value": "11", "mandatory": true}, -// {"tag": "answer_time", "cdr_field_id": "AnswerTime", "type": "cdrfield", "value": "12", "mandatory": true}, -// {"tag": "usage", "cdr_field_id": "Usage", "type": "cdrfield", "value": "13", "mandatory": true}, -// ], -// "trailer_fields": [], // template of the import trailer fields -// } -//}, - "sm_generic": { "enabled": true, // starts SessionManager service: -// "listen_bijson": "127.0.0.1:2014", // address where to listen for bidirectional JSON-RPC requests -// "rater": "internal", // address where to reach the Rater <""|internal|127.0.0.1:2013> -// "cdrs": "internal", // address where to reach CDR Server <""|internal|x.y.z.y:1234> -// "debit_interval": "10s", // interval to perform debits on. 
-// "min_call_duration": "0s", // only authorize calls with allowed duration higher than this -// "max_call_duration": "3h", // maximum call duration a prepaid call can last }, -//"sm_freeswitch": { -// "enabled": false, // starts SessionManager service: -// "rater": "internal", // address where to reach the Rater <""|internal|127.0.0.1:2013> -// "cdrs": "internal", // address where to reach CDR Server, empty to disable CDR capturing <""|internal|x.y.z.y:1234> -// "create_cdr": false, // create CDR out of events and sends them to CDRS component -// "extra_fields": [], // extra fields to store in auth/CDRs when creating them -// "debit_interval": "10s", // interval to perform debits on. -// "min_call_duration": "0s", // only authorize calls with allowed duration higher than this -// "max_call_duration": "3h", // maximum call duration a prepaid call can last -// "min_dur_low_balance": "5s", // threshold which will trigger low balance warnings for prepaid calls (needs to be lower than debit_interval) -// "low_balance_ann_file": "", // file to be played when low balance is reached for prepaid calls -// "empty_balance_context": "", // if defined, prepaid calls will be transfered to this context on empty balance -// "empty_balance_ann_file": "", // file to be played before disconnecting prepaid calls on empty balance (applies only if no context defined) -// "subscribe_park": true, // subscribe via fsock to receive park events -// "channel_sync_interval": "5m", // sync channels with freeswitch regularly -// "connections":[ // instantiate connections to multiple FreeSWITCH servers -// {"server": "127.0.0.1:8021", "password": "ClueCon", "reconnects": 5} -// ], -//}, - - -//"sm_kamailio": { -// "enabled": false, // starts SessionManager service: -// "rater": "internal", // address where to reach the Rater <""|internal|127.0.0.1:2013> -// "cdrs": "internal", // address where to reach CDR Server, empty to disable CDR capturing <""|internal|x.y.z.y:1234> -// "create_cdr": 
false, // create CDR out of events and sends them to CDRS component -// "debit_interval": "10s", // interval to perform debits on. -// "min_call_duration": "0s", // only authorize calls with allowed duration higher than this -// "max_call_duration": "3h", // maximum call duration a prepaid call can last -// "connections":[ // instantiate connections to multiple Kamailio servers -// {"evapi_addr": "127.0.0.1:8448", "reconnects": 5} -// ], -//}, - - -//"sm_opensips": { -// "enabled": false, // starts SessionManager service: -// "listen_udp": "127.0.0.1:2020", // address where to listen for datagram events coming from OpenSIPS -// "rater": "internal", // address where to reach the Rater <""|internal|127.0.0.1:2013> -// "cdrs": "internal", // address where to reach CDR Server, empty to disable CDR capturing <""|internal|x.y.z.y:1234> -// "reconnects": 5, // number of reconnects if connection is lost -// "create_cdr": false, // create CDR out of events and sends them to CDRS component -// "debit_interval": "10s", // interval to perform debits on. -// "min_call_duration": "0s", // only authorize calls with allowed duration higher than this -// "max_call_duration": "3h", // maximum call duration a prepaid call can last -// "events_subscribe_interval": "60s", // automatic events subscription to OpenSIPS, 0 to disable it -// "mi_addr": "127.0.0.1:8020", // address where to reach OpenSIPS MI to send session disconnects -//}, - - -//"historys": { -// "enabled": false, // starts History service: . -// "history_dir": "/var/log/cgrates/history", // location on disk where to store history files. -// "save_interval": "1s", // interval to save changed cache into .git archive -//}, - - -//"pubsubs": { -// "enabled": false, // starts PubSub service: . -//}, - - "aliases": { "enabled": true, // starts Aliases service: . }, @@ -256,45 +48,7 @@ "users": { "enabled": true, // starts User service: . 
-// "indexes": [], // user profile field indexes }, -//"mailer": { -// "server": "localhost", // the server to use when sending emails out -// "auth_user": "cgrates", // authenticate to email server using this user -// "auth_passwd": "CGRateS.org", // authenticate to email server with this password -// "from_address": "cgr-mailer@localhost.localdomain" // from address used when sending emails out -//}, - - -//"suretax": { -// "url": "", // API url -// "client_number": "", // client number, provided by SureTax -// "validation_key": "", // validation key provided by SureTax -// "business_unit": "", // client’s Business Unit -// "timezone": "Local", // convert the time of the events to this timezone before sending request out -// "include_local_cost": false, // sum local calculated cost with tax one in final cost -// "return_file_code": "0", // default or Quote purposes <0|Q> -// "response_group": "03", // determines how taxes are grouped for the response <03|13> -// "response_type": "D4", // determines the granularity of taxes and (optionally) the decimal precision for the tax calculations and amounts in the response -// "regulatory_code": "03", // provider type -// "client_tracking": "CgrId", // template extracting client information out of StoredCdr; <$RSRFields> -// "customer_number": "Subject", // template extracting customer number out of StoredCdr; <$RSRFields> -// "orig_number": "Subject", // template extracting origination number out of StoredCdr; <$RSRFields> -// "term_number": "Destination", // template extracting termination number out of StoredCdr; <$RSRFields> -// "bill_to_number": "", // template extracting billed to number out of StoredCdr; <$RSRFields> -// "zipcode": "", // template extracting billing zip code out of StoredCdr; <$RSRFields> -// "plus4": "", // template extracting billing zip code extension out of StoredCdr; <$RSRFields> -// "p2pzipcode": "", // template extracting secondary zip code out of StoredCdr; <$RSRFields> -// "p2pplus4": "", 
// template extracting secondary zip code extension out of StoredCdr; <$RSRFields> -// "units": "^1", // template extracting number of “lines” or unique charges contained within the revenue out of StoredCdr; <$RSRFields> -// "unit_type": "^00", // template extracting number of unique access lines out of StoredCdr; <$RSRFields> -// "tax_included": "^0", // template extracting tax included in revenue out of StoredCdr; <$RSRFields> -// "tax_situs_rule": "^04", // template extracting tax situs rule out of StoredCdr; <$RSRFields> -// "trans_type_code": "^010101", // template extracting transaction type indicator out of StoredCdr; <$RSRFields> -// "sales_type_code": "^R", // template extracting sales type code out of StoredCdr; <$RSRFields> -// "tax_exemption_code_list": "", // template extracting tax exemption code list out of StoredCdr; <$RSRFields> -//}, - -} \ No newline at end of file +} diff --git a/data/conf/samples/tutlocal/cgrates.json b/data/conf/samples/tutlocal/cgrates.json index 14e933058..3e3682d80 100644 --- a/data/conf/samples/tutlocal/cgrates.json +++ b/data/conf/samples/tutlocal/cgrates.json @@ -10,11 +10,17 @@ "http": ":2080", // HTTP listening address }, -"rater": { +"rals": { "enabled": true, // enable Rater service: - "cdrstats": "internal", // address where to reach the cdrstats service, empty to disable stats functionality<""|internal|x.y.z.y:1234> - "pubsubs": "internal", // address where to reach the pubusb service, empty to disable pubsub functionality: <""|internal|x.y.z.y:1234> - "users": "internal", // address where to reach the user service, empty to disable user profile functionality: <""|internal|x.y.z.y:1234> + "cdrstats_conns": [ + {"address": "*internal"} + ], + "pubsubs_conns": [ + {"address": "*internal"} + ], + "users_conns": [ + {"address": "*internal"} + ], }, "scheduler": { @@ -23,8 +29,9 @@ "cdrs": { "enabled": true, // start the CDR Server service: - "rater": "internal", // address where to reach the Rater for cost calculation, 
empty to disable functionality: <""|internal|x.y.z.y:1234> - "cdrstats": "internal", // address where to reach the cdrstats service, empty to disable stats functionality<""|internal|x.y.z.y:1234> + "cdrstats_conns": [ + {"address": "*internal"} + ], }, "cdrstats": { diff --git a/data/diameter/dict/huawei/huawei.xml b/data/diameter/dict/huawei/huawei.xml index b180087a7..4f48257d3 100644 --- a/data/diameter/dict/huawei/huawei.xml +++ b/data/diameter/dict/huawei/huawei.xml @@ -36,6 +36,15 @@ + + + + + + + + + @@ -133,7 +142,7 @@ - + @@ -143,4 +152,4 @@ - \ No newline at end of file + diff --git a/data/diameter/dict/huawei/vodafone.xml b/data/diameter/dict/huawei/vodafone.xml index ff56332c0..be03b5746 100644 --- a/data/diameter/dict/huawei/vodafone.xml +++ b/data/diameter/dict/huawei/vodafone.xml @@ -43,8 +43,8 @@ - + - \ No newline at end of file + diff --git a/data/docker/devel/Dockerfile b/data/docker/devel/Dockerfile index 47e251317..dea7243d8 100644 --- a/data/docker/devel/Dockerfile +++ b/data/docker/devel/Dockerfile @@ -17,7 +17,7 @@ RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv EA312927 RUN echo 'deb http://repo.mongodb.org/apt/debian wheezy/mongodb-org/3.2 main' | tee '/etc/apt/sources.list.d/mongodb-org-3.2.list' # install dependencies -RUN apt-get -y update && apt-get -y install git bzr mercurial redis-server mysql-server python-pycurl python-mysqldb postgresql postgresql-client sudo wget freeswitch-meta-vanilla vim zsh mongodb-org +RUN apt-get -y update && apt-get -y install git bzr mercurial redis-server mysql-server python-pycurl python-mysqldb postgresql postgresql-client sudo wget freeswitch-meta-vanilla vim zsh mongodb-org tmux rsyslog ngrep # add mongo conf COPY mongod.conf /etc/mongod.conf @@ -26,7 +26,7 @@ COPY mongod.conf /etc/mongod.conf RUN useradd -c CGRateS -d /var/run/cgrates -s /bin/false -r cgrates # install golang -RUN wget -qO- https://storage.googleapis.com/golang/go1.6.linux-amd64.tar.gz | tar xzf - -C /root/ +RUN 
wget -qO- https://storage.googleapis.com/golang/go1.6.2.linux-amd64.tar.gz | tar xzf - -C /root/ #install glide RUN GOROOT=/root/go GOPATH=/root/code /root/go/bin/go get github.com/Masterminds/glide @@ -34,6 +34,9 @@ RUN GOROOT=/root/go GOPATH=/root/code /root/go/bin/go get github.com/Masterminds #install oh-my-zsh RUN TERM=xterm sh -c "$(wget https://raw.github.com/robbyrussell/oh-my-zsh/master/tools/install.sh -O -)"; exit 0 +# change shell for tmux +RUN chsh -s /usr/bin/zsh + # cleanup RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* diff --git a/data/docker/devel/start.sh b/data/docker/devel/start.sh index fbacacd13..0a4a75591 100755 --- a/data/docker/devel/start.sh +++ b/data/docker/devel/start.sh @@ -2,6 +2,7 @@ sed -i 's/127.0.0.1/0.0.0.0/g' /etc/redis/redis.conf /etc/mysql/my.cnf echo 'host all all 0.0.0.0/32 md5'>>/etc/postgresql/9.4/main/pg_hba.conf +/etc/init.d/rsyslog start /etc/init.d/mysql start /etc/init.d/postgresql start /etc/init.d/redis-server start @@ -30,8 +31,10 @@ cd /root/cgr #glide -y devel.yaml install ./build.sh -# create cgr-engine link -ln -s /root/code/bin/cgr-engine /usr/bin/cgr-engine +# create cgr-engine and cgr-loader link +ln -s /root/code/bin/cgr-engine /usr/bin/ +ln -s /root/code/bin/cgr-loader /usr/bin/ + # expand freeswitch conf cd /usr/share/cgrates/tutorials/fs_evsock/freeswitch/etc/ && tar xzf freeswitch_conf.tar.gz diff --git a/data/storage/mysql/create_cdrs_tables.sql b/data/storage/mysql/create_cdrs_tables.sql index 3f5e0269d..c6e86d496 100644 --- a/data/storage/mysql/create_cdrs_tables.sql +++ b/data/storage/mysql/create_cdrs_tables.sql @@ -29,11 +29,11 @@ CREATE TABLE cdrs ( cost DECIMAL(20,4) NOT NULL, cost_details text, extra_info text, - created_at TIMESTAMP, - updated_at TIMESTAMP, - deleted_at TIMESTAMP, + created_at TIMESTAMP NULL, + updated_at TIMESTAMP NULL, + deleted_at TIMESTAMP NULL, PRIMARY KEY (id), - UNIQUE KEY cdrrun (cgrid, run_id) + UNIQUE KEY cdrrun (cgrid, run_id, origin_id) ); DROP 
TABLE IF EXISTS sm_costs; @@ -41,11 +41,15 @@ CREATE TABLE sm_costs ( id int(11) NOT NULL AUTO_INCREMENT, cgrid char(40) NOT NULL, run_id varchar(64) NOT NULL, + origin_host varchar(64) NOT NULL, + origin_id varchar(64) NOT NULL, cost_source varchar(64) NOT NULL, + `usage` DECIMAL(30,9) NOT NULL, cost_details text, - created_at TIMESTAMP, - deleted_at TIMESTAMP, + created_at TIMESTAMP NULL, + deleted_at TIMESTAMP NULL, PRIMARY KEY (`id`), UNIQUE KEY costid (cgrid,run_id), + KEY origin_idx (origin_host, origin_id), KEY deleted_at_idx (deleted_at) ); diff --git a/data/storage/mysql/create_tariffplan_tables.sql b/data/storage/mysql/create_tariffplan_tables.sql index b6240cdc5..52bbf074b 100644 --- a/data/storage/mysql/create_tariffplan_tables.sql +++ b/data/storage/mysql/create_tariffplan_tables.sql @@ -152,7 +152,7 @@ CREATE TABLE `tp_actions` ( `balance_tag` varchar(64) NOT NULL, `balance_type` varchar(24) NOT NULL, `directions` varchar(8) NOT NULL, - `units` varchar(24) NOT NULL, + `units` varchar(256) NOT NULL, `expiry_time` varchar(24) NOT NULL, `timing_tags` varchar(128) NOT NULL, `destination_tags` varchar(64) NOT NULL, diff --git a/data/storage/postgres/create_cdrs_tables.sql b/data/storage/postgres/create_cdrs_tables.sql index d4425fb89..ed9751694 100644 --- a/data/storage/postgres/create_cdrs_tables.sql +++ b/data/storage/postgres/create_cdrs_tables.sql @@ -30,9 +30,9 @@ CREATE TABLE cdrs ( cost_details jsonb, extra_info text, created_at TIMESTAMP, - updated_at TIMESTAMP, - deleted_at TIMESTAMP, - UNIQUE (cgrid, run_id) + updated_at TIMESTAMP NULL, + deleted_at TIMESTAMP NULL, + UNIQUE (cgrid, run_id, origin_id) ); ; DROP INDEX IF EXISTS deleted_at_cp_idx; @@ -44,11 +44,19 @@ CREATE TABLE sm_costs ( id SERIAL PRIMARY KEY, cgrid CHAR(40) NOT NULL, run_id VARCHAR(64) NOT NULL, + origin_host VARCHAR(64) NOT NULL, + origin_id VARCHAR(64) NOT NULL, cost_source VARCHAR(64) NOT NULL, + usage NUMERIC(30,9) NOT NULL, cost_details jsonb, created_at TIMESTAMP, - 
deleted_at TIMESTAMP, + deleted_at TIMESTAMP NULL, UNIQUE (cgrid, run_id) ); +DROP INDEX IF EXISTS cgrid_smcost_idx; +CREATE INDEX cgrid_smcost_idx ON sm_costs (cgrid, run_id); +DROP INDEX IF EXISTS origin_smcost_idx; +CREATE INDEX origin_smcost_idx ON sm_costs (origin_host, origin_id); DROP INDEX IF EXISTS deleted_at_smcost_idx; CREATE INDEX deleted_at_smcost_idx ON sm_costs (deleted_at); + diff --git a/data/storage/postgres/create_tariffplan_tables.sql b/data/storage/postgres/create_tariffplan_tables.sql index b5d8193c8..e2bc89a57 100644 --- a/data/storage/postgres/create_tariffplan_tables.sql +++ b/data/storage/postgres/create_tariffplan_tables.sql @@ -147,7 +147,7 @@ CREATE TABLE tp_actions ( balance_tag VARCHAR(64) NOT NULL, balance_type VARCHAR(24) NOT NULL, directions VARCHAR(8) NOT NULL, - units VARCHAR(10) NOT NULL, + units VARCHAR(256) NOT NULL, expiry_time VARCHAR(24) NOT NULL, timing_tags VARCHAR(128) NOT NULL, destination_tags VARCHAR(64) NOT NULL, diff --git a/data/tariffplans/cdrstats/CdrStats.csv b/data/tariffplans/cdrstats/CdrStats.csv index bf60b63b4..e52e8c15c 100644 --- a/data/tariffplans/cdrstats/CdrStats.csv +++ b/data/tariffplans/cdrstats/CdrStats.csv @@ -1,6 +1,6 @@ #Id[0],QueueLength[1],TimeWindow[2],SaveInerval[3],Metric[4],SetupInterval[5],TOR[6],CdrHost[7],CdrSource[8],ReqType[9],Direction[10],Tenant[11],Category[12],Account[13],Subject[14],DestinationPrefix[15],PddInterval[16],UsageInterval[17],Supplier[18],DisconnectCause[19],MediationRunIds[20],RatedAccount[21],RatedSubject[22],CostInterval[23],Triggers[24] -CDRST3,5,60m,,ASR,2014-07-29T15:00:00Z;2014-07-29T16:00:00Z,*voice,87.139.12.167,FS_JSON,rated,*out,cgrates.org,call,dan,dan,+49,,5m;10m,,,default,rif,rif,0;2,CDRST3_WARN_ASR +CDRST3,5,60m,,ASR,2014-07-29T15:00:00Z;2014-07-29T16:00:00Z,*voice,87.139.12.167,FS_JSON,rated,*out,cgrates.org,call,dan,dan,+49,,5m;10m,,,*default,rif,rif,0;2,CDRST3_WARN_ASR CDRST3,,,,ACD,,,,,,,,,,,,,,,,,,,,CDRST3_WARN_ACD 
CDRST3,,,,ACC,,,,,,,,,,,,,,,,,,,,CDRST3_WARN_ACC -CDRST4,10,0,,ASR,,,,,,,cgrates.org,call,,,,,,,,,,,,CDRST4_WARN_ASR +CDRST4,10,0,,ASR,,,,,,,cgrates.org,call,,,,,,,,*default,,,,CDRST4_WARN_ASR CDRST4,,,,ACD,,,,,,,,,,,,,,,,,,,,CDRST4_WARN_ACD diff --git a/data/tariffplans/testtp/AccountActions.csv b/data/tariffplans/testtp/AccountActions.csv index ca940949b..3adcdcb1f 100644 --- a/data/tariffplans/testtp/AccountActions.csv +++ b/data/tariffplans/testtp/AccountActions.csv @@ -4,4 +4,10 @@ cgrates.org,1002,PREPAID_10,STANDARD_TRIGGERS,, cgrates.org,1003,PREPAID_10,STANDARD_TRIGGERS,, cgrates.org,1004,PREPAID_10,STANDARD_TRIGGERS,, cgrates.org,1005,PREPAID_10,STANDARD_TRIGGERS,, -cgrates.org,1009,TEST_EXE,,, \ No newline at end of file +cgrates.org,1009,TEST_EXE,,, +cgrates.org,1010,TEST_DATA_r,,true, +cgrates.org,1011,TEST_VOICE,,, +cgrates.org,1012,PREPAID_10,,, +cgrates.org,1013,TEST_NEG,,, +cgrates.org,1014,TEST_RPC,,, +cgrates.org,1015,TEST_DID,,, diff --git a/data/tariffplans/testtp/ActionPlans.csv b/data/tariffplans/testtp/ActionPlans.csv index 670cf8c93..636888218 100644 --- a/data/tariffplans/testtp/ActionPlans.csv +++ b/data/tariffplans/testtp/ActionPlans.csv @@ -1,4 +1,9 @@ #Tag,ActionsTag,TimingTag,Weight PREPAID_10,PREPAID_10,ASAP,10 PREPAID_10,BONUS_1,ASAP,10 -TEST_EXE,TOPUP_EXE,ALWAYS,10 \ No newline at end of file +TEST_EXE,TOPUP_EXE,ALWAYS,10 +TEST_DATA_r,TOPUP_DATA_r,ASAP,10 +TEST_VOICE,TOPUP_VOICE,ASAP,10 +TEST_NEG,TOPUP_NEG,ASAP,10 +TEST_RPC,RPC,ALWAYS,10 +TEST_DID,DID,ALWAYS,10 diff --git a/data/tariffplans/testtp/ActionTriggers.csv b/data/tariffplans/testtp/ActionTriggers.csv index 795ef6ab8..8a1c88cac 100644 --- a/data/tariffplans/testtp/ActionTriggers.csv +++ b/data/tariffplans/testtp/ActionTriggers.csv @@ -2,6 +2,7 @@ STANDARD_TRIGGERS,,*min_balance,2,false,0,,,,*monetary,*out,,,,,,,,,,,LOG_BALANCE,10 STANDARD_TRIGGERS,,*max_balance,20,false,0,,,,*monetary,*out,,,,,,,,,,,LOG_BALANCE,10 
STANDARD_TRIGGERS,,*max_event_counter,15,false,0,,,,*monetary,*out,,FS_USERS,,,,,,,,,LOG_BALANCE,10 +STANDARD_TRIGGERS,,*max_balance_counter,1,false,0,,,,*monetary,*out,,,,,,,,,,,LOG_BALANCE,10 CDRST1_WARN_ASR,,*min_asr,45,true,1h,,,,,,,,,,,,,,,3,CDRST_WARN_HTTP,10 CDRST1_WARN_ACD,,*min_acd,10,true,1h,,,,,,,,,,,,,,,5,CDRST_WARN_HTTP,10 CDRST1_WARN_ACC,,*max_acc,10,true,10m,,,,,,,,,,,,,,,5,CDRST_WARN_HTTP,10 diff --git a/data/tariffplans/testtp/Actions.csv b/data/tariffplans/testtp/Actions.csv index 045fc0666..20d61a27d 100644 --- a/data/tariffplans/testtp/Actions.csv +++ b/data/tariffplans/testtp/Actions.csv @@ -4,4 +4,11 @@ BONUS_1,*topup,,,,*monetary,*out,,*any,,,*unlimited,,1,10,false,false,10 LOG_BALANCE,*log,,,,,,,,,,,,,,false,false,10 CDRST_WARN_HTTP,*call_url,http://localhost:8080,,,,,,,,,,,,,false,false,10 CDRST_LOG,*log,,,,,,,,,,,,,,false,false,10 -TOPUP_EXE,*topup,,,,*monetary,*out,,*any,,,*unlimited,,5,10,false,false,10 \ No newline at end of file +TOPUP_EXE,*topup,,,,*monetary,*out,,*any,,,*unlimited,,5,10,false,false,10 +TOPUP_DATA_r,*topup,,,,*monetary,*out,,DATA_DEST,,,*unlimited,,5000000,10,false,false,10 +TOPUP_DATA_r,*topup,,,,*data,*out,,DATA_DEST,datar,,*unlimited,,50000000000,10,false,false,10 +TOPUP_VOICE,*topup,,,,*voice,*out,,GERMANY_MOBILE,,,*unlimited,,50000,10,false,false,10 +TOPUP_NEG,*topup,,,,*voice,*out,,GERMANY;!GERMANY_MOBILE,*zero1m,,*unlimited,,100,10,false,false,10 +RPC,*cgr_rpc,"{""Address"": ""localhost:2013"",""Transport"":""*gob"",""Method"":""ApierV2.SetAccount"",""Attempts"":1,""Async"" :false,""Params"":{""Account"":""rpc"",""Tenant"":""cgrates.org""}}",,,,,,,,,,,,,,, +DID,*debit,,,,*monetary,*out,,*any,,,*unlimited,*any,"{""Method"":""*incremental"",""Params"":{""Units"":1, ""Interval"":""month"",""Increment"":""day""}}",10.0,,,10.0 +DID,*cdrlog,"{""action"":""^DID"",""prev_balance"":""BalanceValue""}",,,*monetary,*out,,*any,,,*unlimited,,,10.0,,,10.0 diff --git a/data/tariffplans/testtp/DerivedChargers.csv 
b/data/tariffplans/testtp/DerivedChargers.csv index b50586df9..5493da35d 100644 --- a/data/tariffplans/testtp/DerivedChargers.csv +++ b/data/tariffplans/testtp/DerivedChargers.csv @@ -3,3 +3,4 @@ *out,cgrates.org,call,dan,dan,,extra2,,,,,,^ivo,^ivo,,,,,,*default,*default,*default,*default *out,cgrates.org,call,dan,dan,,extra3,~filterhdr1:s/(.+)/special_run3/,,,,,^runusr3,^runusr3,,,,,,*default,*default,*default,*default *out,cgrates.org,call,dan,*any,,extra1,,,,,,^rif2,^rif2,,,,,,*default,*default,*default,*default +*out,cgrates.org,call,1011,1011,GERMANY,extra1,,,,,,,,^+4915,,,,,*default,*default,*default,*default \ No newline at end of file diff --git a/data/tariffplans/testtp/DestinationRates.csv b/data/tariffplans/testtp/DestinationRates.csv index b5d2474b3..2f51c5c18 100644 --- a/data/tariffplans/testtp/DestinationRates.csv +++ b/data/tariffplans/testtp/DestinationRates.csv @@ -3,3 +3,5 @@ DR_RETAIL,GERMANY,RT_1CENT,*up,4,0, DR_RETAIL,GERMANY_MOBILE,RT_1CENT,*up,4,0, DR_DATA_1,*any,RT_DATA_2c,*up,4,0, DR_SMS_1,*any,RT_SMS_5c,*up,4,0, +DR_DATA_r,DATA_DEST,RT_DATA_r,*up,5,0, +DR_FREE,GERMANY,RT_ZERO,*middle,2,0, diff --git a/data/tariffplans/testtp/Destinations.csv b/data/tariffplans/testtp/Destinations.csv index 192146546..37385ea00 100644 --- a/data/tariffplans/testtp/Destinations.csv +++ b/data/tariffplans/testtp/Destinations.csv @@ -3,3 +3,4 @@ GERMANY,+49 GERMANY_MOBILE,+4915 GERMANY_MOBILE,+4916 GERMANY_MOBILE,+4917 +DATA_DEST,222 diff --git a/data/tariffplans/testtp/Rates.csv b/data/tariffplans/testtp/Rates.csv index 991ce0720..b8860104b 100644 --- a/data/tariffplans/testtp/Rates.csv +++ b/data/tariffplans/testtp/Rates.csv @@ -2,3 +2,5 @@ RT_1CENT,0,1,1s,1s,0s RT_DATA_2c,0,0.002,10,10,0 RT_SMS_5c,0,0.005,1,1,0 +RT_DATA_r,0,0.1,1048576,10240,0 +RT_ZERO,0,0,1,1,0 diff --git a/data/tariffplans/testtp/RatingPlans.csv b/data/tariffplans/testtp/RatingPlans.csv index 7d15f1a92..5be68df5d 100644 --- a/data/tariffplans/testtp/RatingPlans.csv +++ 
b/data/tariffplans/testtp/RatingPlans.csv @@ -2,3 +2,5 @@ RP_RETAIL,DR_RETAIL,ALWAYS,10 RP_DATA1,DR_DATA_1,ALWAYS,10 RP_SMS1,DR_SMS_1,ALWAYS,10 +RP_DATAr,DR_DATA_r,ALWAYS,10 +RP_FREE,DR_FREE,ALWAYS,10 diff --git a/data/tariffplans/testtp/RatingProfiles.csv b/data/tariffplans/testtp/RatingProfiles.csv index db758126b..3a6a28222 100644 --- a/data/tariffplans/testtp/RatingProfiles.csv +++ b/data/tariffplans/testtp/RatingProfiles.csv @@ -2,3 +2,5 @@ *out,cgrates.org,call,*any,2012-01-01T00:00:00Z,RP_RETAIL,, *out,cgrates.org,data,*any,2012-01-01T00:00:00Z,RP_DATA1,, *out,cgrates.org,sms,*any,2012-01-01T00:00:00Z,RP_SMS1,, +*out,cgrates.org,data,datar,2016-01-01T00:00:00Z,RP_DATAr,, +*out,cgrates.org,call,free,2016-01-01T00:00:00Z,RP_FREE,, diff --git a/data/tariffplans/tutorial/ActionPlans.csv b/data/tariffplans/tutorial/ActionPlans.csv index 3d59fe3e9..69d8ee8fb 100644 --- a/data/tariffplans/tutorial/ActionPlans.csv +++ b/data/tariffplans/tutorial/ActionPlans.csv @@ -6,3 +6,4 @@ USE_SHARED_A,SHARED_A_0,*asap,10 PACKAGE_1001,TOPUP_RST_5,*asap,10 PACKAGE_1001,TOPUP_RST_SHARED_5,*asap,10 PACKAGE_1001,TOPUP_120_DST1003,*asap,10 +PACKAGE_1001,TOPUP_RST_DATA_100,*asap,10 diff --git a/data/tariffplans/tutorial/Actions.csv b/data/tariffplans/tutorial/Actions.csv index 93a3faa1f..83c3a9f66 100644 --- a/data/tariffplans/tutorial/Actions.csv +++ b/data/tariffplans/tutorial/Actions.csv @@ -5,6 +5,7 @@ TOPUP_RST_5,*topup_reset,,,,*voice,*out,,DST_1002,SPECIAL_1002,,*unlimited,,90,2 TOPUP_120_DST1003,*topup_reset,,,,*voice,*out,,DST_1003,,,*unlimited,,120,20,false,false,10 TOPUP_RST_SHARED_5,*topup,,,,*monetary,*out,,*any,,SHARED_A,*unlimited,,5,10,false,false,10 SHARED_A_0,*topup_reset,,,,*monetary,*out,,*any,,SHARED_A,*unlimited,,0,10,false,false,10 +TOPUP_RST_DATA_100,*topup_reset,,,,*data,*out,,*any,,,*unlimited,,102400,10,false,false,10 LOG_WARNING,*log,,,,,,,,,,,,,,false,false,10 DISABLE_AND_LOG,*log,,,,,,,,,,,,,,false,false,10 
DISABLE_AND_LOG,*disable_account,,,,,,,,,,,,,,false,false,10 diff --git a/data/tutorials/fs_evsock/cgrates/etc/cgrates/cgrates.json b/data/tutorials/fs_evsock/cgrates/etc/cgrates/cgrates.json index b21f7fd59..1425caab7 100644 --- a/data/tutorials/fs_evsock/cgrates/etc/cgrates/cgrates.json +++ b/data/tutorials/fs_evsock/cgrates/etc/cgrates/cgrates.json @@ -7,13 +7,23 @@ // This is what you get when you load CGRateS with an empty configuration file. -"rater": { +"rals": { "enabled": true, // enable Rater service: - "cdrstats": "internal", // address where to reach the cdrstats service, empty to disable stats functionality<""|internal|x.y.z.y:1234> - "historys": "internal", // address where to reach the history service, empty to disable history functionality: <""|internal|x.y.z.y:1234> - "pubsubs": "internal", // address where to reach the pubusb service, empty to disable pubsub functionality: <""|internal|x.y.z.y:1234> - "users": "internal", // address where to reach the user service, empty to disable user profile functionality: <""|internal|x.y.z.y:1234> - "aliases": "internal" + "cdrstats_conns": [ + {"address": "*internal"} + ], + "historys_conns": [ + {"address": "*internal"} + ], + "pubsubs_conns": [ + {"address": "*internal"} + ], + "users_conns": [ + {"address": "*internal"} + ], + "aliases_conns": [ + {"address": "*internal"} + ], }, @@ -24,8 +34,9 @@ "cdrs": { "enabled": true, // start the CDR Server service: - "rater": "internal", // address where to reach the Rater for cost calculation: <""|internal|x.y.z.y:1234> - "cdrstats": "internal", // address where to reach the cdrstats service. 
Empty to disable stats gathering <""|internal|x.y.z.y:1234> + "cdrstats_conns": [ + {"address": "*internal"} + ], }, @@ -99,12 +110,10 @@ "sm_freeswitch": { "enabled": true, // starts SessionManager service: - "rater": "internal", // address where to reach the Rater <""|internal|127.0.0.1:2013> - "cdrs": "internal", // address where to reach CDR Server, empty to disable CDR capturing <""|internal|x.y.z.y:1234> "debit_interval": "5s", // interval to perform debits on. "channel_sync_interval": "10s", - "connections":[ // instantiate connections to multiple FreeSWITCH servers - {"server": "127.0.0.1:8021", "password": "ClueCon", "reconnects": 15} + "event_socket_conns":[ // instantiate connections to multiple FreeSWITCH servers + {"address": "127.0.0.1:8021", "password": "ClueCon", "reconnects": 5} ], }, diff --git a/data/tutorials/kamevapi/cgrates/etc/cgrates/cgrates.json b/data/tutorials/kamevapi/cgrates/etc/cgrates/cgrates.json index 6d9890381..3a3764e9c 100644 --- a/data/tutorials/kamevapi/cgrates/etc/cgrates/cgrates.json +++ b/data/tutorials/kamevapi/cgrates/etc/cgrates/cgrates.json @@ -7,48 +7,59 @@ // This is what you get when you load CGRateS with an empty configuration file. 
-"rater": { - "enabled": true, // enable Rater service: - "cdrstats": "internal", // address where to reach the cdrstats service, empty to disable stats functionality<""|internal|x.y.z.y:1234> - "historys": "internal", // address where to reach the history service, empty to disable history functionality: <""|internal|x.y.z.y:1234> - "pubsubs": "internal", // address where to reach the pubusb service, empty to disable pubsub functionality: <""|internal|x.y.z.y:1234> - "users": "internal", // address where to reach the user service, empty to disable user profile functionality: <""|internal|x.y.z.y:1234> - "aliases": "internal" +"rals": { + "enabled": true, + "cdrstats_conns": [ + {"address": "*internal"} + ], + "historys_conns": [ + {"address": "*internal"} + ], + "pubsubs_conns": [ + {"address": "*internal"} + ], + "users_conns": [ + {"address": "*internal"} + ], + "aliases_conns": [ + {"address": "*internal"} + ], }, "scheduler": { - "enabled": true, // start Scheduler service: + "enabled": true, }, "cdrs": { - "enabled": true, // start the CDR Server service: - "rater": "internal", // address where to reach the Rater for cost calculation: <""|internal|x.y.z.y:1234> - "cdrstats": "internal", // address where to reach the cdrstats service. 
Empty to disable stats gathering <""|internal|x.y.z.y:1234> + "enabled": true, + "cdrstats_conns": [ + {"address": "*internal"} + ], }, "cdrstats": { - "enabled": true, // starts the cdrstats service: + "enabled": true, }, "cdre": { "*default": { - "cdr_format": "csv", // exported CDRs format + "cdr_format": "csv", "field_separator": ",", - "data_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from KBytes to Bytes) - "sms_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from SMS unit to call duration in some billing systems) - "generic_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from GENRIC unit to call duration in some billing systems) - "cost_multiply_factor": 1, // multiply cost before export, eg: add VAT - "cost_rounding_decimals": -1, // rounding decimals for Cost values. -1 to disable rounding - "cost_shift_digits": 0, // shift digits in the cost on export (eg: convert from EUR to cents) - "mask_destination_id": "MASKED_DESTINATIONS", // destination id containing called addresses to be masked on export - "mask_length": 0, // length of the destination suffix to be masked - "export_dir": "/tmp/cgr_kamevapi/cgrates/cdre", // path where the exported CDRs will be placed - "header_fields": [], // template of the exported header fields - "content_fields": [ // template of the exported content fields + "data_usage_multiply_factor": 1, + "sms_usage_multiply_factor": 1, + "generic_usage_multiply_factor": 1, + "cost_multiply_factor": 1, + "cost_rounding_decimals": -1, + "cost_shift_digits": 0, + "mask_destination_id": "MASKED_DESTINATIONS", + "mask_length": 0, + "export_dir": "/tmp/cgr_kamevapi/cgrates/cdre", + "header_fields": [], + "content_fields": [ {"tag": "CgrId", "cdr_field_id": "cgrid", "type": "cdrfield", "value": "cgrid"}, {"tag":"RunId", "cdr_field_id": "mediation_runid", "type": "cdrfield", "value": "mediation_runid"}, {"tag":"Tor", "cdr_field_id": "tor", "type": 
"cdrfield", "value": "tor"}, @@ -65,22 +76,22 @@ {"tag":"Usage", "cdr_field_id": "usage", "type": "cdrfield", "value": "usage"}, {"tag":"Cost", "cdr_field_id": "cost", "type": "cdrfield", "value": "cost"}, ], - "trailer_fields": [], // template of the exported trailer fields + "trailer_fields": [], }, "customer_tpl": { - "cdr_format": "csv", // exported CDRs format + "cdr_format": "csv", "field_separator": ";", - "data_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from KBytes to Bytes) - "sms_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from SMS unit to call duration in some billing systems) - "generic_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from GENERIC unit to call duration in some billing systems) - "cost_multiply_factor": 1, // multiply cost before export, eg: add VAT - "cost_rounding_decimals": -1, // rounding decimals for Cost values. -1 to disable rounding - "cost_shift_digits": 0, // shift digits in the cost on export (eg: convert from EUR to cents) - "mask_destination_id": "MASKED_DESTINATIONS", // destination id containing called addresses to be masked on export - "mask_length": 0, // length of the destination suffix to be masked - "export_dir": "/tmp/cgr_kamevapi/cgrates/cdre", // path where the exported CDRs will be placed - "header_fields": [], // template of the exported header fields - "content_fields": [ // template of the exported content fields + "data_usage_multiply_factor": 1, + "sms_usage_multiply_factor": 1, + "generic_usage_multiply_factor": 1, + "cost_multiply_factor": 1, + "cost_rounding_decimals": -1, + "cost_shift_digits": 0, + "mask_destination_id": "MASKED_DESTINATIONS", + "mask_length": 0, + "export_dir": "/tmp/cgr_kamevapi/cgrates/cdre", + "header_fields": [], + "content_fields": [ {"tag": "CgrId", "cdr_field_id": "cgrid", "type": "cdrfield", "value": "cgrid"}, {"tag":"AccId", "cdr_field_id": "accid", "type": "cdrfield", "value": 
"accid"}, {"tag":"ReqType", "cdr_field_id": "reqtype", "type": "cdrfield", "value": "reqtype"}, @@ -92,37 +103,36 @@ {"tag":"Usage", "cdr_field_id": "usage", "type": "cdrfield", "value": "usage"}, {"tag":"Cost", "cdr_field_id": "cost", "type": "cdrfield", "value": "cost"}, ], - "trailer_fields": [], // template of the exported trailer fields + "trailer_fields": [], } }, "sm_kamailio": { - "enabled": true, // starts SessionManager service: - "cdrs": "internal", // address where to reach CDR Server, empty to disable CDR capturing <""|internal|x.y.z.y:1234> - "create_cdr": true, // create CDR out of events and sends them to CDRS component + "enabled": true, + "create_cdr": true, }, "historys": { - "enabled": true, // starts History service: . - "history_dir": "/tmp/cgr_kamevapi/cgrates/history", // location on disk where to store history files. + "enabled": true, + "history_dir": "/tmp/cgr_kamevapi/cgrates/history", }, "pubsubs": { - "enabled": true, // starts PubSub service: . + "enabled": true, }, "aliases": { - "enabled": true, // starts PubSub service: . + "enabled": true, }, "users": { - "enabled": true, // starts User service: . - "indexes": ["Uuid"], // user profile field indexes + "enabled": true, + "indexes": ["Uuid"], }, diff --git a/data/tutorials/osips_async/cgrates/etc/cgrates/cgrates.json b/data/tutorials/osips_async/cgrates/etc/cgrates/cgrates.json index 9a3488e09..225830f24 100644 --- a/data/tutorials/osips_async/cgrates/etc/cgrates/cgrates.json +++ b/data/tutorials/osips_async/cgrates/etc/cgrates/cgrates.json @@ -7,25 +7,36 @@ // This is what you get when you load CGRateS with an empty configuration file. 
-"rater": { - "enabled": true, // enable Rater service: - "cdrstats": "internal", // address where to reach the cdrstats service, empty to disable stats functionality<""|internal|x.y.z.y:1234> - "historys": "internal", // address where to reach the history service, empty to disable history functionality: <""|internal|x.y.z.y:1234> - "pubsubs": "internal", // address where to reach the pubusb service, empty to disable pubsub functionality: <""|internal|x.y.z.y:1234> - "users": "internal", // address where to reach the user service, empty to disable user profile functionality: <""|internal|x.y.z.y:1234> - "aliases": "internal", +"rals": { + "enabled": true, + "cdrstats_conns": [ + {"address": "*internal"} + ], + "historys_conns": [ + {"address": "*internal"} + ], + "pubsubs_conns": [ + {"address": "*internal"} + ], + "users_conns": [ + {"address": "*internal"} + ], + "aliases_conns": [ + {"address": "*internal"} + ], }, "scheduler": { - "enabled": true, // start Scheduler service: + "enabled": true, }, "cdrs": { "enabled": true, // start the CDR Server service: - "rater": "internal", // address where to reach the Rater for cost calculation, empty to disable functionality: <""|internal|x.y.z.y:1234> - "cdrstats": "internal", // address where to reach the cdrstats service, empty to disable stats functionality<""|internal|x.y.z.y:1234> + "cdrstats_conns": [ + {"address": "*internal"} + ], }, @@ -99,13 +110,8 @@ "sm_opensips": { "enabled": true, // starts SessionManager service: - "listen_udp": "127.0.0.1:2020", // address where to listen for datagram events coming from OpenSIPS - "rater": "internal", // address where to reach the Rater <""|internal|127.0.0.1:2013> - "cdrs": "internal", // address where to reach CDR Server, empty to disable CDR capturing <""|internal|x.y.z.y:1234> "create_cdr": true, // create CDR out of events and sends them to CDRS component "debit_interval": "2s", // interval to perform debits on. 
- "events_subscribe_interval": "60s", // automatic events subscription to OpenSIPS, 0 to disable it - "mi_addr": "127.0.0.1:8020", // address where to reach OpenSIPS MI to send session disconnects }, diff --git a/data/vagrant/Vagrantfile b/data/vagrant/Vagrantfile index 680947427..85d56090a 100644 --- a/data/vagrant/Vagrantfile +++ b/data/vagrant/Vagrantfile @@ -38,7 +38,7 @@ Vagrant.configure(2) do |config| # the path on the host to the actual folder. The second argument is # the path on the guest to mount the folder. And the optional third # argument is a set of non-required options. - # config.vm.synced_folder "../data", "/vagrant_data" + config.vm.synced_folder "../../", "/home/vagrant/code/src/github.com/cgrates/cgrates", owner: "vagrant", group: "vagrant" # Provider-specific configuration so you can fine-tune various # backing providers for Vagrant. These expose provider-specific options. diff --git a/data/vagrant/cgrates_devel.yml b/data/vagrant/cgrates_devel.yml index 4002907c0..9b7d90478 100644 --- a/data/vagrant/cgrates_devel.yml +++ b/data/vagrant/cgrates_devel.yml @@ -25,61 +25,60 @@ apt: pkg={{ item }} update_cache=yes state=latest with_items: - git - - bzr - mercurial - redis-server - - mysql-server + - mysql-server - postgresql-9.4 - mongodb-org - freeswitch-meta-vanilla - freeswitch-mod-json-cdr - - libyuv-dev + - libyuv-dev + - python-mysqldb + - python-pymongo - name: update mysql root password for root account - mysql_user: name=cgrates host=localhost password={{ root_db_password }} + mysql_user: name=root host=localhost password={{ root_db_password }} state=present - name: copy .my.cnf template: src=my.cnf dest=/root/.my.cnf mode=0600 - -- hosts: all + +- hosts: all vars: root_db_password: CGRateS.org go_version: 1.6 - + tasks: + - name: create cgrates path + file: path=/home/vagrant/code/src/github.com/cgrates state=directory + - name: get golang - unarchive: src=https://storage.googleapis.com/golang/go{{ go_version }}.linux-amd64.tar.gz 
dest=~/go creates=~/go copy=no + unarchive: src=https://storage.googleapis.com/golang/go{{ go_version }}.linux-amd64.tar.gz dest=~/ creates=~/go copy=no - name: add variables to variables /etc/profile copy: src=golang.sh dest=/etc/profile.d/golang.sh become: yes - - name: get cgrates - git: repo=https://github.com/cgrates/cgrates.git dest=/home/vagrant/code/src/github.com/cgrates/cgrates - - - name: get glide - shell: GOROOT=/home/vagrant/go GOPATH=/home/vagrant/code ~/go/bin/go get -u -v github.com/Masterminds/glide - - - name: install cgrates - shell: cd /home/vagrant/code/src/github.com/cgrates/cgrates; ~/code/bin/glide install - - - name: create cgr-engine link - file: src=/home/vagrant/code/bin/cgr-engine dest=/usr/bin/cgr-engine state=link - become: yes - - name: create a link to data dir - become: yes file: src=/home/vagrant/code/src/github.com/cgrates/cgrates/data dest=/usr/share/cgrates state=link + become: yes - - name: expand freeswitch json conf - command: tar -xzvf /usr/share/cgrates/tutorials/fs_json/freeswitch/etc/freeswitch_conf.tar.gz - - - name: expand freeswitch csv conf - command: tar -xzvf /usr/share/cgrates/tutorials/fs_csv/freeswitch/etc/freeswitch_conf.tar.gz - - - name: setup database tables + #- name: expand freeswitch json conf + # unarchive: src=/usr/share/cgrates/tutorials/fs_json/freeswitch/etc/freeswitch_conf.tar.gz dest=/usr/share/cgrates/tutorials/fs_json/freeswitch/etc/ copy=no + + #- name: expand freeswitch csv conf + # unarchive: src=/usr/share/cgrates/tutorials/fs_csv/freeswitch/etc/freeswitch_conf.tar.gz dest=/usr/share/cgrates/tutorials/fs_json/freeswitch/etc/ copy=no + + - name: setup mysql tables shell: chdir=/usr/share/cgrates/storage/mysql ./setup_cgr_db.sh root {{ root_db_password }} localhost - + - name: setup postgress table + shell: chdir=/usr/share/cgrates/storage/postgres ./setup_cgr_db.sh + + - name: create cgrates user for mongo + mongodb_user: database=admin name=cgrates password={{root_db_password}} 
roles='userAdminAnyDatabase' state=present + - name: create link to cgrates dir file: src=~/code/src/github.com/cgrates/cgrates dest=~/cgr state=link + - name: create var folder + file: path=/var/log/cgrates state=directory owner=vagrant + become: yes diff --git a/engine/account.go b/engine/account.go index e231875a2..d70a974d3 100644 --- a/engine/account.go +++ b/engine/account.go @@ -127,7 +127,7 @@ func (acc *Account) setBalanceAction(a *Action) error { if a.Balance.ID != nil && *a.Balance.ID == utils.META_DEFAULT { balance.ID = utils.META_DEFAULT if a.Balance.Value != nil { - balance.Value = *a.Balance.Value + balance.Value = a.Balance.GetValue() } } else { a.Balance.ModifyBalance(balance) @@ -170,6 +170,7 @@ func (ub *Account) debitBalanceAction(a *Action, reset bool) error { return errors.New("nil action") } bClone := a.Balance.CreateBalance() + //log.Print("Bclone: ", utils.ToJSON(a.Balance)) if bClone == nil { return errors.New("nil balance") } @@ -190,6 +191,7 @@ func (ub *Account) debitBalanceAction(a *Action, reset bool) error { b.SubstractValue(bClone.GetValue()) b.dirty = true found = true + a.balanceValue = b.GetValue() } } // if it is not found then we add it to the list @@ -207,7 +209,7 @@ func (ub *Account) debitBalanceAction(a *Action, reset bool) error { } } bClone.dirty = true // Mark the balance as dirty since we have modified and it should be checked by action triggers - + a.balanceValue = bClone.GetValue() bClone.Uuid = utils.GenUUID() // alway overwrite the uuid for consistency // load ValueFactor if defined in extra parametrs if a.ExtraParameters != "" { @@ -287,7 +289,7 @@ func (ub *Account) getBalancesForPrefix(prefix, category, direction, tor string, if b.Disabled { continue } - if b.IsExpired() || (len(b.SharedGroups) == 0 && b.GetValue() <= 0) { + if b.IsExpired() || (len(b.SharedGroups) == 0 && b.GetValue() <= 0 && !b.Blocker) { continue } if sharedGroup != "" && b.SharedGroups[sharedGroup] == false { @@ -305,14 +307,20 @@ func (ub 
*Account) getBalancesForPrefix(prefix, category, direction, tor string, if x, err := cache2go.Get(utils.DESTINATION_PREFIX + p); err == nil { destIds := x.(map[interface{}]struct{}) for dId, _ := range destIds { - if b.DestinationIDs[dId.(string)] == true { - b.precision = len(p) - usefulBalances = append(usefulBalances, b) - break + includeDest, found := b.DestinationIDs[dId.(string)] + if found { + if includeDest { + b.precision = len(p) + usefulBalances = append(usefulBalances, b) + break + } else { // the balance had !, so now equals false => exclude balance + b.precision = 1 // fake to exit the outer loop + break + } } - if b.precision > 0 { + /*if b.precision > 0 { break - } + }*/ } } if b.precision > 0 { diff --git a/engine/account_test.go b/engine/account_test.go index 90be903d2..7abd8d6c1 100644 --- a/engine/account_test.go +++ b/engine/account_test.go @@ -856,7 +856,7 @@ func TestAccountdebitBalanceExists(t *testing.T) { BalanceMap: map[string]Balances{utils.SMS: Balances{&Balance{Value: 14}}, utils.DATA: Balances{&Balance{Value: 1024}}, utils.VOICE: Balances{&Balance{Value: 15, Weight: 20, DestinationIDs: utils.StringMap{"NAT": true}, Directions: utils.NewStringMap(utils.OUT)}, &Balance{Weight: 10, DestinationIDs: utils.StringMap{"RET": true}}}}, } newMb := &BalanceFilter{ - Value: utils.Float64Pointer(-10), + Value: &utils.ValueFormula{Static: -10}, Type: utils.StringPointer(utils.VOICE), Weight: utils.Float64Pointer(20), DestinationIDs: utils.StringMapPointer(utils.StringMap{"NAT": true}), @@ -883,19 +883,19 @@ func TestAccountAddMinuteNil(t *testing.T) { func TestAccountAddMinutBucketEmpty(t *testing.T) { mb1 := &BalanceFilter{ - Value: utils.Float64Pointer(-10), + Value: &utils.ValueFormula{Static: -10}, Type: utils.StringPointer(utils.VOICE), DestinationIDs: utils.StringMapPointer(utils.StringMap{"NAT": true}), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT)), } mb2 := &BalanceFilter{ - Value: utils.Float64Pointer(-10), + Value: 
&utils.ValueFormula{Static: -10}, Type: utils.StringPointer(utils.VOICE), DestinationIDs: utils.StringMapPointer(utils.StringMap{"NAT": true}), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT)), } mb3 := &BalanceFilter{ - Value: utils.Float64Pointer(-10), + Value: &utils.ValueFormula{Static: -10}, Type: utils.StringPointer(utils.VOICE), DestinationIDs: utils.StringMapPointer(utils.StringMap{"OTHER": true}), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT)), diff --git a/engine/action.go b/engine/action.go index fff13356c..5a3c8e754 100644 --- a/engine/action.go +++ b/engine/action.go @@ -32,6 +32,7 @@ import ( "github.com/cgrates/cgrates/config" "github.com/cgrates/cgrates/utils" + "github.com/cgrates/rpcclient" ) /* @@ -45,6 +46,7 @@ type Action struct { ExpirationString string // must stay as string because it can have relative values like 1month Weight float64 Balance *BalanceFilter + balanceValue float64 // balance value after action execution, used with cdrlog } const ( @@ -73,6 +75,7 @@ const ( CDRLOG = "*cdrlog" SET_DDESTINATIONS = "*set_ddestinations" TRANSFER_MONETARY_DEFAULT = "*transfer_monetary_default" + CGR_RPC = "*cgr_rpc" ) func (a *Action) Clone() *Action { @@ -90,57 +93,36 @@ func (a *Action) Clone() *Action { type actionTypeFunc func(*Account, *StatsQueueTriggered, *Action, Actions) error func getActionFunc(typ string) (actionTypeFunc, bool) { - switch typ { - case LOG: - return logAction, true - case CDRLOG: - return cdrLogAction, true - case RESET_TRIGGERS: - return resetTriggersAction, true - case SET_RECURRENT: - return setRecurrentAction, true - case UNSET_RECURRENT: - return unsetRecurrentAction, true - case ALLOW_NEGATIVE: - return allowNegativeAction, true - case DENY_NEGATIVE: - return denyNegativeAction, true - case RESET_ACCOUNT: - return resetAccountAction, true - case TOPUP_RESET: - return topupResetAction, true - case TOPUP: - return topupAction, true - case DEBIT_RESET: - return debitResetAction, 
true - case DEBIT: - return debitAction, true - case RESET_COUNTERS: - return resetCountersAction, true - case ENABLE_ACCOUNT: - return enableUserAction, true - case DISABLE_ACCOUNT: - return disableUserAction, true - //case ENABLE_DISABLE_BALANCE: - // return enableDisableBalanceAction, true - case CALL_URL: - return callUrl, true - case CALL_URL_ASYNC: - return callUrlAsync, true - case MAIL_ASYNC: - return mailAsync, true - case SET_DDESTINATIONS: - return setddestinations, true - case REMOVE_ACCOUNT: - return removeAccountAction, true - case REMOVE_BALANCE: - return removeBalanceAction, true - case SET_BALANCE: - return setBalanceAction, true - case TRANSFER_MONETARY_DEFAULT: - return transferMonetaryDefaultAction, true + actionFuncMap := map[string]actionTypeFunc{ + LOG: logAction, + CDRLOG: cdrLogAction, + RESET_TRIGGERS: resetTriggersAction, + SET_RECURRENT: setRecurrentAction, + UNSET_RECURRENT: unsetRecurrentAction, + ALLOW_NEGATIVE: allowNegativeAction, + DENY_NEGATIVE: denyNegativeAction, + RESET_ACCOUNT: resetAccountAction, + TOPUP_RESET: topupResetAction, + TOPUP: topupAction, + DEBIT_RESET: debitResetAction, + DEBIT: debitAction, + RESET_COUNTERS: resetCountersAction, + ENABLE_ACCOUNT: enableUserAction, + DISABLE_ACCOUNT: disableUserAction, + //case ENABLE_DISABLE_BALANCE: + // return enableDisableBalanceAction, true + CALL_URL: callUrl, + CALL_URL_ASYNC: callUrlAsync, + MAIL_ASYNC: mailAsync, + SET_DDESTINATIONS: setddestinations, + REMOVE_ACCOUNT: removeAccountAction, + REMOVE_BALANCE: removeBalanceAction, + SET_BALANCE: setBalanceAction, + TRANSFER_MONETARY_DEFAULT: transferMonetaryDefaultAction, + CGR_RPC: cgrRPCAction, } - return nil, false + f, exists := actionFuncMap[typ] + return f, exists } func logAction(ub *Account, sq *StatsQueueTriggered, a *Action, acs Actions) (err error) { @@ -181,6 +163,8 @@ func parseTemplateValue(rsrFlds utils.RSRFields, acnt *Account, action *Action) parsedValue += rsrFld.ParseValue(action.Id) case "ActionType": 
parsedValue += rsrFld.ParseValue(action.ActionType) + case "ActionValue": + parsedValue += rsrFld.ParseValue(strconv.FormatFloat(b.GetValue(), 'f', -1, 64)) case "BalanceType": parsedValue += rsrFld.ParseValue(action.Balance.GetType()) case "BalanceUUID": @@ -188,7 +172,7 @@ func parseTemplateValue(rsrFlds utils.RSRFields, acnt *Account, action *Action) case "BalanceID": parsedValue += rsrFld.ParseValue(b.ID) case "BalanceValue": - parsedValue += rsrFld.ParseValue(strconv.FormatFloat(b.GetValue(), 'f', -1, 64)) + parsedValue += rsrFld.ParseValue(strconv.FormatFloat(action.balanceValue, 'f', -1, 64)) case "DestinationIDs": parsedValue += rsrFld.ParseValue(b.DestinationIDs.String()) case "ExtraParameters": @@ -215,7 +199,7 @@ func cdrLogAction(acc *Account, sq *StatsQueueTriggered, a *Action, acs Actions) utils.TENANT: utils.ParseRSRFieldsMustCompile(utils.TENANT, utils.INFIELD_SEP), utils.ACCOUNT: utils.ParseRSRFieldsMustCompile(utils.ACCOUNT, utils.INFIELD_SEP), utils.SUBJECT: utils.ParseRSRFieldsMustCompile(utils.ACCOUNT, utils.INFIELD_SEP), - utils.COST: utils.ParseRSRFieldsMustCompile("BalanceValue", utils.INFIELD_SEP), + utils.COST: utils.ParseRSRFieldsMustCompile("ActionValue", utils.INFIELD_SEP), } template := make(map[string]string) @@ -329,7 +313,9 @@ func topupResetAction(ub *Account, sq *StatsQueueTriggered, a *Action, acs Actio } c := a.Clone() genericMakeNegative(c) - return genericDebit(ub, c, true) + err = genericDebit(ub, c, true) + a.balanceValue = c.balanceValue + return } func topupAction(ub *Account, sq *StatsQueueTriggered, a *Action, acs Actions) (err error) { @@ -338,7 +324,9 @@ func topupAction(ub *Account, sq *StatsQueueTriggered, a *Action, acs Actions) ( } c := a.Clone() genericMakeNegative(c) - return genericDebit(ub, c, false) + err = genericDebit(ub, c, false) + a.balanceValue = c.balanceValue + return } func debitResetAction(ub *Account, sq *StatsQueueTriggered, a *Action, acs Actions) (err error) { @@ -370,7 +358,7 @@ func 
resetCountersAction(ub *Account, sq *StatsQueueTriggered, a *Action, acs Ac } func genericMakeNegative(a *Action) { - if a.Balance != nil && a.Balance.GetValue() >= 0 { // only apply if not allready negative + if a.Balance != nil && a.Balance.GetValue() > 0 { // only apply if not allready negative a.Balance.SetValue(-a.Balance.GetValue()) } } @@ -426,9 +414,13 @@ func callUrl(ub *Account, sq *StatsQueueTriggered, a *Action, acs Actions) error if sq != nil { o = sq } + jsn, err := json.Marshal(o) + if err != nil { + return err + } cfg := config.CgrConfig() fallbackPath := path.Join(cfg.HttpFailedDir, fmt.Sprintf("act_%s_%s_%s.json", a.ActionType, a.ExtraParameters, utils.GenUUID())) - _, err := utils.HttpPoster(a.ExtraParameters, cfg.HttpSkipTlsVerify, o, utils.CONTENT_JSON, 1, fallbackPath) + _, err = utils.HttpPoster(a.ExtraParameters, cfg.HttpSkipTlsVerify, jsn, utils.CONTENT_JSON, 1, fallbackPath) return err } @@ -441,9 +433,13 @@ func callUrlAsync(ub *Account, sq *StatsQueueTriggered, a *Action, acs Actions) if sq != nil { o = sq } + jsn, err := json.Marshal(o) + if err != nil { + return err + } cfg := config.CgrConfig() fallbackPath := path.Join(cfg.HttpFailedDir, fmt.Sprintf("act_%s_%s_%s.json", a.ActionType, a.ExtraParameters, utils.GenUUID())) - go utils.HttpPoster(a.ExtraParameters, cfg.HttpSkipTlsVerify, o, utils.CONTENT_JSON, 3, fallbackPath) + go utils.HttpPoster(a.ExtraParameters, cfg.HttpSkipTlsVerify, jsn, utils.CONTENT_JSON, 3, fallbackPath) return nil } @@ -632,6 +628,49 @@ func transferMonetaryDefaultAction(acc *Account, sq *StatsQueueTriggered, a *Act return nil } +type RPCRequest struct { + Address string + Transport string + Method string + Attempts int + Async bool + Params map[string]interface{} +} + +func cgrRPCAction(account *Account, sq *StatsQueueTriggered, a *Action, acs Actions) error { + req := RPCRequest{} + if err := json.Unmarshal([]byte(a.ExtraParameters), &req); err != nil { + return err + } + params, err := 
utils.GetRpcParams(req.Method) + if err != nil { + return err + } + var client rpcclient.RpcClientConnection + if req.Address != utils.MetaInternal { + if client, err = rpcclient.NewRpcClient("tcp", req.Address, req.Attempts, 0, req.Transport, nil); err != nil { + return err + } + } else { + client = params.Object.(rpcclient.RpcClientConnection) + } + in, out := params.InParam, params.OutParam + p, err := utils.FromMapStringInterfaceValue(req.Params, in) + if err != nil { + return err + } + if !req.Async { + err = client.Call(req.Method, p, out) + utils.Logger.Info(fmt.Sprintf("<*cgr_rpc> result: %s err: %v", utils.ToJSON(out), err)) + return err + } + go func() { + err := client.Call(req.Method, p, out) + utils.Logger.Info(fmt.Sprintf("<*cgr_rpc> result: %s err: %v", utils.ToJSON(out), err)) + }() + return nil +} + // Structure to store actions according to weight type Actions []*Action diff --git a/engine/action_plan.go b/engine/action_plan.go index 79095227c..563ea9413 100644 --- a/engine/action_plan.go +++ b/engine/action_plan.go @@ -291,7 +291,6 @@ func (at *ActionTiming) Execute() (err error) { transactionFailed := false removeAccountActionFound := false for _, a := range aac { - //log.Print("A: ", utils.ToJSON(a)) // check action filter if len(a.Filter) > 0 { matched, err := acc.matchActionFilter(a.Filter) diff --git a/engine/actions_test.go b/engine/actions_test.go index 1fea3f47d..26c358c8d 100644 --- a/engine/actions_test.go +++ b/engine/actions_test.go @@ -22,6 +22,7 @@ import ( "encoding/json" "fmt" "reflect" + "strings" "testing" "time" @@ -412,7 +413,7 @@ func TestActionPlanLogFunction(t *testing.T) { ActionType: "*log", Balance: &BalanceFilter{ Type: utils.StringPointer("test"), - Value: utils.Float64Pointer(1.1), + Value: &utils.ValueFormula{Static: 1.1}, }, } at := &ActionTiming{ @@ -429,7 +430,7 @@ func TestActionPlanFunctionNotAvailable(t *testing.T) { ActionType: "VALID_FUNCTION_TYPE", Balance: &BalanceFilter{ Type: utils.StringPointer("test"), 
- Value: utils.Float64Pointer(1.1), + Value: &utils.ValueFormula{Static: 1.1}, }, } at := &ActionTiming{ @@ -658,7 +659,7 @@ func TestActionTriggerMatchAll(t *testing.T) { Type: utils.StringPointer(utils.MONETARY), RatingSubject: utils.StringPointer("test1"), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT)), - Value: utils.Float64Pointer(2), + Value: &utils.ValueFormula{Static: 2}, Weight: utils.Float64Pointer(1.0), DestinationIDs: utils.StringMapPointer(utils.NewStringMap("NAT")), SharedGroups: utils.StringMapPointer(utils.NewStringMap("test2")), @@ -668,7 +669,7 @@ func TestActionTriggerMatchAll(t *testing.T) { Type: utils.StringPointer(utils.MONETARY), RatingSubject: utils.StringPointer("test1"), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT)), - Value: utils.Float64Pointer(2), + Value: &utils.ValueFormula{Static: 2}, Weight: utils.Float64Pointer(1.0), DestinationIDs: utils.StringMapPointer(utils.NewStringMap("NAT")), SharedGroups: utils.StringMapPointer(utils.NewStringMap("test2")), @@ -798,7 +799,7 @@ func TestActionTopupResetCredit(t *testing.T) { UnitCounters: UnitCounters{utils.MONETARY: []*UnitCounter{&UnitCounter{Counters: CounterFilters{&CounterFilter{Value: 1, Filter: &BalanceFilter{Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}}}}}}, ActionTriggers: ActionTriggers{&ActionTrigger{Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}, ThresholdValue: 2, ActionsID: "TEST_ACTIONS", Executed: true}, &ActionTrigger{Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}, ThresholdValue: 2, ActionsID: "TEST_ACTIONS", Executed: true}}, } - a := &Action{Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY), Value: utils.Float64Pointer(10), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}} + a := &Action{Balance: 
&BalanceFilter{Type: utils.StringPointer(utils.MONETARY), Value: &utils.ValueFormula{Static: 10}, Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}} topupResetAction(ub, nil, a, nil) if ub.AllowNegative || ub.BalanceMap[utils.MONETARY].GetTotalValue() != 10 || @@ -817,7 +818,7 @@ func TestActionTopupValueFactor(t *testing.T) { a := &Action{ Balance: &BalanceFilter{ Type: utils.StringPointer(utils.MONETARY), - Value: utils.Float64Pointer(10), + Value: &utils.ValueFormula{Static: 10}, Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT)), }, ExtraParameters: `{"*monetary":2.0}`, @@ -838,7 +839,7 @@ func TestActionTopupResetCreditId(t *testing.T) { }, }, } - a := &Action{Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY), ID: utils.StringPointer("TEST_B"), Value: utils.Float64Pointer(10), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}} + a := &Action{Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY), ID: utils.StringPointer("TEST_B"), Value: &utils.ValueFormula{Static: 10}, Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}} topupResetAction(ub, nil, a, nil) if ub.AllowNegative || ub.BalanceMap[utils.MONETARY].GetTotalValue() != 110 || @@ -857,7 +858,7 @@ func TestActionTopupResetCreditNoId(t *testing.T) { }, }, } - a := &Action{Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY), Value: utils.Float64Pointer(10), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}} + a := &Action{Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY), Value: &utils.ValueFormula{Static: 10}, Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}} topupResetAction(ub, nil, a, nil) if ub.AllowNegative || ub.BalanceMap[utils.MONETARY].GetTotalValue() != 20 || @@ -875,7 +876,7 @@ func TestActionTopupResetMinutes(t *testing.T) { UnitCounters: UnitCounters{utils.MONETARY: []*UnitCounter{&UnitCounter{Counters: CounterFilters{&CounterFilter{Value: 
1, Filter: &BalanceFilter{Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}}}}}}, ActionTriggers: ActionTriggers{&ActionTrigger{Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}, ThresholdValue: 2, ActionsID: "TEST_ACTIONS", Executed: true}, &ActionTrigger{Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}, ThresholdValue: 2, ActionsID: "TEST_ACTIONS", Executed: true}}, } - a := &Action{Balance: &BalanceFilter{Type: utils.StringPointer(utils.VOICE), Value: utils.Float64Pointer(5), Weight: utils.Float64Pointer(20), DestinationIDs: utils.StringMapPointer(utils.NewStringMap("NAT")), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}} + a := &Action{Balance: &BalanceFilter{Type: utils.StringPointer(utils.VOICE), Value: &utils.ValueFormula{Static: 5}, Weight: utils.Float64Pointer(20), DestinationIDs: utils.StringMapPointer(utils.NewStringMap("NAT")), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}} topupResetAction(ub, nil, a, nil) if ub.AllowNegative || ub.BalanceMap[utils.VOICE].GetTotalValue() != 5 || @@ -894,7 +895,7 @@ func TestActionTopupCredit(t *testing.T) { UnitCounters: UnitCounters{utils.MONETARY: []*UnitCounter{&UnitCounter{Counters: CounterFilters{&CounterFilter{Value: 1, Filter: &BalanceFilter{Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}}}}}}, ActionTriggers: ActionTriggers{&ActionTrigger{Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}, ThresholdValue: 2, ActionsID: "TEST_ACTIONS", Executed: true}, &ActionTrigger{Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}, ThresholdValue: 2, ActionsID: "TEST_ACTIONS", Executed: true}}, } - a := &Action{Balance: 
&BalanceFilter{Type: utils.StringPointer(utils.MONETARY), Value: utils.Float64Pointer(10), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}} + a := &Action{Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY), Value: &utils.ValueFormula{Static: 10}, Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}} topupAction(ub, nil, a, nil) if ub.AllowNegative || ub.BalanceMap[utils.MONETARY].GetTotalValue() != 110 || @@ -912,7 +913,7 @@ func TestActionTopupMinutes(t *testing.T) { UnitCounters: UnitCounters{utils.MONETARY: []*UnitCounter{&UnitCounter{Counters: CounterFilters{&CounterFilter{Value: 1}}}}}, ActionTriggers: ActionTriggers{&ActionTrigger{Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY)}, ThresholdValue: 2, ActionsID: "TEST_ACTIONS", Executed: true}, &ActionTrigger{Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY)}, ThresholdValue: 2, ActionsID: "TEST_ACTIONS", Executed: true}}, } - a := &Action{Balance: &BalanceFilter{Type: utils.StringPointer(utils.VOICE), Value: utils.Float64Pointer(5), Weight: utils.Float64Pointer(20), DestinationIDs: utils.StringMapPointer(utils.NewStringMap("NAT")), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}} + a := &Action{Balance: &BalanceFilter{Type: utils.StringPointer(utils.VOICE), Value: &utils.ValueFormula{Static: 5}, Weight: utils.Float64Pointer(20), DestinationIDs: utils.StringMapPointer(utils.NewStringMap("NAT")), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}} topupAction(ub, nil, a, nil) if ub.AllowNegative || ub.BalanceMap[utils.VOICE].GetTotalValue() != 15 || @@ -931,7 +932,7 @@ func TestActionDebitCredit(t *testing.T) { UnitCounters: UnitCounters{utils.MONETARY: []*UnitCounter{&UnitCounter{Counters: CounterFilters{&CounterFilter{Value: 1, Filter: &BalanceFilter{Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}}}}}}, ActionTriggers: ActionTriggers{&ActionTrigger{Balance: &BalanceFilter{Type: 
utils.StringPointer(utils.MONETARY), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}, ThresholdValue: 2, ActionsID: "TEST_ACTIONS", Executed: true}, &ActionTrigger{Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}, ThresholdValue: 2, ActionsID: "TEST_ACTIONS", Executed: true}}, } - a := &Action{Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY), Value: utils.Float64Pointer(10), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}} + a := &Action{Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY), Value: &utils.ValueFormula{Static: 10}, Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}} debitAction(ub, nil, a, nil) if ub.AllowNegative || ub.BalanceMap[utils.MONETARY].GetTotalValue() != 90 || @@ -949,7 +950,7 @@ func TestActionDebitMinutes(t *testing.T) { UnitCounters: UnitCounters{utils.MONETARY: []*UnitCounter{&UnitCounter{Counters: CounterFilters{&CounterFilter{Value: 1}}}}}, ActionTriggers: ActionTriggers{&ActionTrigger{Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY)}, ThresholdValue: 2, ActionsID: "TEST_ACTIONS", Executed: true}, &ActionTrigger{Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY)}, ThresholdValue: 2, ActionsID: "TEST_ACTIONS", Executed: true}}, } - a := &Action{Balance: &BalanceFilter{Type: utils.StringPointer(utils.VOICE), Value: utils.Float64Pointer(5), Weight: utils.Float64Pointer(20), DestinationIDs: utils.StringMapPointer(utils.NewStringMap("NAT")), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}} + a := &Action{Balance: &BalanceFilter{Type: utils.StringPointer(utils.VOICE), Value: &utils.ValueFormula{Static: 5}, Weight: utils.Float64Pointer(20), DestinationIDs: utils.StringMapPointer(utils.NewStringMap("NAT")), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT))}} debitAction(ub, nil, a, nil) if ub.AllowNegative || 
ub.BalanceMap[utils.VOICE][0].GetValue() != 5 || @@ -1118,7 +1119,7 @@ func TestActionPlanLogging(t *testing.T) { } func TestActionMakeNegative(t *testing.T) { - a := &Action{Balance: &BalanceFilter{Value: utils.Float64Pointer(10)}} + a := &Action{Balance: &BalanceFilter{Value: &utils.ValueFormula{Static: 10}}} genericMakeNegative(a) if a.Balance.GetValue() > 0 { t.Error("Failed to make negative: ", a) @@ -1152,7 +1153,7 @@ func TestTopupAction(t *testing.T) { initialUb, _ := accountingStorage.GetAccount("vdf:minu") a := &Action{ ActionType: TOPUP, - Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY), Value: utils.Float64Pointer(25), DestinationIDs: utils.StringMapPointer(utils.NewStringMap("RET")), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT)), Weight: utils.Float64Pointer(20)}, + Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY), Value: &utils.ValueFormula{Static: 25}, DestinationIDs: utils.StringMapPointer(utils.NewStringMap("RET")), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT)), Weight: utils.Float64Pointer(20)}, } at := &ActionTiming{ @@ -1173,7 +1174,7 @@ func TestTopupActionLoaded(t *testing.T) { initialUb, _ := accountingStorage.GetAccount("vdf:minitsboy") a := &Action{ ActionType: TOPUP, - Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY), Value: utils.Float64Pointer(25), DestinationIDs: utils.StringMapPointer(utils.NewStringMap("RET")), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT)), Weight: utils.Float64Pointer(20)}, + Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY), Value: &utils.ValueFormula{Static: 25}, DestinationIDs: utils.StringMapPointer(utils.NewStringMap("RET")), Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT)), Weight: utils.Float64Pointer(20)}, } at := &ActionTiming{ @@ -1200,7 +1201,7 @@ func TestActionCdrlogEmpty(t *testing.T) { err := cdrLogAction(acnt, nil, cdrlog, Actions{ &Action{ ActionType: DEBIT, 
- Balance: &BalanceFilter{Value: utils.Float64Pointer(25), DestinationIDs: utils.StringMapPointer(utils.NewStringMap("RET")), Weight: utils.Float64Pointer(20)}, + Balance: &BalanceFilter{Value: &utils.ValueFormula{Static: 25}, DestinationIDs: utils.StringMapPointer(utils.NewStringMap("RET")), Weight: utils.Float64Pointer(20)}, }, }) if err != nil { @@ -1222,11 +1223,11 @@ func TestActionCdrlogWithParams(t *testing.T) { err := cdrLogAction(acnt, nil, cdrlog, Actions{ &Action{ ActionType: DEBIT, - Balance: &BalanceFilter{Value: utils.Float64Pointer(25), DestinationIDs: utils.StringMapPointer(utils.NewStringMap("RET")), Weight: utils.Float64Pointer(20)}, + Balance: &BalanceFilter{Value: &utils.ValueFormula{Static: 25}, DestinationIDs: utils.StringMapPointer(utils.NewStringMap("RET")), Weight: utils.Float64Pointer(20)}, }, &Action{ ActionType: DEBIT_RESET, - Balance: &BalanceFilter{Value: utils.Float64Pointer(25), DestinationIDs: utils.StringMapPointer(utils.NewStringMap("RET")), Weight: utils.Float64Pointer(20)}, + Balance: &BalanceFilter{Value: &utils.ValueFormula{Static: 25}, DestinationIDs: utils.StringMapPointer(utils.NewStringMap("RET")), Weight: utils.Float64Pointer(20)}, }, }) if err != nil { @@ -1249,11 +1250,11 @@ func TestActionCdrLogParamsWithOverload(t *testing.T) { err := cdrLogAction(acnt, nil, cdrlog, Actions{ &Action{ ActionType: DEBIT, - Balance: &BalanceFilter{Value: utils.Float64Pointer(25), DestinationIDs: utils.StringMapPointer(utils.NewStringMap("RET")), Weight: utils.Float64Pointer(20)}, + Balance: &BalanceFilter{Value: &utils.ValueFormula{Static: 25}, DestinationIDs: utils.StringMapPointer(utils.NewStringMap("RET")), Weight: utils.Float64Pointer(20)}, }, &Action{ ActionType: DEBIT_RESET, - Balance: &BalanceFilter{Value: utils.Float64Pointer(25), DestinationIDs: utils.StringMapPointer(utils.NewStringMap("RET")), Weight: utils.Float64Pointer(20)}, + Balance: &BalanceFilter{Value: &utils.ValueFormula{Static: 25}, DestinationIDs: 
utils.StringMapPointer(utils.NewStringMap("RET")), Weight: utils.Float64Pointer(20)}, }, }) if err != nil { @@ -1334,11 +1335,11 @@ func TestActionTransactionFuncType(t *testing.T) { actions: []*Action{ &Action{ ActionType: TOPUP, - Balance: &BalanceFilter{Value: utils.Float64Pointer(1.1), Type: utils.StringPointer(utils.MONETARY)}, + Balance: &BalanceFilter{Value: &utils.ValueFormula{Static: 1.1}, Type: utils.StringPointer(utils.MONETARY)}, }, &Action{ ActionType: "VALID_FUNCTION_TYPE", - Balance: &BalanceFilter{Value: utils.Float64Pointer(1.1), Type: utils.StringPointer("test")}, + Balance: &BalanceFilter{Value: &utils.ValueFormula{Static: 1.1}, Type: utils.StringPointer("test")}, }, }, } @@ -1370,7 +1371,7 @@ func TestActionTransactionBalanceType(t *testing.T) { actions: []*Action{ &Action{ ActionType: TOPUP, - Balance: &BalanceFilter{Value: utils.Float64Pointer(1.1), Type: utils.StringPointer(utils.MONETARY)}, + Balance: &BalanceFilter{Value: &utils.ValueFormula{Static: 1.1}, Type: utils.StringPointer(utils.MONETARY)}, }, &Action{ ActionType: TOPUP, @@ -1406,7 +1407,7 @@ func TestActionTransactionBalanceNotType(t *testing.T) { actions: []*Action{ &Action{ ActionType: TOPUP, - Balance: &BalanceFilter{Value: utils.Float64Pointer(1.1), Type: utils.StringPointer(utils.VOICE)}, + Balance: &BalanceFilter{Value: &utils.ValueFormula{Static: 1.1}, Type: utils.StringPointer(utils.VOICE)}, }, &Action{ ActionType: TOPUP, @@ -1444,14 +1445,14 @@ func TestActionWithExpireWithoutExpire(t *testing.T) { ActionType: TOPUP, Balance: &BalanceFilter{ Type: utils.StringPointer(utils.VOICE), - Value: utils.Float64Pointer(15), + Value: &utils.ValueFormula{Static: 15}, }, }, &Action{ ActionType: TOPUP, Balance: &BalanceFilter{ Type: utils.StringPointer(utils.VOICE), - Value: utils.Float64Pointer(30), + Value: &utils.ValueFormula{Static: 30}, ExpirationDate: utils.TimePointer(time.Date(2025, time.November, 11, 22, 39, 0, 0, time.UTC)), }, }, @@ -1671,7 +1672,7 @@ func 
TestActionConditionalTopup(t *testing.T) { Filter: `{"Type":"*monetary","Value":1,"Weight":10}`, Balance: &BalanceFilter{ Type: utils.StringPointer(utils.MONETARY), - Value: utils.Float64Pointer(11), + Value: &utils.ValueFormula{Static: 11}, Weight: utils.Float64Pointer(30), }, } @@ -1735,7 +1736,7 @@ func TestActionConditionalTopupNoMatch(t *testing.T) { Filter: `{"Type":"*monetary","Value":2,"Weight":10}`, Balance: &BalanceFilter{ Type: utils.StringPointer(utils.MONETARY), - Value: utils.Float64Pointer(11), + Value: &utils.ValueFormula{Static: 11}, Weight: utils.Float64Pointer(30), }, } @@ -1799,7 +1800,7 @@ func TestActionConditionalTopupExistingBalance(t *testing.T) { Filter: `{"Type":"*voice","Value":{"*gte":100}}`, Balance: &BalanceFilter{ Type: utils.StringPointer(utils.MONETARY), - Value: utils.Float64Pointer(11), + Value: &utils.ValueFormula{Static: 11}, Weight: utils.Float64Pointer(10), }, } @@ -2020,7 +2021,7 @@ func TestActionSetBalance(t *testing.T) { Balance: &BalanceFilter{ ID: utils.StringPointer("m2"), Type: utils.StringPointer(utils.MONETARY), - Value: utils.Float64Pointer(11), + Value: &utils.ValueFormula{Static: 11}, Weight: utils.Float64Pointer(10), }, } @@ -2097,6 +2098,148 @@ func TestActionExpNoExp(t *testing.T) { } } +func TestActionCdrlogBalanceValue(t *testing.T) { + err := accountingStorage.SetAccount(&Account{ + ID: "cgrates.org:bv", + BalanceMap: map[string]Balances{ + utils.MONETARY: Balances{&Balance{ + ID: "*default", + Uuid: "25a02c82-f09f-4c6e-bacf-8ed4b076475a", + Value: 10, + }}, + }, + }) + if err != nil { + t.Error("Error setting account: ", err) + } + at := &ActionTiming{ + accountIDs: utils.StringMap{"cgrates.org:bv": true}, + Timing: &RateInterval{}, + actions: []*Action{ + &Action{ + Id: "RECUR_FOR_V3HSILLMILLD1G", + ActionType: TOPUP, + Balance: &BalanceFilter{ + ID: utils.StringPointer("*default"), + Uuid: utils.StringPointer("25a02c82-f09f-4c6e-bacf-8ed4b076475a"), + Value: &utils.ValueFormula{Static: 1.1}, + Type: 
utils.StringPointer(utils.MONETARY), + }, + }, + &Action{ + Id: "RECUR_FOR_V3HSILLMILLD5G", + ActionType: DEBIT, + Balance: &BalanceFilter{ + ID: utils.StringPointer("*default"), + Uuid: utils.StringPointer("25a02c82-f09f-4c6e-bacf-8ed4b076475a"), + Value: &utils.ValueFormula{Static: 2.1}, + Type: utils.StringPointer(utils.MONETARY), + }, + }, + &Action{ + Id: "c", + ActionType: CDRLOG, + ExtraParameters: `{"BalanceID":"BalanceID","BalanceUUID":"BalanceUUID","ActionID":"ActionID","BalanceValue":"BalanceValue"}`, + }, + }, + } + err = at.Execute() + acc, err := accountingStorage.GetAccount("cgrates.org:bv") + if err != nil || acc == nil { + t.Error("Error getting account: ", acc, err) + } + if acc.BalanceMap[utils.MONETARY][0].Value != 9 { + t.Errorf("Transaction didn't work: %v", acc.BalanceMap[utils.MONETARY][0].Value) + } + cdrs := make([]*CDR, 0) + json.Unmarshal([]byte(at.actions[2].ExpirationString), &cdrs) + if len(cdrs) != 2 || + cdrs[0].ExtraFields["BalanceValue"] != "11.1" || + cdrs[1].ExtraFields["BalanceValue"] != "9" { + t.Errorf("Wrong cdrlogs: %", utils.ToIJSON(cdrs)) + } +} + +type TestRPCParameters struct { + status string +} + +type Attr struct { + Name string + Surname string + Age float64 +} + +func (trpcp *TestRPCParameters) Hopa(in Attr, out *float64) error { + trpcp.status = utils.OK + return nil +} + +func (trpcp *TestRPCParameters) Call(serviceMethod string, args interface{}, reply interface{}) error { + parts := strings.Split(serviceMethod, ".") + if len(parts) != 2 { + return utils.ErrNotImplemented + } + // get method + method := reflect.ValueOf(trpcp).MethodByName(parts[1]) + if !method.IsValid() { + return utils.ErrNotImplemented + } + + // construct the params + params := []reflect.Value{reflect.ValueOf(args), reflect.ValueOf(reply)} + + ret := method.Call(params) + if len(ret) != 1 { + return utils.ErrServerError + } + if ret[0].Interface() == nil { + return nil + } + err, ok := ret[0].Interface().(error) + if !ok { + return 
utils.ErrServerError + } + return err +} + +func TestCgrRpcAction(t *testing.T) { + trpcp := &TestRPCParameters{} + utils.RegisterRpcParams("", trpcp) + a := &Action{ + ExtraParameters: `{"Address": "*internal", + "Transport": "*gob", + "Method": "TestRPCParameters.Hopa", + "Attempts":1, + "Async" :false, + "Params": {"Name":"n", "Surname":"s", "Age":10.2}}`, + } + if err := cgrRPCAction(nil, nil, a, nil); err != nil { + t.Error("error executing cgr action: ", err) + } + if trpcp.status != utils.OK { + t.Error("RPC not called!") + } +} + +func TestValueFormulaDebit(t *testing.T) { + if _, err := accountingStorage.GetAccount("cgrates.org:vf"); err != nil { + t.Errorf("account to be removed not found: %v", err) + } + + at := &ActionTiming{ + accountIDs: utils.StringMap{"cgrates.org:vf": true}, + ActionsID: "VF", + } + at.Execute() + afterUb, err := accountingStorage.GetAccount("cgrates.org:vf") + // not an exact value, depends of month + v := afterUb.BalanceMap[utils.MONETARY].GetTotalValue() + if err != nil || v > -0.30 || v < -0.35 { + t.Error("error debiting account: ", err, utils.ToIJSON(afterUb)) + } +} + /**************** Benchmarks ********************************/ func BenchmarkUUID(b *testing.B) { diff --git a/engine/aliases.go b/engine/aliases.go index 23411392a..a927cc9b8 100644 --- a/engine/aliases.go +++ b/engine/aliases.go @@ -12,7 +12,7 @@ import ( ) // Temporary export AliasService for the ApierV1 to be able to emulate old APIs -func GetAliasService() AliasService { +func GetAliasService() rpcclient.RpcClientConnection { return aliasService } @@ -177,63 +177,59 @@ func NewAliasHandler(accountingDb AccountingStorage) *AliasHandler { } } -// SetAlias will set/overwrite specified alias -func (am *AliasHandler) SetAlias(al Alias, reply *string) error { - am.mu.Lock() - defer am.mu.Unlock() - - if err := am.accountingDb.SetAlias(&al); err != nil { - *reply = err.Error() - return err - } //add to cache - - aliasesChanged := []string{utils.ALIASES_PREFIX + 
al.GetId()} - if err := am.accountingDb.CacheAccountingPrefixValues(map[string][]string{utils.ALIASES_PREFIX: aliasesChanged}); err != nil { - return utils.NewErrServerError(err) - } - *reply = utils.OK - return nil +type AttrAddAlias struct { + Alias *Alias + Overwrite bool } -func (am *AliasHandler) UpdateAlias(al Alias, reply *string) error { +// SetAlias will set/overwrite specified alias +func (am *AliasHandler) SetAlias(attr *AttrAddAlias, reply *string) error { am.mu.Lock() defer am.mu.Unlock() - // get previous value - oldAlias, err := am.accountingDb.GetAlias(al.GetId(), false) - if err != nil { - return err + + var oldAlias *Alias + if !attr.Overwrite { // get previous value + oldAlias, _ = am.accountingDb.GetAlias(attr.Alias.GetId(), false) } - for _, value := range al.Values { - found := false - if value.DestinationId == "" { - value.DestinationId = utils.ANY + + if attr.Overwrite || oldAlias == nil { + if err := am.accountingDb.SetAlias(attr.Alias); err != nil { + *reply = err.Error() + return err } - for _, oldValue := range oldAlias.Values { - if oldValue.DestinationId == value.DestinationId { - for target, origAliasMap := range value.Pairs { - for orig, alias := range origAliasMap { - if oldValue.Pairs[target] == nil { - oldValue.Pairs[target] = make(map[string]string) + } else { + for _, value := range attr.Alias.Values { + found := false + if value.DestinationId == "" { + value.DestinationId = utils.ANY + } + for _, oldValue := range oldAlias.Values { + if oldValue.DestinationId == value.DestinationId { + for target, origAliasMap := range value.Pairs { + for orig, alias := range origAliasMap { + if oldValue.Pairs[target] == nil { + oldValue.Pairs[target] = make(map[string]string) + } + oldValue.Pairs[target][orig] = alias } - oldValue.Pairs[target][orig] = alias } + oldValue.Weight = value.Weight + found = true + break } - oldValue.Weight = value.Weight - found = true - break + } + if !found { + oldAlias.Values = append(oldAlias.Values, value) } } 
- if !found { - oldAlias.Values = append(oldAlias.Values, value) + if err := am.accountingDb.SetAlias(oldAlias); err != nil { + *reply = err.Error() + return err } } - if err := am.accountingDb.SetAlias(oldAlias); err != nil { - *reply = err.Error() - return err - } //add to cache - - aliasesChanged := []string{utils.ALIASES_PREFIX + al.GetId()} + //add to cache + aliasesChanged := []string{utils.ALIASES_PREFIX + attr.Alias.GetId()} if err := am.accountingDb.CacheAccountingPrefixValues(map[string][]string{utils.ALIASES_PREFIX: aliasesChanged}); err != nil { return utils.NewErrServerError(err) } @@ -241,7 +237,7 @@ func (am *AliasHandler) UpdateAlias(al Alias, reply *string) error { return nil } -func (am *AliasHandler) RemoveAlias(al Alias, reply *string) error { +func (am *AliasHandler) RemoveAlias(al *Alias, reply *string) error { am.mu.Lock() defer am.mu.Unlock() if err := am.accountingDb.RemoveAlias(al.GetId()); err != nil { @@ -252,7 +248,7 @@ func (am *AliasHandler) RemoveAlias(al Alias, reply *string) error { return nil } -func (am *AliasHandler) RemoveReverseAlias(attr AttrReverseAlias, reply *string) error { +func (am *AliasHandler) RemoveReverseAlias(attr *AttrReverseAlias, reply *string) error { am.mu.Lock() defer am.mu.Unlock() rKey := utils.REVERSE_ALIASES_PREFIX + attr.Alias + attr.Target + attr.Context @@ -275,7 +271,7 @@ func (am *AliasHandler) RemoveReverseAlias(attr AttrReverseAlias, reply *string) return nil } -func (am *AliasHandler) GetAlias(al Alias, result *Alias) error { +func (am *AliasHandler) GetAlias(al *Alias, result *Alias) error { am.mu.RLock() defer am.mu.RUnlock() variants := al.GenerateIds() @@ -288,7 +284,7 @@ func (am *AliasHandler) GetAlias(al Alias, result *Alias) error { return utils.ErrNotFound } -func (am *AliasHandler) GetReverseAlias(attr AttrReverseAlias, result *map[string][]*Alias) error { +func (am *AliasHandler) GetReverseAlias(attr *AttrReverseAlias, result *map[string][]*Alias) error { am.mu.Lock() defer 
am.mu.Unlock() aliases := make(map[string][]*Alias) @@ -315,9 +311,9 @@ func (am *AliasHandler) GetReverseAlias(attr AttrReverseAlias, result *map[strin return nil } -func (am *AliasHandler) GetMatchingAlias(attr AttrMatchingAlias, result *string) error { +func (am *AliasHandler) GetMatchingAlias(attr *AttrMatchingAlias, result *string) error { response := Alias{} - if err := am.GetAlias(Alias{ + if err := am.GetAlias(&Alias{ Direction: attr.Direction, Tenant: attr.Tenant, Category: attr.Category, @@ -369,48 +365,32 @@ func (am *AliasHandler) GetMatchingAlias(attr AttrMatchingAlias, result *string) return utils.ErrNotFound } -type ProxyAliasService struct { - Client *rpcclient.RpcClient -} - -func NewProxyAliasService(addr string, attempts, reconnects int) (*ProxyAliasService, error) { - client, err := rpcclient.NewRpcClient("tcp", addr, attempts, reconnects, utils.GOB, nil) - if err != nil { - return nil, err +func (am *AliasHandler) Call(serviceMethod string, args interface{}, reply interface{}) error { + parts := strings.Split(serviceMethod, ".") + if len(parts) != 2 { + return utils.ErrNotImplemented + } + // get method + method := reflect.ValueOf(am).MethodByName(parts[1]) + if !method.IsValid() { + return utils.ErrNotImplemented } - return &ProxyAliasService{Client: client}, nil -} -func (ps *ProxyAliasService) SetAlias(al Alias, reply *string) error { - return ps.Client.Call("AliasesV1.SetAlias", al, reply) -} + // construct the params + params := []reflect.Value{reflect.ValueOf(args), reflect.ValueOf(reply)} -func (ps *ProxyAliasService) UpdateAlias(al Alias, reply *string) error { - return ps.Client.Call("AliasesV1.UpdateAlias", al, reply) -} - -func (ps *ProxyAliasService) RemoveAlias(al Alias, reply *string) error { - return ps.Client.Call("AliasesV1.RemoveAlias", al, reply) -} - -func (ps *ProxyAliasService) GetAlias(al Alias, alias *Alias) error { - return ps.Client.Call("AliasesV1.GetAlias", al, alias) -} - -func (ps *ProxyAliasService) 
GetMatchingAlias(attr AttrMatchingAlias, alias *string) error { - return ps.Client.Call("AliasesV1.GetMatchingAlias", attr, alias) -} - -func (ps *ProxyAliasService) GetReverseAlias(attr AttrReverseAlias, alias *map[string][]*Alias) error { - return ps.Client.Call("AliasesV1.GetReverseAlias", attr, alias) -} - -func (ps *ProxyAliasService) RemoveReverseAlias(attr AttrReverseAlias, reply *string) error { - return ps.Client.Call("AliasesV1.RemoveReverseAlias", attr, reply) -} - -func (ps *ProxyAliasService) ReloadAliases(in string, reply *string) error { - return ps.Client.Call("AliasesV1.ReloadAliases", in, reply) + ret := method.Call(params) + if len(ret) != 1 { + return utils.ErrServerError + } + if ret[0].Interface() == nil { + return nil + } + err, ok := ret[0].Interface().(error) + if !ok { + return utils.ErrServerError + } + return err } func LoadAlias(attr *AttrMatchingAlias, in interface{}, extraFields string) error { @@ -418,7 +398,7 @@ func LoadAlias(attr *AttrMatchingAlias, in interface{}, extraFields string) erro return nil } response := Alias{} - if err := aliasService.GetAlias(Alias{ + if err := aliasService.Call("AliasesV1.GetAlias", &Alias{ Direction: attr.Direction, Tenant: attr.Tenant, Category: attr.Category, diff --git a/engine/aliases_test.go b/engine/aliases_test.go index a95e40b1e..43708d128 100644 --- a/engine/aliases_test.go +++ b/engine/aliases_test.go @@ -12,7 +12,7 @@ func init() { } func TestAliasesGetAlias(t *testing.T) { alias := Alias{} - err := aliasService.GetAlias(Alias{ + err := aliasService.Call("AliasesV1.GetAlias", &Alias{ Direction: "*out", Tenant: "cgrates.org", Category: "call", @@ -23,13 +23,13 @@ func TestAliasesGetAlias(t *testing.T) { if err != nil || len(alias.Values) != 2 || len(alias.Values[0].Pairs) != 2 { - t.Error("Error getting alias: ", err, alias) + t.Error("Error getting alias: ", err, alias, alias.Values[0]) } } func TestAliasesGetMatchingAlias(t *testing.T) { var response string - err := 
aliasService.GetMatchingAlias(AttrMatchingAlias{ + err := aliasService.Call("AliasesV1.GetMatchingAlias", &AttrMatchingAlias{ Direction: "*out", Tenant: "cgrates.org", Category: "call", @@ -47,23 +47,26 @@ func TestAliasesGetMatchingAlias(t *testing.T) { func TestAliasesSetters(t *testing.T) { var out string - if err := aliasService.SetAlias(Alias{ - Direction: "*out", - Tenant: "cgrates.org", - Category: "call", - Account: "set", - Subject: "set", - Context: "*rating", - Values: AliasValues{&AliasValue{ - DestinationId: utils.ANY, - Pairs: AliasPairs{"Account": map[string]string{"1234": "1235"}}, - Weight: 10, - }}, + if err := aliasService.Call("AliasesV1.SetAlias", &AttrAddAlias{ + Alias: &Alias{ + Direction: "*out", + Tenant: "cgrates.org", + Category: "call", + Account: "set", + Subject: "set", + Context: "*rating", + Values: AliasValues{&AliasValue{ + DestinationId: utils.ANY, + Pairs: AliasPairs{"Account": map[string]string{"1234": "1235"}}, + Weight: 10, + }}, + }, + Overwrite: true, }, &out); err != nil || out != utils.OK { t.Error("Error setting alias: ", err, out) } r := &Alias{} - if err := aliasService.GetAlias(Alias{ + if err := aliasService.Call("AliasesV1.GetAlias", &Alias{ Direction: "*out", Tenant: "cgrates.org", Category: "call", @@ -74,22 +77,25 @@ func TestAliasesSetters(t *testing.T) { t.Errorf("Error getting alias: %+v", r) } - if err := aliasService.UpdateAlias(Alias{ - Direction: "*out", - Tenant: "cgrates.org", - Category: "call", - Account: "set", - Subject: "set", - Context: "*rating", - Values: AliasValues{&AliasValue{ - DestinationId: utils.ANY, - Pairs: AliasPairs{"Subject": map[string]string{"1234": "1235"}}, - Weight: 10, - }}, + if err := aliasService.Call("AliasesV1.SetAlias", &AttrAddAlias{ + Alias: &Alias{ + Direction: "*out", + Tenant: "cgrates.org", + Category: "call", + Account: "set", + Subject: "set", + Context: "*rating", + Values: AliasValues{&AliasValue{ + DestinationId: utils.ANY, + Pairs: AliasPairs{"Subject": 
map[string]string{"1234": "1235"}}, + Weight: 10, + }}, + }, + Overwrite: false, }, &out); err != nil || out != utils.OK { t.Error("Error updateing alias: ", err, out) } - if err := aliasService.GetAlias(Alias{ + if err := aliasService.Call("AliasesV1.GetAlias", &Alias{ Direction: "*out", Tenant: "cgrates.org", Category: "call", @@ -103,22 +109,25 @@ func TestAliasesSetters(t *testing.T) { r.Values[0].Pairs["Account"]["1234"] != "1235" { t.Errorf("Error getting alias: %+v", r.Values[0]) } - if err := aliasService.UpdateAlias(Alias{ - Direction: "*out", - Tenant: "cgrates.org", - Category: "call", - Account: "set", - Subject: "set", - Context: "*rating", - Values: AliasValues{&AliasValue{ - DestinationId: utils.ANY, - Pairs: AliasPairs{"Subject": map[string]string{"1111": "2222"}}, - Weight: 10, - }}, + if err := aliasService.Call("AliasesV1.SetAlias", &AttrAddAlias{ + Alias: &Alias{ + Direction: "*out", + Tenant: "cgrates.org", + Category: "call", + Account: "set", + Subject: "set", + Context: "*rating", + Values: AliasValues{&AliasValue{ + DestinationId: utils.ANY, + Pairs: AliasPairs{"Subject": map[string]string{"1111": "2222"}}, + Weight: 10, + }}, + }, + Overwrite: false, }, &out); err != nil || out != utils.OK { t.Error("Error updateing alias: ", err, out) } - if err := aliasService.GetAlias(Alias{ + if err := aliasService.Call("AliasesV1.GetAlias", &Alias{ Direction: "*out", Tenant: "cgrates.org", Category: "call", @@ -128,22 +137,25 @@ func TestAliasesSetters(t *testing.T) { }, r); err != nil || len(r.Values) != 1 || len(r.Values[0].Pairs) != 2 || r.Values[0].Pairs["Subject"]["1111"] != "2222" { t.Errorf("Error getting alias: %+v", r.Values[0].Pairs["Subject"]) } - if err := aliasService.UpdateAlias(Alias{ - Direction: "*out", - Tenant: "cgrates.org", - Category: "call", - Account: "set", - Subject: "set", - Context: "*rating", - Values: AliasValues{&AliasValue{ - DestinationId: "NAT", - Pairs: AliasPairs{"Subject": map[string]string{"3333": "4444"}}, - 
Weight: 10, - }}, + if err := aliasService.Call("AliasesV1.SetAlias", &AttrAddAlias{ + Alias: &Alias{ + Direction: "*out", + Tenant: "cgrates.org", + Category: "call", + Account: "set", + Subject: "set", + Context: "*rating", + Values: AliasValues{&AliasValue{ + DestinationId: "NAT", + Pairs: AliasPairs{"Subject": map[string]string{"3333": "4444"}}, + Weight: 10, + }}, + }, + Overwrite: false, }, &out); err != nil || out != utils.OK { t.Error("Error updateing alias: ", err, out) } - if err := aliasService.GetAlias(Alias{ + if err := aliasService.Call("AliasesV1.GetAlias", &Alias{ Direction: "*out", Tenant: "cgrates.org", Category: "call", diff --git a/engine/balance_filter.go b/engine/balance_filter.go index f9e408d39..78ff64140 100644 --- a/engine/balance_filter.go +++ b/engine/balance_filter.go @@ -11,7 +11,7 @@ type BalanceFilter struct { Uuid *string ID *string Type *string - Value *float64 + Value *utils.ValueFormula Directions *utils.StringMap ExpirationDate *time.Time Weight *float64 @@ -58,7 +58,7 @@ func (bf *BalanceFilter) Clone() *BalanceFilter { *result.ID = *bf.ID } if bf.Value != nil { - result.Value = new(float64) + result.Value = new(utils.ValueFormula) *result.Value = *bf.Value } if bf.RatingSubject != nil { @@ -116,7 +116,7 @@ func (bf *BalanceFilter) LoadFromBalance(b *Balance) *BalanceFilter { bf.ID = &b.ID } if b.Value != 0 { - bf.Value = &b.Value + bf.Value.Static = b.Value } if !b.Directions.IsEmpty() { bf.Directions = &b.Directions @@ -173,14 +173,22 @@ func (bp *BalanceFilter) GetValue() float64 { if bp == nil || bp.Value == nil { return 0.0 } - return *bp.Value + if bp.Value.Method == "" { + return bp.Value.Static + } + // calculate using formula + formula, exists := utils.ValueFormulas[bp.Value.Method] + if !exists { + return 0.0 + } + return formula(bp.Value.Params) } func (bp *BalanceFilter) SetValue(v float64) { if bp.Value == nil { - bp.Value = new(float64) + bp.Value = new(utils.ValueFormula) } - *bp.Value = v + bp.Value.Static = v } 
func (bp *BalanceFilter) GetUuid() string { @@ -292,7 +300,7 @@ func (bf *BalanceFilter) ModifyBalance(b *Balance) { b.Directions = *bf.Directions } if bf.Value != nil { - b.Value = *bf.Value + b.Value = bf.GetValue() } if bf.ExpirationDate != nil { b.ExpirationDate = *bf.ExpirationDate diff --git a/engine/calldesc.go b/engine/calldesc.go index d5f3c155f..d62a0005e 100644 --- a/engine/calldesc.go +++ b/engine/calldesc.go @@ -28,7 +28,6 @@ import ( "time" "github.com/cgrates/cgrates/cache2go" - "github.com/cgrates/cgrates/history" "github.com/cgrates/cgrates/utils" "github.com/cgrates/rpcclient" ) @@ -70,17 +69,18 @@ func init() { } var ( - ratingStorage RatingStorage - accountingStorage AccountingStorage - storageLogger LogStorage - cdrStorage CdrStorage - debitPeriod = 10 * time.Second - globalRoundingDecimals = 6 - historyScribe history.Scribe - pubSubServer rpcclient.RpcClientConnection - userService UserService - aliasService AliasService - rpSubjectPrefixMatching bool + ratingStorage RatingStorage + accountingStorage AccountingStorage + storageLogger LogStorage + cdrStorage CdrStorage + debitPeriod = 10 * time.Second + globalRoundingDecimals = 6 + historyScribe rpcclient.RpcClientConnection + pubSubServer rpcclient.RpcClientConnection + userService rpcclient.RpcClientConnection + aliasService rpcclient.RpcClientConnection + rpSubjectPrefixMatching bool + lcrSubjectPrefixMatching bool ) // Exported method to set the storage getter. @@ -101,6 +101,10 @@ func SetRpSubjectPrefixMatching(flag bool) { rpSubjectPrefixMatching = flag } +func SetLcrSubjectPrefixMatching(flag bool) { + lcrSubjectPrefixMatching = flag +} + /* Sets the database for logging (can be de same as storage getter or different db) */ @@ -116,7 +120,7 @@ func SetCdrStorage(cStorage CdrStorage) { } // Exported method to set the history scribe. 
-func SetHistoryScribe(scribe history.Scribe) { +func SetHistoryScribe(scribe rpcclient.RpcClientConnection) { historyScribe = scribe } @@ -124,11 +128,11 @@ func SetPubSub(ps rpcclient.RpcClientConnection) { pubSubServer = ps } -func SetUserService(us UserService) { +func SetUserService(us rpcclient.RpcClientConnection) { userService = us } -func SetAliasService(as AliasService) { +func SetAliasService(as rpcclient.RpcClientConnection) { aliasService = as } @@ -188,7 +192,11 @@ func (cd *CallDescriptor) getAccount() (ub *Account, err error) { cd.account, err = accountingStorage.GetAccount(cd.GetAccountKey()) } if cd.account != nil && cd.account.Disabled { - return nil, fmt.Errorf("User %s is disabled", cd.account.ID) + return nil, utils.ErrAccountDisabled + } + if err != nil || cd.account == nil { + utils.Logger.Warning(fmt.Sprintf("Account: %s, not found (%v)", cd.GetAccountKey(), err)) + return nil, utils.ErrAccountNotFound } return cd.account, err } @@ -353,6 +361,7 @@ func (cd *CallDescriptor) splitInTimeSpans() (timespans []*TimeSpan) { // split on rating plans afterStart, afterEnd := false, false //optimization for multiple activation periods for _, rp := range cd.RatingInfos { + //log.Print("RP: ", utils.ToJSON(rp)) if !afterStart && !afterEnd && rp.ActivationTime.Before(cd.TimeStart) { firstSpan.setRatingInfo(rp) } else { @@ -360,7 +369,6 @@ func (cd *CallDescriptor) splitInTimeSpans() (timespans []*TimeSpan) { for i := 0; i < len(timespans); i++ { newTs := timespans[i].SplitByRatingPlan(rp) if newTs != nil { - //log.Print("NEW TS", newTs.TimeStart) timespans = append(timespans, newTs) } else { afterEnd = true @@ -369,8 +377,23 @@ func (cd *CallDescriptor) splitInTimeSpans() (timespans []*TimeSpan) { } } } + //log.Printf("After SplitByRatingPlan: %+v", utils.ToJSON(timespans)) + // split on days + for i := 0; i < len(timespans); i++ { + rp := timespans[i].ratingInfo + newTs := timespans[i].SplitByDay() + if newTs != nil { + //log.Print("NEW TS: ", 
newTs.TimeStart, newTs.TimeEnd) + newTs.setRatingInfo(rp) + // insert the new timespan + index := i + 1 + timespans = append(timespans, nil) + copy(timespans[index+1:], timespans[index:]) + timespans[index] = newTs + } + } } - // utils.Logger.Debug(fmt.Sprintf("After SplitByRatingPlan: %+v", timespans)) + //log.Printf("After SplitByDay: %+v", utils.ToJSON(timespans)) // split on rate intervals for i := 0; i < len(timespans); i++ { //log.Printf("==============%v==================", i) @@ -379,6 +402,7 @@ func (cd *CallDescriptor) splitInTimeSpans() (timespans []*TimeSpan) { // utils.Logger.Debug(fmt.Sprintf("rp: %+v", rp)) //timespans[i].RatingPlan = nil rateIntervals := rp.SelectRatingIntevalsForTimespan(timespans[i]) + //log.Print("RIs: ", utils.ToJSON(rateIntervals)) /*for _, interval := range rp.RateIntervals { if !timespans[i].hasBetterRateIntervalThan(interval) { timespans[i].SetRateInterval(interval) @@ -511,6 +535,7 @@ func (cd *CallDescriptor) getCost() (*CallCost, error) { cd.TOR = utils.VOICE } err := cd.LoadRatingPlans() + //log.Print("RI: ", utils.ToJSON(cd.RatingInfos)) if err != nil { //utils.Logger.Err(fmt.Sprintf("error getting cost for key <%s>: %s", cd.GetKey(cd.Subject), err.Error())) return &CallCost{Cost: -1}, err @@ -635,13 +660,9 @@ func (origCD *CallDescriptor) getMaxSessionDuration(origAcc *Account) (time.Dura func (cd *CallDescriptor) GetMaxSessionDuration() (duration time.Duration, err error) { cd.account = nil // make sure it's not cached - if account, err := cd.getAccount(); err != nil || account == nil { - utils.Logger.Err(fmt.Sprintf("Account: %s, not found", cd.GetAccountKey())) - return 0, utils.ErrAccountNotFound + if account, err := cd.getAccount(); err != nil { + return 0, err } else { - if account.Disabled { - return 0, utils.ErrAccountDisabled - } if memberIds, err := account.GetUniqueSharedGroupMembers(cd); err == nil { if _, err := Guardian.Guard(func() (interface{}, error) { duration, err = cd.getMaxSessionDuration(account) 
@@ -705,13 +726,9 @@ func (cd *CallDescriptor) debit(account *Account, dryRun bool, goNegative bool) func (cd *CallDescriptor) Debit() (cc *CallCost, err error) { cd.account = nil // make sure it's not cached // lock all group members - if account, err := cd.getAccount(); err != nil || account == nil { - utils.Logger.Err(fmt.Sprintf("Account: %s, not found", cd.GetAccountKey())) - return nil, utils.ErrAccountNotFound + if account, err := cd.getAccount(); err != nil { + return nil, err } else { - if account.Disabled { - return nil, utils.ErrAccountDisabled - } if memberIds, sgerr := account.GetUniqueSharedGroupMembers(cd); sgerr == nil { _, err = Guardian.Guard(func() (interface{}, error) { cc, err = cd.debit(account, cd.DryRun, true) @@ -730,13 +747,9 @@ func (cd *CallDescriptor) Debit() (cc *CallCost, err error) { // by the GetMaxSessionDuration method. The amount filed has to be filled in call descriptor. func (cd *CallDescriptor) MaxDebit() (cc *CallCost, err error) { cd.account = nil // make sure it's not cached - if account, err := cd.getAccount(); err != nil || account == nil { - utils.Logger.Err(fmt.Sprintf("Account: %s, not found", cd.GetAccountKey())) - return nil, utils.ErrAccountNotFound + if account, err := cd.getAccount(); err != nil { + return nil, err } else { - if account.Disabled { - return nil, utils.ErrAccountDisabled - } //log.Printf("ACC: %+v", account) if memberIDs, err := account.GetUniqueSharedGroupMembers(cd); err == nil { _, err = Guardian.Guard(func() (interface{}, error) { @@ -920,6 +933,8 @@ func (cd *CallDescriptor) Clone() *CallDescriptor { ForceDuration: cd.ForceDuration, PerformRounding: cd.PerformRounding, DryRun: cd.DryRun, + CgrID: cd.CgrID, + RunID: cd.RunID, } } @@ -932,6 +947,15 @@ func (cd *CallDescriptor) GetLCRFromStorage() (*LCR, error) { utils.LCRKey(cd.Direction, utils.ANY, utils.ANY, utils.ANY, utils.ANY), utils.LCRKey(utils.ANY, utils.ANY, utils.ANY, utils.ANY, utils.ANY), } + if lcrSubjectPrefixMatching { + var 
partialSubjects []string + lenSubject := len(cd.Subject) + for i := 1; i < lenSubject; i++ { + partialSubjects = append(partialSubjects, utils.LCRKey(cd.Direction, cd.Tenant, cd.Category, cd.Account, cd.Subject[:lenSubject-i])) + } + // insert partialsubjects into keyVariants + keyVariants = append(keyVariants[:1], append(partialSubjects, keyVariants[1:]...)...) + } for _, key := range keyVariants { if lcr, err := ratingStorage.GetLCR(key, false); err != nil && err != utils.ErrNotFound { return nil, err @@ -942,7 +966,7 @@ func (cd *CallDescriptor) GetLCRFromStorage() (*LCR, error) { return nil, utils.ErrNotFound } -func (cd *CallDescriptor) GetLCR(stats StatsInterface, p *utils.Paginator) (*LCRCost, error) { +func (cd *CallDescriptor) GetLCR(stats rpcclient.RpcClientConnection, p *utils.Paginator) (*LCRCost, error) { cd.account = nil // make sure it's not cached lcr, err := cd.GetLCRFromStorage() if err != nil { @@ -1073,7 +1097,7 @@ func (cd *CallDescriptor) GetLCR(stats StatsInterface, p *utils.Paginator) (*LCR if lcrCost.Entry.Strategy == LCR_STRATEGY_LOAD { for _, qId := range cdrStatsQueueIds { sq := &StatsQueue{} - if err := stats.GetQueue(qId, sq); err == nil { + if err := stats.Call("CDRStatsV1.GetQueue", qId, sq); err == nil { if sq.conf.QueueLength == 0 { //only add qeues that don't have fixed length supplierQueues = append(supplierQueues, sq) } @@ -1081,7 +1105,7 @@ func (cd *CallDescriptor) GetLCR(stats StatsInterface, p *utils.Paginator) (*LCR } } else { statValues := make(map[string]float64) - if err := stats.GetValues(qId, &statValues); err != nil { + if err := stats.Call("CDRStatsV1.GetValues", qId, &statValues); err != nil { lcrCost.SupplierCosts = append(lcrCost.SupplierCosts, &LCRSupplierCost{ Supplier: fullSupplier, Error: fmt.Sprintf("Get stats values for queue id %s, error %s", qId, err.Error()), diff --git a/engine/calldesc_test.go b/engine/calldesc_test.go index 540bc8dad..f2dbbdd08 100644 --- a/engine/calldesc_test.go +++ 
b/engine/calldesc_test.go @@ -41,12 +41,12 @@ func init() { func populateDB() { ats := []*Action{ - &Action{ActionType: "*topup", Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY), Value: utils.Float64Pointer(10)}}, - &Action{ActionType: "*topup", Balance: &BalanceFilter{Type: utils.StringPointer(utils.VOICE), Weight: utils.Float64Pointer(20), Value: utils.Float64Pointer(10), DestinationIDs: utils.StringMapPointer(utils.NewStringMap("NAT"))}}, + &Action{ActionType: "*topup", Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY), Value: &utils.ValueFormula{Static: 10}}}, + &Action{ActionType: "*topup", Balance: &BalanceFilter{Type: utils.StringPointer(utils.VOICE), Weight: utils.Float64Pointer(20), Value: &utils.ValueFormula{Static: 10}, DestinationIDs: utils.StringMapPointer(utils.NewStringMap("NAT"))}}, } ats1 := []*Action{ - &Action{ActionType: "*topup", Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY), Value: utils.Float64Pointer(10)}, Weight: 10}, + &Action{ActionType: "*topup", Balance: &BalanceFilter{Type: utils.StringPointer(utils.MONETARY), Value: &utils.ValueFormula{Static: 10}}, Weight: 10}, &Action{ActionType: "*reset_account", Weight: 20}, } @@ -326,6 +326,43 @@ func TestGetCostRatingPlansAndRatingIntervalsMore(t *testing.T) { } } +func TestGetCostRatingPlansAndRatingIntervalsMoreDays(t *testing.T) { + t1 := time.Date(2012, time.February, 20, 9, 50, 0, 0, time.UTC) + t2 := time.Date(2012, time.February, 23, 18, 10, 0, 0, time.UTC) + cd := &CallDescriptor{Direction: "*out", Category: "0", Tenant: "CUSTOMER_1", Subject: "rif:from:tm", Destination: "49178", TimeStart: t1, TimeEnd: t2, LoopIndex: 0, DurationIndex: t2.Sub(t1)} + result, _ := cd.GetCost() + if len(result.Timespans) != 8 || + !result.Timespans[0].TimeEnd.Equal(result.Timespans[1].TimeStart) || + !result.Timespans[1].TimeEnd.Equal(result.Timespans[2].TimeStart) || + !result.Timespans[2].TimeEnd.Equal(result.Timespans[3].TimeStart) || + 
!result.Timespans[3].TimeEnd.Equal(result.Timespans[4].TimeStart) || + !result.Timespans[4].TimeEnd.Equal(result.Timespans[5].TimeStart) || + !result.Timespans[5].TimeEnd.Equal(result.Timespans[6].TimeStart) || + !result.Timespans[6].TimeEnd.Equal(result.Timespans[7].TimeStart) { + for _, ts := range result.Timespans { + t.Logf("TS %+v", ts) + } + t.Errorf("Expected %+v was %+v", 4, len(result.Timespans)) + } +} + +func TestGetCostRatingPlansAndRatingIntervalsMoreDaysWeekend(t *testing.T) { + t1 := time.Date(2012, time.February, 24, 9, 50, 0, 0, time.UTC) + t2 := time.Date(2012, time.February, 27, 18, 10, 0, 0, time.UTC) + cd := &CallDescriptor{Direction: "*out", Category: "0", Tenant: "CUSTOMER_1", Subject: "rif:from:tm", Destination: "49178", TimeStart: t1, TimeEnd: t2, LoopIndex: 0, DurationIndex: t2.Sub(t1)} + result, _ := cd.GetCost() + if len(result.Timespans) != 5 || + !result.Timespans[0].TimeEnd.Equal(result.Timespans[1].TimeStart) || + !result.Timespans[1].TimeEnd.Equal(result.Timespans[2].TimeStart) || + !result.Timespans[2].TimeEnd.Equal(result.Timespans[3].TimeStart) || + !result.Timespans[3].TimeEnd.Equal(result.Timespans[4].TimeStart) { + for _, ts := range result.Timespans { + t.Logf("TS %+v", ts) + } + t.Errorf("Expected %+v was %+v", 4, len(result.Timespans)) + } +} + func TestGetCostRateGroups(t *testing.T) { t1 := time.Date(2013, time.October, 7, 14, 50, 0, 0, time.UTC) t2 := time.Date(2013, time.October, 7, 14, 52, 12, 0, time.UTC) @@ -566,7 +603,7 @@ func TestGetMaxSessiontWithBlocker(t *testing.T) { MaxCostSoFar: 0, } result, err := cd.GetMaxSessionDuration() - expected := 30 * time.Minute + expected := 17 * time.Minute if result != expected || err != nil { t.Errorf("Expected %v was %v (%v)", expected, result, err) } @@ -588,6 +625,57 @@ func TestGetMaxSessiontWithBlocker(t *testing.T) { } } +func TestGetMaxSessiontWithBlockerEmpty(t *testing.T) { + ap, _ := ratingStorage.GetActionPlan("BLOCK_EMPTY_AT", false) + for _, at := range 
ap.ActionTimings { + at.accountIDs = ap.AccountIDs + at.Execute() + } + acc, err := accountingStorage.GetAccount("cgrates.org:block_empty") + if err != nil { + t.Error("error getting account: ", err) + } + if len(acc.BalanceMap[utils.MONETARY]) != 2 || + acc.BalanceMap[utils.MONETARY][0].Blocker != true { + for _, b := range acc.BalanceMap[utils.MONETARY] { + t.Logf("B: %+v", b) + } + t.Error("Error executing action plan on account: ", acc.BalanceMap[utils.MONETARY]) + } + cd := &CallDescriptor{ + Direction: "*out", + Category: "call", + Tenant: "cgrates.org", + Subject: "block", + Account: "block_empty", + Destination: "0723", + TimeStart: time.Date(2016, 1, 13, 14, 0, 0, 0, time.UTC), + TimeEnd: time.Date(2016, 1, 13, 14, 30, 0, 0, time.UTC), + MaxCostSoFar: 0, + } + result, err := cd.GetMaxSessionDuration() + expected := 0 * time.Minute + if result != expected || err != nil { + t.Errorf("Expected %v was %v (%v)", expected, result, err) + } + cd = &CallDescriptor{ + Direction: "*out", + Category: "call", + Tenant: "cgrates.org", + Subject: "block", + Account: "block_empty", + Destination: "444", + TimeStart: time.Date(2016, 1, 13, 14, 0, 0, 0, time.UTC), + TimeEnd: time.Date(2016, 1, 13, 14, 30, 0, 0, time.UTC), + MaxCostSoFar: 0, + } + result, err = cd.GetMaxSessionDuration() + expected = 30 * time.Minute + if result != expected || err != nil { + t.Errorf("Expected %v was %v (%v)", expected, result, err) + } +} + func TestGetCostWithMaxCost(t *testing.T) { ap, _ := ratingStorage.GetActionPlan("TOPUP10_AT", false) for _, at := range ap.ActionTimings { diff --git a/engine/cdr.go b/engine/cdr.go index de741ed3e..bbdb56f84 100644 --- a/engine/cdr.go +++ b/engine/cdr.go @@ -745,8 +745,7 @@ type UsageRecord struct { func (self *UsageRecord) AsStoredCdr(timezone string) (*CDR, error) { var err error - cdr := &CDR{ToR: self.ToR, RequestType: self.RequestType, Direction: self.Direction, Tenant: self.Tenant, Category: self.Category, - Account: self.Account, Subject: 
self.Subject, Destination: self.Destination} + cdr := &CDR{CGRID: self.GetId(), ToR: self.ToR, RequestType: self.RequestType, Direction: self.Direction, Tenant: self.Tenant, Category: self.Category, Account: self.Account, Subject: self.Subject, Destination: self.Destination} if cdr.SetupTime, err = utils.ParseTimeDetectLayout(self.SetupTime, timezone); err != nil { return nil, err } @@ -768,6 +767,7 @@ func (self *UsageRecord) AsStoredCdr(timezone string) (*CDR, error) { func (self *UsageRecord) AsCallDescriptor(timezone string) (*CallDescriptor, error) { var err error cd := &CallDescriptor{ + CgrID: self.GetId(), TOR: self.ToR, Direction: self.Direction, Tenant: self.Tenant, @@ -796,3 +796,7 @@ func (self *UsageRecord) AsCallDescriptor(timezone string) (*CallDescriptor, err } return cd, nil } + +func (self *UsageRecord) GetId() string { + return utils.Sha1(self.ToR, self.RequestType, self.Direction, self.Tenant, self.Category, self.Account, self.Subject, self.Destination, self.SetupTime, self.AnswerTime, self.Usage) +} diff --git a/engine/cdr_local_test.go b/engine/cdr_local_test.go index 823e8f6d5..edf7c8880 100644 --- a/engine/cdr_local_test.go +++ b/engine/cdr_local_test.go @@ -19,10 +19,12 @@ along with this program. 
If not, see package engine import ( + "encoding/json" "flag" - "github.com/cgrates/cgrates/utils" "testing" "time" + + "github.com/cgrates/cgrates/utils" ) // Arguments received via test command @@ -42,7 +44,8 @@ func TestHttpJsonPost(t *testing.T) { RunID: utils.DEFAULT_RUNID, Usage: "0.00000001", ExtraFields: map[string]string{"field_extr1": "val_extr1", "fieldextr2": "valextr2"}, Cost: 1.01, } - if _, err := utils.HttpJsonPost("http://localhost:8000", false, cdrOut); err == nil { + jsn, _ := json.Marshal(cdrOut) + if _, err := utils.HttpJsonPost("http://localhost:8000", false, jsn); err == nil { t.Error(err) } } diff --git a/engine/cdr_test.go b/engine/cdr_test.go index 4aabc0df1..af2529958 100644 --- a/engine/cdr_test.go +++ b/engine/cdr_test.go @@ -566,9 +566,7 @@ func TestUsageReqAsCD(t *testing.T) { Account: "1001", Subject: "1001", Destination: "1002", SetupTime: "2013-11-07T08:42:20Z", AnswerTime: "2013-11-07T08:42:26Z", Usage: "0.00000001", } - eCD := &CallDescriptor{TOR: req.ToR, Direction: req.Direction, - Tenant: req.Tenant, Category: req.Category, Account: req.Account, Subject: req.Subject, Destination: req.Destination, - TimeStart: time.Date(2013, 11, 7, 8, 42, 26, 0, time.UTC), TimeEnd: time.Date(2013, 11, 7, 8, 42, 26, 0, time.UTC).Add(time.Duration(10))} + eCD := &CallDescriptor{CgrID: "9473e7b2e075d168b9da10ae957ee68fe5a217e4", TOR: req.ToR, Direction: req.Direction, Tenant: req.Tenant, Category: req.Category, Account: req.Account, Subject: req.Subject, Destination: req.Destination, TimeStart: time.Date(2013, 11, 7, 8, 42, 26, 0, time.UTC), TimeEnd: time.Date(2013, 11, 7, 8, 42, 26, 0, time.UTC).Add(time.Duration(10))} if cd, err := req.AsCallDescriptor(""); err != nil { t.Error(err) } else if !reflect.DeepEqual(eCD, cd) { diff --git a/engine/cdrs.go b/engine/cdrs.go index 27cd49dc6..293069549 100644 --- a/engine/cdrs.go +++ b/engine/cdrs.go @@ -19,17 +19,19 @@ along with this program. 
If not, see package engine import ( + "encoding/json" "fmt" "io/ioutil" "net/http" "path" + "reflect" + "strings" "time" + "github.com/cgrates/cgrates/cache2go" "github.com/cgrates/cgrates/config" "github.com/cgrates/cgrates/utils" "github.com/cgrates/rpcclient" - "github.com/jinzhu/gorm" - mgov2 "gopkg.in/mgo.v2" ) var cdrServer *CdrServer // Share the server so we can use it in http handlers @@ -38,6 +40,7 @@ type CallCostLog struct { CgrId string Source string RunId string + Usage float64 // real usage (not increment rounded) CallCost *CallCost CheckDuplicate bool } @@ -67,24 +70,51 @@ func fsCdrHandler(w http.ResponseWriter, r *http.Request) { } } -func NewCdrServer(cgrCfg *config.CGRConfig, cdrDb CdrStorage, rater Connector, pubsub rpcclient.RpcClientConnection, users UserService, aliases AliasService, stats StatsInterface) (*CdrServer, error) { - return &CdrServer{cgrCfg: cgrCfg, cdrDb: cdrDb, rater: rater, pubsub: pubsub, users: users, aliases: aliases, stats: stats, guard: &GuardianLock{locksMap: make(map[string]chan bool)}}, nil +func NewCdrServer(cgrCfg *config.CGRConfig, cdrDb CdrStorage, rater, pubsub, users, aliases, stats rpcclient.RpcClientConnection) (*CdrServer, error) { + if rater == nil || reflect.ValueOf(rater).IsNil() { // Work around so we store actual nil instead of nil interface value, faster to check here than in CdrServer code + rater = nil + } + if pubsub == nil || reflect.ValueOf(pubsub).IsNil() { + pubsub = nil + } + if users == nil || reflect.ValueOf(users).IsNil() { + users = nil + } + if aliases == nil || reflect.ValueOf(aliases).IsNil() { + aliases = nil + } + if stats == nil || reflect.ValueOf(stats).IsNil() { + stats = nil + } + return &CdrServer{cgrCfg: cgrCfg, cdrDb: cdrDb, rals: rater, pubsub: pubsub, users: users, aliases: aliases, stats: stats, guard: Guardian}, nil } type CdrServer struct { - cgrCfg *config.CGRConfig - cdrDb CdrStorage - rater Connector - pubsub rpcclient.RpcClientConnection - users UserService - aliases 
AliasService - stats StatsInterface - guard *GuardianLock + cgrCfg *config.CGRConfig + cdrDb CdrStorage + rals rpcclient.RpcClientConnection + pubsub rpcclient.RpcClientConnection + users rpcclient.RpcClientConnection + aliases rpcclient.RpcClientConnection + stats rpcclient.RpcClientConnection + guard *GuardianLock + responseCache *cache2go.ResponseCache } func (self *CdrServer) Timezone() string { return self.cgrCfg.DefaultTimezone } +func (self *CdrServer) SetTimeToLive(timeToLive time.Duration, out *int) error { + self.responseCache = cache2go.NewResponseCache(timeToLive) + return nil +} + +func (self *CdrServer) getCache() *cache2go.ResponseCache { + if self.responseCache == nil { + self.responseCache = cache2go.NewResponseCache(0) + } + return self.responseCache +} func (self *CdrServer) RegisterHandlersToServer(server *utils.Server) { cdrServer = self // Share the server object for handlers @@ -92,12 +122,12 @@ func (self *CdrServer) RegisterHandlersToServer(server *utils.Server) { server.RegisterHttpFunc("/freeswitch_json", fsCdrHandler) } -// RPC method, used to internally process CDR -func (self *CdrServer) ProcessCdr(cdr *CDR) error { +// Used to internally process CDR +func (self *CdrServer) LocalProcessCdr(cdr *CDR) error { return self.processCdr(cdr) } -// RPC method, used to process external CDRs +// Used to process external CDRs func (self *CdrServer) ProcessExternalCdr(eCDR *ExternalCDR) error { cdr, err := NewCDRFromExternalCDR(eCDR, self.cgrCfg.DefaultTimezone) if err != nil { @@ -106,54 +136,24 @@ func (self *CdrServer) ProcessExternalCdr(eCDR *ExternalCDR) error { return self.processCdr(cdr) } -// RPC method, used to log callcosts to db -func (self *CdrServer) LogCallCost(ccl *CallCostLog) error { - ccl.CallCost.UpdateCost() // make sure the total cost reflect the increments - ccl.CallCost.UpdateRatedUsage() // make sure rated usage is updated - if ccl.CheckDuplicate { +func (self *CdrServer) storeSMCost(smCost *SMCost, checkDuplicate bool) 
error { + smCost.CostDetails.UpdateCost() // make sure the total cost reflect the increments + smCost.CostDetails.UpdateRatedUsage() // make sure rated usage is updated + lockKey := utils.CDRS_SOURCE + smCost.CGRID + smCost.RunID + smCost.OriginID // Will lock on this ID + if checkDuplicate { _, err := self.guard.Guard(func() (interface{}, error) { - cc, err := self.cdrDb.GetCallCostLog(ccl.CgrId, ccl.RunId) - if err != nil && err != gorm.RecordNotFound && err != mgov2.ErrNotFound { + smCosts, err := self.cdrDb.GetSMCosts(smCost.CGRID, smCost.RunID, "", "") + if err != nil { return nil, err } - if cc != nil { + if len(smCosts) != 0 { return nil, utils.ErrExists } - return nil, self.cdrDb.LogCallCost(ccl.CgrId, ccl.RunId, ccl.Source, ccl.CallCost) - }, 0, ccl.CgrId) + return nil, self.cdrDb.SetSMCost(smCost) + }, time.Duration(2*time.Second), lockKey) // FixMe: Possible deadlock with Guard from SMG session close() return err } - return self.cdrDb.LogCallCost(ccl.CgrId, ccl.RunId, ccl.Source, ccl.CallCost) -} - -// Called by rate/re-rate API -func (self *CdrServer) RateCDRs(cdrFltr *utils.CDRsFilter, sendToStats bool) error { - cdrs, _, err := self.cdrDb.GetCDRs(cdrFltr, false) - if err != nil { - return err - } - for _, cdr := range cdrs { - // replace user profile fields - if err := LoadUserProfile(cdr, utils.EXTRA_FIELDS); err != nil { - return err - } - // replace aliases for cases they were loaded after CDR received - if err := LoadAlias(&AttrMatchingAlias{ - Destination: cdr.Destination, - Direction: cdr.Direction, - Tenant: cdr.Tenant, - Category: cdr.Category, - Account: cdr.Account, - Subject: cdr.Subject, - Context: utils.ALIAS_CONTEXT_RATING, - }, cdr, utils.EXTRA_FIELDS); err != nil && err != utils.ErrNotFound { - return err - } - if err := self.rateStoreStatsReplicate(cdr, sendToStats); err != nil { - utils.Logger.Err(fmt.Sprintf(" Processing CDR %+v, got error: %s", cdr, err.Error())) - } - } - return nil + return self.cdrDb.SetSMCost(smCost) } // 
Returns error if not able to properly store the CDR, mediation is async since we can always recover offline @@ -173,7 +173,7 @@ func (self *CdrServer) processCdr(cdr *CDR) (err error) { if cdr.Subject == "" { // Use account information as rating subject if missing cdr.Subject = cdr.Account } - if !cdr.Rated { + if !cdr.Rated { // Enforce the RunID if CDR is not rated cdr.RunID = utils.MetaRaw } if self.cgrCfg.CDRSStoreCdrs { // Store RawCDRs, this we do sync so we can reply with the status @@ -181,88 +181,103 @@ func (self *CdrServer) processCdr(cdr *CDR) (err error) { cdr.CostDetails.UpdateCost() cdr.CostDetails.UpdateRatedUsage() } - if err := self.cdrDb.SetCDR(cdr, false); err != nil { // Only original CDR stored in primary table, no derived + if err := self.cdrDb.SetCDR(cdr, false); err != nil { utils.Logger.Err(fmt.Sprintf(" Storing primary CDR %+v, got error: %s", cdr, err.Error())) return err // Error is propagated back and we don't continue processing the CDR if we cannot store it } } - go self.deriveRateStoreStatsReplicate(cdr) + // Attach raw CDR to stats + if self.stats != nil { // Send raw CDR to stats + var out int + go self.stats.Call("CDRStatsV1.AppendCDR", cdr, &out) + } + if len(self.cgrCfg.CDRSCdrReplication) != 0 { // Replicate raw CDR + go self.replicateCdr(cdr) + } + + if self.rals != nil && !cdr.Rated { // CDRs not rated will be processed by Rating + go self.deriveRateStoreStatsReplicate(cdr, self.cgrCfg.CDRSStoreCdrs, self.stats != nil, len(self.cgrCfg.CDRSCdrReplication) != 0) + } return nil } // Returns error if not able to properly store the CDR, mediation is async since we can always recover offline -func (self *CdrServer) deriveRateStoreStatsReplicate(cdr *CDR) error { +func (self *CdrServer) deriveRateStoreStatsReplicate(cdr *CDR, store, stats, replicate bool) error { cdrRuns, err := self.deriveCdrs(cdr) if err != nil { return err } + var ratedCDRs []*CDR // Gather all CDRs received from rating subsystem for _, cdrRun := range cdrRuns { 
- if err := self.rateStoreStatsReplicate(cdrRun, true); err != nil { - return err + if err := LoadUserProfile(cdrRun, utils.EXTRA_FIELDS); err != nil { + utils.Logger.Err(fmt.Sprintf(" UserS handling for CDR %+v, got error: %s", cdrRun, err.Error())) + continue + } + if err := LoadAlias(&AttrMatchingAlias{ + Destination: cdrRun.Destination, + Direction: cdrRun.Direction, + Tenant: cdrRun.Tenant, + Category: cdrRun.Category, + Account: cdrRun.Account, + Subject: cdrRun.Subject, + Context: utils.ALIAS_CONTEXT_RATING, + }, cdrRun, utils.EXTRA_FIELDS); err != nil && err != utils.ErrNotFound { + utils.Logger.Err(fmt.Sprintf(" Aliasing CDR %+v, got error: %s", cdrRun, err.Error())) + continue + } + rcvRatedCDRs, err := self.rateCDR(cdrRun) + if err != nil { + cdrRun.Cost = -1.0 // If there was an error, mark the CDR + cdrRun.ExtraInfo = err.Error() + rcvRatedCDRs = []*CDR{cdrRun} + } + ratedCDRs = append(ratedCDRs, rcvRatedCDRs...) + } + // Request should be processed by SureTax + for _, ratedCDR := range ratedCDRs { + if ratedCDR.RunID == utils.META_SURETAX { + if err := SureTaxProcessCdr(ratedCDR); err != nil { + ratedCDR.Cost = -1.0 + ratedCDR.ExtraInfo = err.Error() // Something failed, write the error in the ExtraInfo + } } } - return nil -} - -func (self *CdrServer) rateStoreStatsReplicate(cdr *CDR, sendToStats bool) error { - if cdr.RunID == utils.MetaRaw { // Overwrite *raw with *default for rating - cdr.RunID = utils.META_DEFAULT - } - if err := LoadUserProfile(cdr, utils.EXTRA_FIELDS); err != nil { - return err - } - if err := LoadAlias(&AttrMatchingAlias{ - Destination: cdr.Destination, - Direction: cdr.Direction, - Tenant: cdr.Tenant, - Category: cdr.Category, - Account: cdr.Account, - Subject: cdr.Subject, - Context: utils.ALIAS_CONTEXT_RATING, - }, cdr, utils.EXTRA_FIELDS); err != nil && err != utils.ErrNotFound { - return err - } - - // Rate CDR - if self.rater != nil && !cdr.Rated { - if err := self.rateCDR(cdr); err != nil { - cdr.Cost = -1.0 // If there 
was an error, mark the CDR - cdr.ExtraInfo = err.Error() - } - } - if cdr.RunID == utils.META_SURETAX { // Request should be processed by SureTax - if err := SureTaxProcessCdr(cdr); err != nil { - cdr.Cost = -1.0 - cdr.ExtraInfo = err.Error() // Something failed, write the error in the ExtraInfo - } - } - if self.cgrCfg.CDRSStoreCdrs { // Store CDRs - // Store RatedCDR - if cdr.CostDetails != nil { - cdr.CostDetails.UpdateCost() - cdr.CostDetails.UpdateRatedUsage() - } - if err := self.cdrDb.SetCDR(cdr, true); err != nil { - utils.Logger.Err(fmt.Sprintf(" Storing rated CDR %+v, got error: %s", cdr, err.Error())) + // Store rated CDRs + if store { + for _, ratedCDR := range ratedCDRs { + if ratedCDR.CostDetails != nil { + ratedCDR.CostDetails.UpdateCost() + ratedCDR.CostDetails.UpdateRatedUsage() + } + if err := self.cdrDb.SetCDR(ratedCDR, true); err != nil { + utils.Logger.Err(fmt.Sprintf(" Storing rated CDR %+v, got error: %s", ratedCDR, err.Error())) + } } } // Attach CDR to stats - if self.stats != nil && sendToStats { // Send CDR to stats - if err := self.stats.AppendCDR(cdr, nil); err != nil { - utils.Logger.Err(fmt.Sprintf(" Could not append CDR to stats: %s", err.Error())) + if stats { // Send CDR to stats + for _, ratedCDR := range ratedCDRs { + var out int + if err := self.stats.Call("CDRStatsV1.AppendCDR", ratedCDR, &out); err != nil { + utils.Logger.Err(fmt.Sprintf(" Could not send CDR to stats: %s", err.Error())) + } } } - if len(self.cgrCfg.CDRSCdrReplication) != 0 { - self.replicateCdr(cdr) + if replicate { + for _, ratedCDR := range ratedCDRs { + self.replicateCdr(ratedCDR) + } } return nil } func (self *CdrServer) deriveCdrs(cdr *CDR) ([]*CDR, error) { - cdrRuns := []*CDR{cdr} + dfltCDRRun := cdr.Clone() + cdrRuns := []*CDR{dfltCDRRun} if cdr.RunID != utils.MetaRaw { // Only derive *raw CDRs return cdrRuns, nil } + dfltCDRRun.RunID = utils.META_DEFAULT // Rewrite *raw with *default since we have it as first run if err := LoadUserProfile(cdr, 
utils.EXTRA_FIELDS); err != nil { return nil, err } @@ -277,11 +292,10 @@ func (self *CdrServer) deriveCdrs(cdr *CDR) ([]*CDR, error) { }, cdr, utils.EXTRA_FIELDS); err != nil && err != utils.ErrNotFound { return nil, err } - attrsDC := &utils.AttrDerivedChargers{Tenant: cdr.Tenant, Category: cdr.Category, Direction: cdr.Direction, Account: cdr.Account, Subject: cdr.Subject, Destination: cdr.Destination} var dcs utils.DerivedChargers - if err := self.rater.GetDerivedChargers(attrsDC, &dcs); err != nil { + if err := self.rals.Call("Responder.GetDerivedChargers", attrsDC, &dcs); err != nil { utils.Logger.Err(fmt.Sprintf("Could not get derived charging for cgrid %s, error: %s", cdr.CGRID, err.Error())) return nil, err } @@ -326,37 +340,56 @@ func (self *CdrServer) deriveCdrs(cdr *CDR) ([]*CDR, error) { return cdrRuns, nil } -func (self *CdrServer) rateCDR(cdr *CDR) error { +// rateCDR will populate cost field +// Returns more than one rated CDR in case of SMCost retrieved based on prefix +func (self *CdrServer) rateCDR(cdr *CDR) ([]*CDR, error) { var qryCC *CallCost var err error if cdr.RequestType == utils.META_NONE { - return nil + return nil, nil } - if utils.IsSliceMember([]string{utils.META_PREPAID, utils.PREPAID}, cdr.RequestType) && cdr.Usage != 0 { // ToDo: Get rid of PREPAID as soon as we don't want to support it backwards + var cdrsRated []*CDR + _, hasLastUsed := cdr.ExtraFields[utils.LastUsed] + if utils.IsSliceMember([]string{utils.META_PREPAID, utils.PREPAID}, cdr.RequestType) && (cdr.Usage != 0 || hasLastUsed) { // ToDo: Get rid of PREPAID as soon as we don't want to support it backwards // Should be previously calculated and stored in DB delay := utils.Fib() + var smCosts []*SMCost for i := 0; i < 4; i++ { - qryCC, err = self.cdrDb.GetCallCostLog(cdr.CGRID, cdr.RunID) - if err == nil { + smCosts, err = self.cdrDb.GetSMCosts(cdr.CGRID, cdr.RunID, cdr.OriginHost, cdr.ExtraFields[utils.OriginIDPrefix]) + if err == nil && len(smCosts) != 0 { break } - 
time.Sleep(delay()) + if i != 3 { + time.Sleep(delay()) + } } - if err != nil && (err == gorm.RecordNotFound || err == mgov2.ErrNotFound) { //calculate CDR as for pseudoprepaid + if len(smCosts) != 0 { // Cost retrieved from SMCost table + for _, smCost := range smCosts { + cdrClone := cdr.Clone() + cdrClone.OriginID = smCost.OriginID + if cdr.Usage == 0 { + cdrClone.Usage = time.Duration(smCost.Usage * utils.NANO_MULTIPLIER) // Usage is float as seconds, convert back to duration + } + cdrClone.Cost = smCost.CostDetails.Cost + cdrClone.CostDetails = smCost.CostDetails + cdrsRated = append(cdrsRated, cdrClone) + } + return cdrsRated, nil + } + if len(smCosts) == 0 { //calculate CDR as for pseudoprepaid utils.Logger.Warning(fmt.Sprintf(" WARNING: Could not find CallCostLog for cgrid: %s, source: %s, runid: %s, will recalculate", cdr.CGRID, utils.SESSION_MANAGER_SOURCE, cdr.RunID)) qryCC, err = self.getCostFromRater(cdr) } - } else { qryCC, err = self.getCostFromRater(cdr) } if err != nil { - return err + return nil, err } else if qryCC != nil { cdr.Cost = qryCC.Cost cdr.CostDetails = qryCC } - return nil + return []*CDR{cdr}, nil } // Retrive the cost from engine @@ -381,9 +414,9 @@ func (self *CdrServer) getCostFromRater(cdr *CDR) (*CallCost, error) { PerformRounding: true, } if utils.IsSliceMember([]string{utils.META_PSEUDOPREPAID, utils.META_POSTPAID, utils.META_PREPAID, utils.PSEUDOPREPAID, utils.POSTPAID, utils.PREPAID}, cdr.RequestType) { // Prepaid - Cost can be recalculated in case of missing records from SM - err = self.rater.Debit(cd, cc) + err = self.rals.Call("Responder.Debit", cd, cc) } else { - err = self.rater.GetCost(cd, cc) + err = self.rals.Call("Responder.GetCost", cd, cc) } if err != nil { return cc, err @@ -412,7 +445,11 @@ func (self *CdrServer) replicateCdr(cdr *CDR) error { body = cdr.AsHttpForm() case utils.META_HTTP_JSON: content = utils.CONTENT_JSON - body = cdr + jsn, err := json.Marshal(cdr) + if err != nil { + return err + } + body = jsn 
} errChan := make(chan error) go func(body interface{}, rplCfg *config.CdrReplicationCfg, content string, errChan chan error) { @@ -420,7 +457,7 @@ func (self *CdrServer) replicateCdr(cdr *CDR) error { self.cgrCfg.HttpFailedDir, rplCfg.FallbackFileName()) _, err := utils.HttpPoster( - rplCfg.Server, self.cgrCfg.HttpSkipTlsVerify, body, + rplCfg.Address, self.cgrCfg.HttpSkipTlsVerify, body, content, rplCfg.Attempts, fallbackPath) if err != nil { utils.Logger.Err(fmt.Sprintf( @@ -437,3 +474,103 @@ func (self *CdrServer) replicateCdr(cdr *CDR) error { } return nil } + +// Called by rate/re-rate API, FixMe: deprecate it once new APIer structure is operational +func (self *CdrServer) RateCDRs(cdrFltr *utils.CDRsFilter, sendToStats bool) error { + cdrs, _, err := self.cdrDb.GetCDRs(cdrFltr, false) + if err != nil { + return err + } + for _, cdr := range cdrs { + if err := self.deriveRateStoreStatsReplicate(cdr, self.cgrCfg.CDRSStoreCdrs, sendToStats, len(self.cgrCfg.CDRSCdrReplication) != 0); err != nil { + utils.Logger.Err(fmt.Sprintf(" Processing CDR %+v, got error: %s", cdr, err.Error())) + } + } + return nil +} + +func (self *CdrServer) V1ProcessCDR(cdr *CDR, reply *string) error { + cacheKey := "ProcessCdr" + cdr.CGRID + if item, err := self.getCache().Get(cacheKey); err == nil && item != nil { + *reply = item.Value.(string) + return item.Err + } + if err := self.LocalProcessCdr(cdr); err != nil { + self.getCache().Cache(cacheKey, &cache2go.CacheItem{Err: err}) + return utils.NewErrServerError(err) + } + self.getCache().Cache(cacheKey, &cache2go.CacheItem{Value: utils.OK}) + *reply = utils.OK + return nil +} + +// Alias, deprecated after removing CdrServerV1.ProcessCdr +func (self *CdrServer) V1ProcessCdr(cdr *CDR, reply *string) error { + return self.V1ProcessCDR(cdr, reply) +} + +// RPC method, differs from storeSMCost through it's signature +func (self *CdrServer) V1StoreSMCost(attr AttrCDRSStoreSMCost, reply *string) error { + if err := 
self.storeSMCost(attr.Cost, attr.CheckDuplicate); err != nil { + return utils.NewErrServerError(err) + } + *reply = utils.OK + return nil +} + +// Called by rate/re-rate API, RPC method +func (self *CdrServer) V1RateCDRs(attrs utils.AttrRateCDRs, reply *string) error { + cdrFltr, err := attrs.RPCCDRsFilter.AsCDRsFilter(self.cgrCfg.DefaultTimezone) + if err != nil { + return utils.NewErrServerError(err) + } + cdrs, _, err := self.cdrDb.GetCDRs(cdrFltr, false) + if err != nil { + return err + } + storeCDRs := self.cgrCfg.CDRSStoreCdrs + if attrs.StoreCDRs != nil { + storeCDRs = *attrs.StoreCDRs + } + sendToStats := self.stats != nil + if attrs.SendToStatS != nil { + sendToStats = *attrs.SendToStatS + } + replicate := len(self.cgrCfg.CDRSCdrReplication) != 0 + if attrs.ReplicateCDRs != nil { + replicate = *attrs.ReplicateCDRs + } + for _, cdr := range cdrs { + if err := self.deriveRateStoreStatsReplicate(cdr, storeCDRs, sendToStats, replicate); err != nil { + utils.Logger.Err(fmt.Sprintf(" Processing CDR %+v, got error: %s", cdr, err.Error())) + } + } + return nil +} + +func (cdrsrv *CdrServer) Call(serviceMethod string, args interface{}, reply interface{}) error { + parts := strings.Split(serviceMethod, ".") + if len(parts) != 2 { + return utils.ErrNotImplemented + } + // get method + method := reflect.ValueOf(cdrsrv).MethodByName(parts[0][len(parts[0])-2:] + parts[1]) // Inherit the version in the method + if !method.IsValid() { + return utils.ErrNotImplemented + } + + // construct the params + params := []reflect.Value{reflect.ValueOf(args), reflect.ValueOf(reply)} + ret := method.Call(params) + if len(ret) != 1 { + return utils.ErrServerError + } + if ret[0].Interface() == nil { + return nil + } + err, ok := ret[0].Interface().(error) + if !ok { + return utils.ErrServerError + } + return err +} diff --git a/engine/guardian.go b/engine/guardian.go index dc2975aff..6d6664e39 100644 --- a/engine/guardian.go +++ b/engine/guardian.go @@ -26,10 +26,6 @@ import ( // 
global package variable var Guardian = &GuardianLock{locksMap: make(map[string]chan bool)} -func NewGuardianLock() *GuardianLock { - return &GuardianLock{locksMap: make(map[string]chan bool)} -} - type GuardianLock struct { locksMap map[string]chan bool mu sync.RWMutex diff --git a/engine/guardian_test.go b/engine/guardian_test.go index a8e3cc4f8..6e436e9d5 100644 --- a/engine/guardian_test.go +++ b/engine/guardian_test.go @@ -19,31 +19,33 @@ along with this program. If not, see package engine import ( - "log" "testing" "time" ) -func ATestAccountLock(t *testing.T) { +func BenchmarkGuard(b *testing.B) { for i := 0; i < 100; i++ { go Guardian.Guard(func() (interface{}, error) { - log.Print("first 1") time.Sleep(1 * time.Millisecond) - log.Print("end first 1") return 0, nil }, 0, "1") go Guardian.Guard(func() (interface{}, error) { - log.Print("first 2") time.Sleep(1 * time.Millisecond) - log.Print("end first 2") return 0, nil }, 0, "2") go Guardian.Guard(func() (interface{}, error) { - log.Print("second 1") time.Sleep(1 * time.Millisecond) - log.Print("end second 1") return 0, nil }, 0, "1") } - time.Sleep(10 * time.Second) + +} + +func BenchmarkGuardian(b *testing.B) { + for i := 0; i < 100; i++ { + go Guardian.Guard(func() (interface{}, error) { + time.Sleep(1 * time.Millisecond) + return 0, nil + }, 0, "1") + } } diff --git a/engine/handler_derivedcharging.go b/engine/handler_derivedcharging.go index 52f48eb03..e845ac1dd 100644 --- a/engine/handler_derivedcharging.go +++ b/engine/handler_derivedcharging.go @@ -50,12 +50,10 @@ func DerivedChargersMatchesDest(dcs *utils.DerivedChargers, dest string) bool { for _, p := range utils.SplitPrefix(dest, MIN_PREFIX_MATCH) { if x, err := cache2go.Get(utils.DESTINATION_PREFIX + p); err == nil { destIds := x.(map[interface{}]struct{}) - for value := range dcs.DestinationIDs { - for idId := range destIds { - dId := idId.(string) - if value == dId { - return true - } + for dId := range destIds { + includeDest, found := 
dcs.DestinationIDs[dId.(string)] + if found { + return includeDest } } } diff --git a/engine/handler_derivedcharging_test.go b/engine/handler_derivedcharging_test.go index 8a2c98f54..bb793b9ba 100644 --- a/engine/handler_derivedcharging_test.go +++ b/engine/handler_derivedcharging_test.go @@ -119,3 +119,21 @@ func TestHandleDeivedChargersMatchDestNatRet(t *testing.T) { t.Error("Derived charger failed to match dest") } } + +func TestHandleDeivedChargersMatchDestSpec(t *testing.T) { + dcs := &utils.DerivedChargers{ + DestinationIDs: utils.NewStringMap("NAT", "SPEC"), + } + if !DerivedChargersMatchesDest(dcs, "0723045326") { + t.Error("Derived charger failed to match dest") + } +} + +func TestHandleDeivedChargersMatchDestNegativeSpec(t *testing.T) { + dcs := &utils.DerivedChargers{ + DestinationIDs: utils.NewStringMap("NAT", "!SPEC"), + } + if DerivedChargersMatchesDest(dcs, "0723045326") { + t.Error("Derived charger failed to match dest") + } +} diff --git a/engine/history_test.go b/engine/history_test.go index 3e51c5a4a..e3d797e4b 100644 --- a/engine/history_test.go +++ b/engine/history_test.go @@ -47,6 +47,7 @@ func TestHistoryDestinations(t *testing.T) { {"Id":"PSTN_71","Prefixes":["+4971"]}, {"Id":"PSTN_72","Prefixes":["+4972"]}, {"Id":"RET","Prefixes":["0723","0724"]}, +{"Id":"SPEC","Prefixes":["0723045"]}, {"Id":"URG","Prefixes":["112"]}` if !strings.Contains(buf.String(), expected) { t.Error("Error in destination history content:", buf.String()) diff --git a/engine/lcr_test.go b/engine/lcr_test.go index ddf112efb..49f201cff 100644 --- a/engine/lcr_test.go +++ b/engine/lcr_test.go @@ -210,6 +210,24 @@ func TestLcrGet(t *testing.T) { } } +func TestLcrGetPrefix(t *testing.T) { + lcrSubjectPrefixMatching = true + cd := &CallDescriptor{ + TimeStart: time.Date(2015, 04, 06, 17, 40, 0, 0, time.UTC), + TimeEnd: time.Date(2015, 04, 06, 17, 41, 0, 0, time.UTC), + Tenant: "cgrates.org", + Direction: "*in", + Category: "call", + Destination: "0723098765", + Account: 
"rif", + Subject: "rifus", + } + lcr, err := cd.GetLCR(nil, nil) + if err != nil || lcr == nil { + t.Errorf("Bad lcr: %+v, %v", lcr, err) + } +} + func TestLcrRequestAsCallDescriptor(t *testing.T) { sTime := time.Date(2015, 04, 06, 17, 40, 0, 0, time.UTC) callDur := time.Duration(1) * time.Minute diff --git a/engine/libengine.go b/engine/libengine.go new file mode 100644 index 000000000..b555f596a --- /dev/null +++ b/engine/libengine.go @@ -0,0 +1,57 @@ +/* +Real-time Charging System for Telecom & ISP environments +Copyright (C) ITsysCOM GmbH + +This program is free software: you can Storagetribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITH*out ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. 
If not, see +*/ + +package engine + +import ( + "errors" + "time" + + "github.com/cgrates/cgrates/config" + "github.com/cgrates/cgrates/utils" + "github.com/cgrates/rpcclient" +) + +func NewRPCPool(dispatchStrategy string, connAttempts, reconnects int, codec string, + rpcConnCfgs []*config.HaPoolConfig, internalConnChan chan rpcclient.RpcClientConnection, ttl time.Duration) (*rpcclient.RpcClientPool, error) { + var rpcClient *rpcclient.RpcClient + var err error + rpcPool := rpcclient.NewRpcClientPool(dispatchStrategy) + for _, rpcConnCfg := range rpcConnCfgs { + if rpcConnCfg.Address == utils.MetaInternal { + var internalConn rpcclient.RpcClientConnection + select { + case internalConn = <-internalConnChan: + internalConnChan <- internalConn + case <-time.After(ttl): + return nil, errors.New("TTL triggered") + } + rpcClient, err = rpcclient.NewRpcClient("", "", 0, 0, rpcclient.INTERNAL_RPC, internalConn) + } else { + rpcClient, err = rpcclient.NewRpcClient("tcp", rpcConnCfg.Address, connAttempts, reconnects, codec, nil) + } + if err != nil { + break + } + rpcPool.AddClient(rpcClient) + } + if err != nil { + return nil, err + } + return rpcPool, nil +} diff --git a/engine/loader_csv_test.go b/engine/loader_csv_test.go index 7c17f02fe..fbcf9e8f2 100644 --- a/engine/loader_csv_test.go +++ b/engine/loader_csv_test.go @@ -42,6 +42,7 @@ NAT,0723 NAT,+49 RET,0723 RET,0724 +SPEC,0723045 PSTN_71,+4971 PSTN_72,+4972 PSTN_70,+4970 @@ -172,11 +173,14 @@ EE0,*topup_reset,,,,*monetary,*out,,,,SG3,*unlimited,,0,10,false,false,10 EE0,*allow_negative,,,,*monetary,*out,,,,,*unlimited,,0,10,false,false,10 DEFEE,*cdrlog,"{""Category"":""^ddi"",""MediationRunId"":""^did_run""}",,,,,,,,,,,,,false,false,10 NEG,*allow_negative,,,,*monetary,*out,,,,,*unlimited,,0,10,false,false,10 -BLOCK,*topup,,,bblocker,*monetary,*out,,NAT,,,*unlimited,,10,20,true,false,20 +BLOCK,*topup,,,bblocker,*monetary,*out,,NAT,,,*unlimited,,1,20,true,false,20 
BLOCK,*topup,,,bfree,*monetary,*out,,,,,*unlimited,,20,10,false,false,10 +BLOCK_EMPTY,*topup,,,bblocker,*monetary,*out,,NAT,,,*unlimited,,0,20,true,false,20 +BLOCK_EMPTY,*topup,,,bfree,*monetary,*out,,,,,*unlimited,,20,10,false,false,10 FILTER,*topup,,"{""*and"":[{""Value"":{""*lt"":0}},{""Id"":{""*eq"":""*default""}}]}",bfree,*monetary,*out,,,,,*unlimited,,20,10,false,false,10 EXP,*topup,,,,*voice,*out,,,,,*monthly,*any,300,10,false,false,10 NOEXP,*topup,,,,*voice,*out,,,,,*unlimited,*any,50,10,false,false,10 +VF,*debit,,,,*monetary,*out,,,,,*unlimited,*any,"{""Method"":""*incremental"",""Params"":{""Units"":10, ""Interval"":""month"", ""Increment"":""day""}}",10,false,false,10 ` actionPlans = ` MORE_MINUTES,MINI,ONE_TIME_RUN,10 @@ -188,6 +192,7 @@ TOPUP_SHARED10_AT,SE10,*asap,10 TOPUP_EMPTY_AT,EE0,*asap,10 POST_AT,NEG,*asap,10 BLOCK_AT,BLOCK,*asap,10 +BLOCK_EMPTY_AT,BLOCK_EMPTY,*asap,10 EXP_AT,EXP,*asap,10 ` @@ -216,8 +221,10 @@ vdf,emptyY,TOPUP_EMPTY_AT,,, vdf,post,POST_AT,,, cgrates.org,alodis,TOPUP_EMPTY_AT,,true,true cgrates.org,block,BLOCK_AT,,false,false +cgrates.org,block_empty,BLOCK_EMPTY_AT,,false,false cgrates.org,expo,EXP_AT,,false,false cgrates.org,expnoexp,,,false,false +cgrates.org,vf,,,false,false ` derivedCharges = ` @@ -315,7 +322,7 @@ func init() { } func TestLoadDestinations(t *testing.T) { - if len(csvr.destinations) != 12 { + if len(csvr.destinations) != 13 { t.Error("Failed to load destinations: ", len(csvr.destinations)) } for _, d := range csvr.destinations { @@ -820,13 +827,13 @@ func TestLoadRatingProfiles(t *testing.T) { } func TestLoadActions(t *testing.T) { - if len(csvr.actions) != 13 { + if len(csvr.actions) != 15 { t.Error("Failed to load actions: ", len(csvr.actions)) } as1 := csvr.actions["MINI"] expected := []*Action{ &Action{ - Id: "MINI0", + Id: "MINI", ActionType: TOPUP_RESET, ExpirationString: UNLIMITED, ExtraParameters: "", @@ -835,7 +842,7 @@ func TestLoadActions(t *testing.T) { Type: utils.StringPointer(utils.MONETARY), 
Uuid: as1[0].Balance.Uuid, Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT)), - Value: utils.Float64Pointer(10), + Value: &utils.ValueFormula{Static: 10}, Weight: utils.Float64Pointer(10), DestinationIDs: nil, TimingIDs: nil, @@ -846,7 +853,7 @@ func TestLoadActions(t *testing.T) { }, }, &Action{ - Id: "MINI1", + Id: "MINI", ActionType: TOPUP, ExpirationString: UNLIMITED, ExtraParameters: "", @@ -855,7 +862,7 @@ func TestLoadActions(t *testing.T) { Type: utils.StringPointer(utils.VOICE), Uuid: as1[1].Balance.Uuid, Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT)), - Value: utils.Float64Pointer(100), + Value: &utils.ValueFormula{Static: 100}, Weight: utils.Float64Pointer(10), RatingSubject: utils.StringPointer("test"), DestinationIDs: utils.StringMapPointer(utils.NewStringMap("NAT")), @@ -868,12 +875,12 @@ func TestLoadActions(t *testing.T) { }, } if !reflect.DeepEqual(as1, expected) { - t.Errorf("Error loading action1: %+v", as1[0].Balance) + t.Errorf("Error loading action1: %s", utils.ToIJSON(as1)) } as2 := csvr.actions["SHARED"] expected = []*Action{ &Action{ - Id: "SHARED0", + Id: "SHARED", ActionType: TOPUP, ExpirationString: UNLIMITED, Weight: 10, @@ -882,7 +889,7 @@ func TestLoadActions(t *testing.T) { Directions: utils.StringMapPointer(utils.NewStringMap(utils.OUT)), DestinationIDs: nil, Uuid: as2[0].Balance.Uuid, - Value: utils.Float64Pointer(100), + Value: &utils.ValueFormula{Static: 100}, Weight: utils.Float64Pointer(10), SharedGroups: utils.StringMapPointer(utils.NewStringMap("SG1")), TimingIDs: nil, @@ -893,12 +900,12 @@ func TestLoadActions(t *testing.T) { }, } if !reflect.DeepEqual(as2, expected) { - t.Errorf("Error loading action: %+v", as2[0].Balance) + t.Errorf("Error loading action: %s", utils.ToIJSON(as2)) } as3 := csvr.actions["DEFEE"] expected = []*Action{ &Action{ - Id: "DEFEE0", + Id: "DEFEE", ActionType: CDRLOG, ExtraParameters: `{"Category":"^ddi","MediationRunId":"^did_run"}`, Weight: 10, @@ -1006,7 +1013,7 
@@ func TestLoadLCRs(t *testing.T) { } func TestLoadActionTimings(t *testing.T) { - if len(csvr.actionPlans) != 8 { + if len(csvr.actionPlans) != 9 { t.Error("Failed to load action timings: ", len(csvr.actionPlans)) } atm := csvr.actionPlans["MORE_MINUTES"] @@ -1101,7 +1108,7 @@ func TestLoadActionTriggers(t *testing.T) { } func TestLoadAccountActions(t *testing.T) { - if len(csvr.accountActions) != 14 { + if len(csvr.accountActions) != 16 { t.Error("Failed to load account actions: ", len(csvr.accountActions)) } aa := csvr.accountActions["vdf:minitsboy"] diff --git a/engine/models.go b/engine/models.go index 85ce94362..127c04757 100644 --- a/engine/models.go +++ b/engine/models.go @@ -440,7 +440,7 @@ type TBLCDRs struct { ExtraInfo string CreatedAt time.Time UpdatedAt time.Time - DeletedAt time.Time + DeletedAt *time.Time } func (t TBLCDRs) TableName() string { @@ -451,10 +451,13 @@ type TBLSMCosts struct { ID int64 Cgrid string RunID string + OriginHost string + OriginID string CostSource string + Usage float64 CostDetails string CreatedAt time.Time - DeletedAt time.Time + DeletedAt *time.Time } func (t TBLSMCosts) TableName() string { diff --git a/engine/pubsub.go b/engine/pubsub.go index 2ae544df0..fa031d6aa 100644 --- a/engine/pubsub.go +++ b/engine/pubsub.go @@ -1,6 +1,7 @@ package engine import ( + "encoding/json" "errors" "fmt" "sync" @@ -28,13 +29,6 @@ func (ce CgrEvent) PassFilters(rsrFields utils.RSRFields) bool { return true } -type PublisherSubscriber interface { - Subscribe(SubscribeInfo, *string) error - Unsubscribe(SubscribeInfo, *string) error - Publish(CgrEvent, *string) error - ShowSubscribers(string, *map[string]*SubscriberData) error -} - type SubscriberData struct { ExpTime time.Time Filters utils.RSRFields @@ -43,7 +37,7 @@ type SubscriberData struct { type PubSub struct { subscribers map[string]*SubscriberData ttlVerify bool - pubFunc func(string, bool, interface{}) ([]byte, error) + pubFunc func(string, bool, []byte) ([]byte, error) mux 
*sync.Mutex accountDb AccountingStorage } @@ -139,13 +133,17 @@ func (ps *PubSub) Publish(evt CgrEvent, reply *string) error { } transport := split[0] address := split[1] - + ttlVerify := ps.ttlVerify + jsn, err := json.Marshal(evt) + if err != nil { + return err + } switch transport { case utils.META_HTTP_POST: go func() { delay := utils.Fib() for i := 0; i < 5; i++ { // Loop so we can increase the success rate on best effort - if _, err := ps.pubFunc(address, ps.ttlVerify, evt); err == nil { + if _, err := ps.pubFunc(address, ttlVerify, jsn); err == nil { break // Success, no need to reinterate } else if i == 4 { // Last iteration, syslog the warning utils.Logger.Warning(fmt.Sprintf(" Failed calling url: [%s], error: [%s], event type: %s", address, err.Error(), evt["EventName"])) @@ -165,7 +163,6 @@ func (ps *PubSub) ShowSubscribers(in string, out *map[string]*SubscriberData) er return nil } -// rpcclient.RpcClientConnection interface func (ps *PubSub) Call(serviceMethod string, args interface{}, reply interface{}) error { switch serviceMethod { case "PubSubV1.Subscribe": diff --git a/engine/pubsub_test.go b/engine/pubsub_test.go index 5345a06c0..ac28837a8 100644 --- a/engine/pubsub_test.go +++ b/engine/pubsub_test.go @@ -116,43 +116,9 @@ func TestUnsubscribeSave(t *testing.T) { } } -func TestPublish(t *testing.T) { - ps := NewPubSub(accountingStorage, true) - ps.pubFunc = func(url string, ttl bool, obj interface{}) ([]byte, error) { - obj.(CgrEvent)["called"] = url - return nil, nil - } - var r string - if err := ps.Subscribe(SubscribeInfo{ - EventFilter: "EventName/test", - Transport: utils.META_HTTP_POST, - Address: "url", - LifeSpan: time.Second, - }, &r); err != nil { - t.Error("Error subscribing: ", err) - } - m := make(map[string]string) - m["EventFilter"] = "test" - if err := ps.Publish(m, &r); err != nil { - t.Error("Error publishing: ", err) - } - for i := 0; i < 1000; i++ { // wait for the theread to populate map - if len(m) == 2 { - 
time.Sleep(time.Microsecond) - } else { - break - } - } - if r, exists := m["called"]; !exists || r != "url" { - t.Error("Error calling publish function: ", m) - } -} - func TestPublishExpired(t *testing.T) { ps := NewPubSub(accountingStorage, true) - ps.pubFunc = func(url string, ttl bool, obj interface{}) ([]byte, error) { - m := obj.(map[string]string) - m["called"] = "yes" + ps.pubFunc = func(url string, ttl bool, obj []byte) ([]byte, error) { return nil, nil } var r string @@ -174,9 +140,7 @@ func TestPublishExpired(t *testing.T) { func TestPublishExpiredSave(t *testing.T) { ps := NewPubSub(accountingStorage, true) - ps.pubFunc = func(url string, ttl bool, obj interface{}) ([]byte, error) { - m := obj.(map[string]string) - m["called"] = "yes" + ps.pubFunc = func(url string, ttl bool, obj []byte) ([]byte, error) { return nil, nil } var r string diff --git a/engine/rateinterval.go b/engine/rateinterval.go index 0ddcb241d..0b0b13ed2 100644 --- a/engine/rateinterval.go +++ b/engine/rateinterval.go @@ -289,7 +289,7 @@ Returns true if the received time result inside the interval */ func (i *RateInterval) Contains(t time.Time, endTime bool) bool { if endTime { - if t.Hour() == 0 && t.Minute() == 0 && t.Second() == 0 { // back one second to 23:59:59 + if utils.TimeIs0h(t) { // back one second to 23:59:59 t = t.Add(-1 * time.Second) } } @@ -360,23 +360,6 @@ func (ri *RateInterval) GetMaxCost() (float64, string) { // Structure to store intervals according to weight type RateIntervalList []*RateInterval -func (il RateIntervalList) Len() int { - return len(il) -} - -func (il RateIntervalList) Swap(i, j int) { - il[i], il[j] = il[j], il[i] -} - -// we need higher weights earlyer in the list -func (il RateIntervalList) Less(j, i int) bool { - return il[i].Weight < il[j].Weight //|| il[i].Timing.StartTime > il[j].Timing.StartTime -} - -func (il RateIntervalList) Sort() { - sort.Sort(il) -} - // Structure to store intervals according to weight type RateIntervalTimeSorter 
struct { referenceTime time.Time @@ -393,6 +376,9 @@ func (il *RateIntervalTimeSorter) Swap(i, j int) { // we need higher weights earlyer in the list func (il *RateIntervalTimeSorter) Less(j, i int) bool { + if il.ris[i].Weight < il.ris[j].Weight { + return il.ris[i].Weight < il.ris[j].Weight + } t1 := il.ris[i].Timing.getLeftMargin(il.referenceTime) t2 := il.ris[j].Timing.getLeftMargin(il.referenceTime) return t1.After(t2) diff --git a/engine/ratingplan.go b/engine/ratingplan.go index 3f1214477..fd902ed8e 100644 --- a/engine/ratingplan.go +++ b/engine/ratingplan.go @@ -59,12 +59,7 @@ func (rp *RatingPlan) RateIntervalList(dId string) RateIntervalList { return ril } -/* -type xCachedRatingPlan struct { - rp *RatingPlan - *cache2go.XEntry -} -*/ +// no sorter because it's sorted with RateIntervalTimeSorter /* Adds one ore more intervals to the internal interval list only if it is not allready in the list. diff --git a/engine/ratingprofile.go b/engine/ratingprofile.go index f0ebf0ac3..e72665a22 100644 --- a/engine/ratingprofile.go +++ b/engine/ratingprofile.go @@ -92,28 +92,36 @@ type RatingInfo struct { // SelectRatingIntevalsForTimespan orders rate intervals in time preserving only those which aply to the specified timestamp func (ri RatingInfo) SelectRatingIntevalsForTimespan(ts *TimeSpan) (result RateIntervalList) { - ri.RateIntervals.Sort() sorter := &RateIntervalTimeSorter{referenceTime: ts.TimeStart, ris: ri.RateIntervals} rateIntervals := sorter.Sort() // get the rating interval closest to begining of timespan var delta time.Duration = -1 var bestRateIntervalIndex int + var bestIntervalWeight float64 for index, rateInterval := range rateIntervals { if !rateInterval.Contains(ts.TimeStart, false) { continue } + if rateInterval.Weight < bestIntervalWeight { + break // don't consider lower weights' + } startTime := rateInterval.Timing.getLeftMargin(ts.TimeStart) tmpDelta := ts.TimeStart.Sub(startTime) if (startTime.Before(ts.TimeStart) || 
startTime.Equal(ts.TimeStart)) && (delta == -1 || tmpDelta < delta) { bestRateIntervalIndex = index + bestIntervalWeight = rateInterval.Weight delta = tmpDelta } } result = append(result, rateIntervals[bestRateIntervalIndex]) // check if later rating intervals influence this timespan + //log.Print("RIS: ", utils.ToIJSON(rateIntervals)) for i := bestRateIntervalIndex + 1; i < len(rateIntervals); i++ { + if rateIntervals[i].Weight < bestIntervalWeight { + break // don't consider lower weights' + } startTime := rateIntervals[i].Timing.getLeftMargin(ts.TimeStart) if startTime.Before(ts.TimeEnd) { result = append(result, rateIntervals[i]) @@ -256,8 +264,9 @@ func RatingProfileSubjectPrefixMatching(key string) (rp *RatingProfile, err erro lastIndex := strings.LastIndex(key, utils.CONCATENATED_KEY_SEP) baseKey := key[:lastIndex] subject := key[lastIndex:] - for i := 1; i < len(subject)-1; i++ { - if rp, err = ratingStorage.GetRatingProfile(baseKey+subject[:len(subject)-i], false); err == nil { + lenSubject := len(subject) + for i := 1; i < lenSubject-1; i++ { + if rp, err = ratingStorage.GetRatingProfile(baseKey+subject[:lenSubject-i], false); err == nil { return rp, err } } diff --git a/engine/ratingprofile_test.go b/engine/ratingprofile_test.go index 4f929029a..08cfecacd 100644 --- a/engine/ratingprofile_test.go +++ b/engine/ratingprofile_test.go @@ -250,6 +250,143 @@ func TestRatingProfileRIforTSMidnight(t *testing.T) { } } +func TestRatingProfileYearMonthDay(t *testing.T) { + ri := &RatingInfo{ + RateIntervals: RateIntervalList{ + &RateInterval{ + Timing: &RITiming{ + StartTime: "09:00:00", + }, + }, + &RateInterval{ + Timing: &RITiming{ + StartTime: "00:00:00", + }, + }, + &RateInterval{ + Timing: &RITiming{ + Years: utils.Years{2016}, + Months: utils.Months{1}, + MonthDays: utils.MonthDays{6, 7}, + WeekDays: utils.WeekDays{}, + StartTime: "19:00:00", + }, + }, + }, + } + ts := &TimeSpan{ + TimeStart: time.Date(2016, 1, 6, 23, 40, 0, 0, time.UTC), + TimeEnd: 
time.Date(2016, 1, 7, 1, 1, 30, 0, time.UTC), + } + rIntervals := ri.SelectRatingIntevalsForTimespan(ts) + if len(rIntervals) != 1 || + rIntervals[0].Timing.StartTime != "19:00:00" { + t.Error("Wrong interval list: ", utils.ToIJSON(rIntervals)) + } +} + +func TestRatingProfileWeighted(t *testing.T) { + ri := &RatingInfo{ + RateIntervals: RateIntervalList{ + &RateInterval{ + Timing: &RITiming{ + StartTime: "09:00:00", + }, + Weight: 10, + }, + &RateInterval{ + Timing: &RITiming{ + StartTime: "00:00:00", + }, + Weight: 10, + }, + &RateInterval{ + Timing: &RITiming{ + StartTime: "19:00:00", + }, + Weight: 10, + }, + &RateInterval{ + Timing: &RITiming{ + Years: utils.Years{2016}, + Months: utils.Months{1}, + MonthDays: utils.MonthDays{6}, + WeekDays: utils.WeekDays{}, + StartTime: "00:00:00", + }, + Weight: 11, + }, + }, + } + ts := &TimeSpan{ + TimeStart: time.Date(2016, 1, 6, 23, 40, 0, 0, time.UTC), + TimeEnd: time.Date(2016, 1, 6, 23, 45, 30, 0, time.UTC), + } + rIntervals := ri.SelectRatingIntevalsForTimespan(ts) + if len(rIntervals) != 1 || + rIntervals[0].Timing.StartTime != "00:00:00" || + rIntervals[0].Weight != 11 { + t.Error("Wrong interval list: ", utils.ToIJSON(rIntervals)) + } +} + +func TestRatingProfileWeightedMultiple(t *testing.T) { + ri := &RatingInfo{ + RateIntervals: RateIntervalList{ + &RateInterval{ + Timing: &RITiming{ + StartTime: "09:00:00", + }, + Weight: 10, + }, + &RateInterval{ + Timing: &RITiming{ + StartTime: "00:00:00", + }, + Weight: 10, + }, + &RateInterval{ + Timing: &RITiming{ + StartTime: "19:00:00", + }, + Weight: 10, + }, + &RateInterval{ + Timing: &RITiming{ + Years: utils.Years{2016}, + Months: utils.Months{1}, + MonthDays: utils.MonthDays{6}, + WeekDays: utils.WeekDays{}, + StartTime: "00:00:00", + }, + Weight: 11, + }, + &RateInterval{ + Timing: &RITiming{ + Years: utils.Years{2016}, + Months: utils.Months{1}, + MonthDays: utils.MonthDays{6}, + WeekDays: utils.WeekDays{}, + StartTime: "18:00:00", + }, + Weight: 11, + }, + }, 
+ } + ts := &TimeSpan{ + TimeStart: time.Date(2016, 1, 6, 17, 40, 0, 0, time.UTC), + TimeEnd: time.Date(2016, 1, 6, 23, 45, 30, 0, time.UTC), + } + rIntervals := ri.SelectRatingIntevalsForTimespan(ts) + if len(rIntervals) != 2 || + rIntervals[0].Timing.StartTime != "00:00:00" || + rIntervals[0].Weight != 11 || + rIntervals[1].Timing.StartTime != "18:00:00" || + rIntervals[1].Weight != 11 { + t.Error("Wrong interval list: ", utils.ToIJSON(rIntervals)) + } +} + func TestRatingProfileSubjectPrefixMatching(t *testing.T) { rpSubjectPrefixMatching = true rp, err := RatingProfileSubjectPrefixMatching("*out:cgrates.org:data:rif") diff --git a/engine/responder.go b/engine/responder.go index b6d12c545..751d3299c 100644 --- a/engine/responder.go +++ b/engine/responder.go @@ -49,21 +49,16 @@ type AttrGetLcr struct { type Responder struct { Bal *balancer2go.Balancer ExitChan chan bool - CdrSrv *CdrServer - Stats StatsInterface + Stats rpcclient.RpcClientConnection Timeout time.Duration Timezone string cnt int64 responseCache *cache2go.ResponseCache } -func NewResponder(exitChan chan bool, cdrSrv *CdrServer, stats StatsInterface, timeout, timeToLive time.Duration) *Responder { - return &Responder{ - ExitChan: exitChan, - Stats: stats, - Timeout: timeToLive, - responseCache: cache2go.NewResponseCache(timeToLive), - } +func (rs *Responder) SetTimeToLive(timeToLive time.Duration, out *int) error { + rs.responseCache = cache2go.NewResponseCache(timeToLive) + return nil } func (rs *Responder) getCache() *cache2go.ResponseCache { @@ -98,7 +93,6 @@ func (rs *Responder) GetCost(arg *CallDescriptor, reply *CallCost) (err error) { }, arg, utils.EXTRA_FIELDS); err != nil && err != utils.ErrNotFound { return err } - if rs.Bal != nil { r, e := rs.getCallCost(arg, "Responder.GetCost") *reply, err = *r, e @@ -153,7 +147,8 @@ func (rs *Responder) Debit(arg *CallDescriptor, reply *CallCost) (err error) { } func (rs *Responder) MaxDebit(arg *CallDescriptor, reply *CallCost) (err error) { - if 
item, err := rs.getCache().Get(utils.MAX_DEBIT_CACHE_PREFIX + arg.CgrID + arg.RunID); err == nil && item != nil { + cacheKey := utils.MAX_DEBIT_CACHE_PREFIX + arg.CgrID + arg.RunID + arg.DurationIndex.String() + if item, err := rs.getCache().Get(cacheKey); err == nil && item != nil { *reply = *(item.Value.(*CallCost)) return item.Err } @@ -184,7 +179,7 @@ func (rs *Responder) MaxDebit(arg *CallDescriptor, reply *CallCost) (err error) } else { r, e := arg.MaxDebit() if e != nil { - rs.getCache().Cache(utils.MAX_DEBIT_CACHE_PREFIX+arg.CgrID+arg.RunID, &cache2go.CacheItem{ + rs.getCache().Cache(cacheKey, &cache2go.CacheItem{ Err: e, }) return e @@ -192,7 +187,7 @@ func (rs *Responder) MaxDebit(arg *CallDescriptor, reply *CallCost) (err error) *reply = *r } } - rs.getCache().Cache(utils.MAX_DEBIT_CACHE_PREFIX+arg.CgrID+arg.RunID, &cache2go.CacheItem{ + rs.getCache().Cache(cacheKey, &cache2go.CacheItem{ Value: reply, Err: err, }) @@ -200,7 +195,8 @@ func (rs *Responder) MaxDebit(arg *CallDescriptor, reply *CallCost) (err error) } func (rs *Responder) RefundIncrements(arg *CallDescriptor, reply *float64) (err error) { - if item, err := rs.getCache().Get(utils.REFUND_INCR_CACHE_PREFIX + arg.CgrID + arg.RunID); err == nil && item != nil { + cacheKey := utils.REFUND_INCR_CACHE_PREFIX + arg.CgrID + arg.RunID + if item, err := rs.getCache().Get(cacheKey); err == nil && item != nil { *reply = *(item.Value.(*float64)) return item.Err } @@ -222,6 +218,9 @@ func (rs *Responder) RefundIncrements(arg *CallDescriptor, reply *float64) (err Subject: arg.Subject, Context: utils.ALIAS_CONTEXT_RATING, }, arg, utils.EXTRA_FIELDS); err != nil && err != utils.ErrNotFound { + rs.getCache().Cache(cacheKey, &cache2go.CacheItem{ + Err: err, + }) return err } @@ -230,7 +229,7 @@ func (rs *Responder) RefundIncrements(arg *CallDescriptor, reply *float64) (err } else { err = arg.RefundIncrements() } - rs.getCache().Cache(utils.REFUND_INCR_CACHE_PREFIX+arg.CgrID+arg.RunID, &cache2go.CacheItem{ + 
rs.getCache().Cache(cacheKey, &cache2go.CacheItem{ Value: reply, Err: err, }) @@ -238,7 +237,8 @@ func (rs *Responder) RefundIncrements(arg *CallDescriptor, reply *float64) (err } func (rs *Responder) RefundRounding(arg *CallDescriptor, reply *float64) (err error) { - if item, err := rs.getCache().Get(utils.REFUND_ROUND_CACHE_PREFIX + arg.CgrID + arg.RunID); err == nil && item != nil { + cacheKey := utils.REFUND_ROUND_CACHE_PREFIX + arg.CgrID + arg.RunID + arg.DurationIndex.String() + if item, err := rs.getCache().Get(cacheKey); err == nil && item != nil { *reply = *(item.Value.(*float64)) return item.Err } @@ -260,6 +260,9 @@ func (rs *Responder) RefundRounding(arg *CallDescriptor, reply *float64) (err er Subject: arg.Subject, Context: utils.ALIAS_CONTEXT_RATING, }, arg, utils.EXTRA_FIELDS); err != nil && err != utils.ErrNotFound { + rs.getCache().Cache(cacheKey, &cache2go.CacheItem{ + Err: err, + }) return err } @@ -268,7 +271,7 @@ func (rs *Responder) RefundRounding(arg *CallDescriptor, reply *float64) (err er } else { err = arg.RefundRounding() } - rs.getCache().Cache(utils.REFUND_ROUND_CACHE_PREFIX+arg.CgrID+arg.RunID, &cache2go.CacheItem{ + rs.getCache().Cache(cacheKey, &cache2go.CacheItem{ Value: reply, Err: err, }) @@ -311,11 +314,17 @@ func (rs *Responder) GetDerivedMaxSessionTime(ev *CDR, reply *float64) error { if rs.Bal != nil { return errors.New("unsupported method on the balancer") } + cacheKey := utils.GET_DERIV_MAX_SESS_TIME + ev.CGRID + ev.RunID + if item, err := rs.getCache().Get(cacheKey); err == nil && item != nil { + *reply = *(item.Value.(*float64)) + return item.Err + } if ev.Subject == "" { ev.Subject = ev.Account } // replace user profile fields if err := LoadUserProfile(ev, utils.EXTRA_FIELDS); err != nil { + rs.getCache().Cache(cacheKey, &cache2go.CacheItem{Err: err}) return err } // replace aliases @@ -329,6 +338,7 @@ func (rs *Responder) GetDerivedMaxSessionTime(ev *CDR, reply *float64) error { Subject: ev.Subject, Context: 
utils.ALIAS_CONTEXT_RATING, }, ev, utils.EXTRA_FIELDS); err != nil && err != utils.ErrNotFound { + rs.getCache().Cache(cacheKey, &cache2go.CacheItem{Err: err}) return err } @@ -337,6 +347,7 @@ func (rs *Responder) GetDerivedMaxSessionTime(ev *CDR, reply *float64) error { Account: ev.GetAccount(utils.META_DEFAULT), Subject: ev.GetSubject(utils.META_DEFAULT)} dcs := &utils.DerivedChargers{} if err := rs.GetDerivedChargers(attrsDC, dcs); err != nil { + rs.getCache().Cache(cacheKey, &cache2go.CacheItem{Err: err}) return err } dcs, _ = dcs.AppendDefaultRun() @@ -357,10 +368,12 @@ func (rs *Responder) GetDerivedMaxSessionTime(ev *CDR, reply *float64) error { } startTime, err := ev.GetSetupTime(utils.META_DEFAULT, rs.Timezone) if err != nil { + rs.getCache().Cache(cacheKey, &cache2go.CacheItem{Err: err}) return err } usage, err := ev.GetDuration(utils.META_DEFAULT) if err != nil { + rs.getCache().Cache(cacheKey, &cache2go.CacheItem{Err: err}) return err } if usage == 0 { @@ -368,7 +381,7 @@ func (rs *Responder) GetDerivedMaxSessionTime(ev *CDR, reply *float64) error { } cd := &CallDescriptor{ CgrID: ev.GetCgrId(rs.Timezone), - RunID: ev.RunID, + RunID: dc.RunID, TOR: ev.ToR, Direction: ev.GetDirection(dc.DirectionField), Tenant: ev.GetTenant(dc.TenantField), @@ -383,6 +396,7 @@ func (rs *Responder) GetDerivedMaxSessionTime(ev *CDR, reply *float64) error { err = rs.GetMaxSessionTime(cd, &remainingDuration) if err != nil { *reply = 0 + rs.getCache().Cache(cacheKey, &cache2go.CacheItem{Err: err}) return err } if utils.IsSliceMember([]string{utils.META_POSTPAID, utils.POSTPAID}, ev.GetReqType(dc.RequestTypeField)) { @@ -396,6 +410,7 @@ func (rs *Responder) GetDerivedMaxSessionTime(ev *CDR, reply *float64) error { maxCallDuration = remainingDuration } } + rs.getCache().Cache(cacheKey, &cache2go.CacheItem{Value: maxCallDuration}) *reply = maxCallDuration return nil } @@ -405,6 +420,11 @@ func (rs *Responder) GetSessionRuns(ev *CDR, sRuns *[]*SessionRun) error { if rs.Bal != nil 
{ return errors.New("Unsupported method on the balancer") } + cacheKey := utils.GET_SESS_RUNS_CACHE_PREFIX + ev.CGRID + if item, err := rs.getCache().Get(cacheKey); err == nil && item != nil { + *sRuns = *(item.Value.(*[]*SessionRun)) + return item.Err + } if ev.Subject == "" { ev.Subject = ev.Account } @@ -433,7 +453,7 @@ func (rs *Responder) GetSessionRuns(ev *CDR, sRuns *[]*SessionRun) error { //utils.Logger.Info(fmt.Sprintf("Derived chargers for: %+v", attrsDC)) dcs := &utils.DerivedChargers{} if err := rs.GetDerivedChargers(attrsDC, dcs); err != nil { - rs.getCache().Cache(utils.GET_SESS_RUNS_CACHE_PREFIX+ev.CGRID, &cache2go.CacheItem{ + rs.getCache().Cache(cacheKey, &cache2go.CacheItem{ Err: err, }) return err @@ -447,22 +467,18 @@ func (rs *Responder) GetSessionRuns(ev *CDR, sRuns *[]*SessionRun) error { } startTime, err := ev.GetAnswerTime(dc.AnswerTimeField, rs.Timezone) if err != nil { - rs.getCache().Cache(utils.GET_SESS_RUNS_CACHE_PREFIX+ev.CGRID, &cache2go.CacheItem{ - Err: err, - }) + rs.getCache().Cache(cacheKey, &cache2go.CacheItem{Err: err}) return errors.New("Error parsing answer event start time") } endTime, err := ev.GetEndTime("", rs.Timezone) if err != nil { - rs.getCache().Cache(utils.GET_SESS_RUNS_CACHE_PREFIX+ev.CGRID, &cache2go.CacheItem{ - Err: err, - }) + rs.getCache().Cache(cacheKey, &cache2go.CacheItem{Err: err}) return errors.New("Error parsing answer event end time") } extraFields := ev.GetExtraFields() cd := &CallDescriptor{ CgrID: ev.GetCgrId(rs.Timezone), - RunID: ev.RunID, + RunID: dc.RunID, TOR: ev.ToR, Direction: ev.GetDirection(dc.DirectionField), Tenant: ev.GetTenant(dc.TenantField), @@ -483,9 +499,7 @@ func (rs *Responder) GetSessionRuns(ev *CDR, sRuns *[]*SessionRun) error { } //utils.Logger.Info(fmt.Sprintf("RUNS: %v", len(sesRuns))) *sRuns = sesRuns - rs.getCache().Cache(utils.GET_SESS_RUNS_CACHE_PREFIX+ev.CGRID, &cache2go.CacheItem{ - Value: sRuns, - }) + rs.getCache().Cache(cacheKey, &cache2go.CacheItem{Value: sRuns}) 
return nil } @@ -501,43 +515,12 @@ func (rs *Responder) GetDerivedChargers(attrs *utils.AttrDerivedChargers, dcs *u return nil } -func (rs *Responder) ProcessCdr(cdr *CDR, reply *string) error { - if rs.CdrSrv == nil { - return errors.New("CDR_SERVER_NOT_RUNNING") - } - if err := rs.CdrSrv.ProcessCdr(cdr); err != nil { - return err - } - *reply = utils.OK - return nil -} - -func (rs *Responder) LogCallCost(ccl *CallCostLog, reply *string) error { - if item, err := rs.getCache().Get(utils.LOG_CALL_COST_CACHE_PREFIX + ccl.CgrId); err == nil && item != nil { - *reply = item.Value.(string) +func (rs *Responder) GetLCR(attrs *AttrGetLcr, reply *LCRCost) error { + cacheKey := utils.LCRCachePrefix + attrs.CgrID + attrs.RunID + if item, err := rs.getCache().Get(cacheKey); err == nil && item != nil { + *reply = *(item.Value.(*LCRCost)) return item.Err } - if rs.CdrSrv == nil { - err := errors.New("CDR_SERVER_NOT_RUNNING") - rs.getCache().Cache(utils.LOG_CALL_COST_CACHE_PREFIX+ccl.CgrId, &cache2go.CacheItem{ - Err: err, - }) - return err - } - if err := rs.CdrSrv.LogCallCost(ccl); err != nil { - rs.getCache().Cache(utils.LOG_CALL_COST_CACHE_PREFIX+ccl.CgrId, &cache2go.CacheItem{ - Err: err, - }) - return err - } - *reply = utils.OK - rs.getCache().Cache(utils.LOG_CALL_COST_CACHE_PREFIX+ccl.CgrId, &cache2go.CacheItem{ - Value: utils.OK, - }) - return nil -} - -func (rs *Responder) GetLCR(attrs *AttrGetLcr, reply *LCRCost) error { if attrs.CallDescriptor.Subject == "" { attrs.CallDescriptor.Subject = attrs.CallDescriptor.Account } @@ -557,11 +540,12 @@ func (rs *Responder) GetLCR(attrs *AttrGetLcr, reply *LCRCost) error { Subject: cd.Subject, Context: utils.ALIAS_CONTEXT_RATING, }, cd, utils.EXTRA_FIELDS); err != nil && err != utils.ErrNotFound { + rs.getCache().Cache(cacheKey, &cache2go.CacheItem{Err: err}) return err } - lcrCost, err := attrs.CallDescriptor.GetLCR(rs.Stats, attrs.Paginator) if err != nil { + rs.getCache().Cache(cacheKey, &cache2go.CacheItem{Err: err}) return 
err } if lcrCost.Entry != nil && lcrCost.Entry.Strategy == LCR_STRATEGY_LOAD { @@ -569,6 +553,7 @@ func (rs *Responder) GetLCR(attrs *AttrGetLcr, reply *LCRCost) error { suppl.Cost = -1 // In case of load distribution we don't calculate costs } } + rs.getCache().Cache(cacheKey, &cache2go.CacheItem{Value: lcrCost}) *reply = *lcrCost return nil } @@ -589,11 +574,12 @@ func (rs *Responder) Status(arg string, reply *map[string]interface{}) (err erro memstats := new(runtime.MemStats) runtime.ReadMemStats(memstats) response := make(map[string]interface{}) + response[utils.InstanceID] = config.CgrConfig().InstanceID if rs.Bal != nil { response["Raters"] = rs.Bal.GetClientAddresses() } - response["memstat"] = memstats.HeapAlloc / 1024 - response["footprint"] = memstats.Sys / 1024 + response["memstat"] = utils.SizeFmt(float64(memstats.HeapAlloc), "") + response["footprint"] = utils.SizeFmt(float64(memstats.Sys), "") *reply = response return } @@ -693,359 +679,29 @@ func (rs *Responder) GetTimeout(i int, d *time.Duration) error { return nil } -// Reflection worker type for not standalone balancer -type ResponderWorker struct{} - -func (rw *ResponderWorker) Call(serviceMethod string, args interface{}, reply interface{}) error { - methodName := strings.TrimLeft(serviceMethod, "Responder.") - switch args.(type) { - case CallDescriptor: - cd := args.(CallDescriptor) - switch reply.(type) { - case *CallCost: - rep := reply.(*CallCost) - method := reflect.ValueOf(&cd).MethodByName(methodName) - ret := method.Call([]reflect.Value{}) - *rep = *(ret[0].Interface().(*CallCost)) - case *float64: - rep := reply.(*float64) - method := reflect.ValueOf(&cd).MethodByName(methodName) - ret := method.Call([]reflect.Value{}) - *rep = *(ret[0].Interface().(*float64)) - } - case string: - switch methodName { - case "Status": - *(reply.(*string)) = "Local!" - case "Shutdown": - *(reply.(*string)) = "Done!" 
- } - +func (rs *Responder) Call(serviceMethod string, args interface{}, reply interface{}) error { + parts := strings.Split(serviceMethod, ".") + if len(parts) != 2 { + return utils.ErrNotImplemented } - return nil -} - -func (rw *ResponderWorker) Close() error { - return nil -} - -type Connector interface { - GetCost(*CallDescriptor, *CallCost) error - Debit(*CallDescriptor, *CallCost) error - MaxDebit(*CallDescriptor, *CallCost) error - RefundIncrements(*CallDescriptor, *float64) error - RefundRounding(*CallDescriptor, *float64) error - GetMaxSessionTime(*CallDescriptor, *float64) error - GetDerivedChargers(*utils.AttrDerivedChargers, *utils.DerivedChargers) error - GetDerivedMaxSessionTime(*CDR, *float64) error - GetSessionRuns(*CDR, *[]*SessionRun) error - ProcessCdr(*CDR, *string) error - LogCallCost(*CallCostLog, *string) error - GetLCR(*AttrGetLcr, *LCRCost) error - GetTimeout(int, *time.Duration) error -} - -type RPCClientConnector struct { - Client *rpcclient.RpcClient - Timeout time.Duration -} - -func (rcc *RPCClientConnector) GetCost(cd *CallDescriptor, cc *CallCost) error { - return rcc.Client.Call("Responder.GetCost", cd, cc) -} - -func (rcc *RPCClientConnector) Debit(cd *CallDescriptor, cc *CallCost) error { - return rcc.Client.Call("Responder.Debit", cd, cc) -} - -func (rcc *RPCClientConnector) MaxDebit(cd *CallDescriptor, cc *CallCost) error { - return rcc.Client.Call("Responder.MaxDebit", cd, cc) -} - -func (rcc *RPCClientConnector) RefundIncrements(cd *CallDescriptor, resp *float64) error { - return rcc.Client.Call("Responder.RefundIncrements", cd, resp) -} - -func (rcc *RPCClientConnector) RefundRounding(cd *CallDescriptor, resp *float64) error { - return rcc.Client.Call("Responder.RefundRounding", cd, resp) -} - -func (rcc *RPCClientConnector) GetMaxSessionTime(cd *CallDescriptor, resp *float64) error { - return rcc.Client.Call("Responder.GetMaxSessionTime", cd, resp) -} - -func (rcc *RPCClientConnector) GetDerivedMaxSessionTime(ev *CDR, reply 
*float64) error { - return rcc.Client.Call("Responder.GetDerivedMaxSessionTime", ev, reply) -} - -func (rcc *RPCClientConnector) GetSessionRuns(ev *CDR, sRuns *[]*SessionRun) error { - return rcc.Client.Call("Responder.GetSessionRuns", ev, sRuns) -} - -func (rcc *RPCClientConnector) GetDerivedChargers(attrs *utils.AttrDerivedChargers, dcs *utils.DerivedChargers) error { - return rcc.Client.Call("ApierV1.GetDerivedChargers", attrs, dcs) -} - -func (rcc *RPCClientConnector) ProcessCdr(cdr *CDR, reply *string) error { - return rcc.Client.Call("CdrsV1.ProcessCdr", cdr, reply) -} - -func (rcc *RPCClientConnector) LogCallCost(ccl *CallCostLog, reply *string) error { - return rcc.Client.Call("CdrsV1.LogCallCost", ccl, reply) -} - -func (rcc *RPCClientConnector) GetLCR(attrs *AttrGetLcr, reply *LCRCost) error { - return rcc.Client.Call("Responder.GetLCR", attrs, reply) -} - -func (rcc *RPCClientConnector) GetTimeout(i int, d *time.Duration) error { - *d = rcc.Timeout - return nil -} - -type ConnectorPool []Connector - -func (cp ConnectorPool) GetCost(cd *CallDescriptor, cc *CallCost) error { - for _, con := range cp { - c := make(chan error, 1) - callCost := &CallCost{} - - var timeout time.Duration - con.GetTimeout(0, &timeout) - - go func() { c <- con.GetCost(cd, callCost) }() - select { - case err := <-c: - *cc = *callCost - return err - case <-time.After(timeout): - // call timed out, continue - } + // get method + method := reflect.ValueOf(rs).MethodByName(parts[1]) + if !method.IsValid() { + return utils.ErrNotImplemented } - return utils.ErrTimedOut -} + // construct the params + params := []reflect.Value{reflect.ValueOf(args), reflect.ValueOf(reply)} -func (cp ConnectorPool) Debit(cd *CallDescriptor, cc *CallCost) error { - for _, con := range cp { - c := make(chan error, 1) - callCost := &CallCost{} - - var timeout time.Duration - con.GetTimeout(0, &timeout) - - go func() { c <- con.Debit(cd, callCost) }() - select { - case err := <-c: - *cc = *callCost - return 
err - case <-time.After(timeout): - // call timed out, continue - } + ret := method.Call(params) + if len(ret) != 1 { + return utils.ErrServerError } - return utils.ErrTimedOut -} - -func (cp ConnectorPool) MaxDebit(cd *CallDescriptor, cc *CallCost) error { - for _, con := range cp { - c := make(chan error, 1) - callCost := &CallCost{} - - var timeout time.Duration - con.GetTimeout(0, &timeout) - - go func() { c <- con.MaxDebit(cd, callCost) }() - select { - case err := <-c: - *cc = *callCost - return err - case <-time.After(timeout): - // call timed out, continue - } + if ret[0].Interface() == nil { + return nil } - return utils.ErrTimedOut -} - -func (cp ConnectorPool) RefundIncrements(cd *CallDescriptor, resp *float64) error { - for _, con := range cp { - c := make(chan error, 1) - var r float64 - - var timeout time.Duration - con.GetTimeout(0, &timeout) - - go func() { c <- con.RefundIncrements(cd, &r) }() - select { - case err := <-c: - *resp = r - return err - case <-time.After(timeout): - // call timed out, continue - } + err, ok := ret[0].Interface().(error) + if !ok { + return utils.ErrServerError } - return utils.ErrTimedOut -} - -func (cp ConnectorPool) RefundRounding(cd *CallDescriptor, resp *float64) error { - for _, con := range cp { - c := make(chan error, 1) - var r float64 - - var timeout time.Duration - con.GetTimeout(0, &timeout) - - go func() { c <- con.RefundRounding(cd, &r) }() - select { - case err := <-c: - *resp = r - return err - case <-time.After(timeout): - // call timed out, continue - } - } - return utils.ErrTimedOut -} - -func (cp ConnectorPool) GetMaxSessionTime(cd *CallDescriptor, resp *float64) error { - for _, con := range cp { - c := make(chan error, 1) - var r float64 - - var timeout time.Duration - con.GetTimeout(0, &timeout) - - go func() { c <- con.GetMaxSessionTime(cd, &r) }() - select { - case err := <-c: - *resp = r - return err - case <-time.After(timeout): - // call timed out, continue - } - } - return utils.ErrTimedOut 
-} - -func (cp ConnectorPool) GetDerivedMaxSessionTime(ev *CDR, reply *float64) error { - for _, con := range cp { - c := make(chan error, 1) - var r float64 - - var timeout time.Duration - con.GetTimeout(0, &timeout) - - go func() { c <- con.GetDerivedMaxSessionTime(ev, &r) }() - select { - case err := <-c: - *reply = r - return err - case <-time.After(timeout): - // call timed out, continue - } - } - return utils.ErrTimedOut -} - -func (cp ConnectorPool) GetSessionRuns(ev *CDR, sRuns *[]*SessionRun) error { - for _, con := range cp { - c := make(chan error, 1) - sr := make([]*SessionRun, 0) - - var timeout time.Duration - con.GetTimeout(0, &timeout) - - go func() { c <- con.GetSessionRuns(ev, &sr) }() - select { - case err := <-c: - *sRuns = sr - return err - case <-time.After(timeout): - // call timed out, continue - } - } - return utils.ErrTimedOut -} - -func (cp ConnectorPool) GetDerivedChargers(attrs *utils.AttrDerivedChargers, dcs *utils.DerivedChargers) error { - for _, con := range cp { - c := make(chan error, 1) - derivedChargers := utils.DerivedChargers{} - - var timeout time.Duration - con.GetTimeout(0, &timeout) - - go func() { c <- con.GetDerivedChargers(attrs, &derivedChargers) }() - select { - case err := <-c: - *dcs = derivedChargers - return err - case <-time.After(timeout): - // call timed out, continue - } - } - return utils.ErrTimedOut -} - -func (cp ConnectorPool) ProcessCdr(cdr *CDR, reply *string) error { - for _, con := range cp { - c := make(chan error, 1) - var r string - - var timeout time.Duration - con.GetTimeout(0, &timeout) - - go func() { c <- con.ProcessCdr(cdr, &r) }() - select { - case err := <-c: - *reply = r - return err - case <-time.After(timeout): - // call timed out, continue - } - } - return utils.ErrTimedOut -} - -func (cp ConnectorPool) LogCallCost(ccl *CallCostLog, reply *string) error { - for _, con := range cp { - c := make(chan error, 1) - var r string - - var timeout time.Duration - con.GetTimeout(0, &timeout) - - 
go func() { c <- con.LogCallCost(ccl, &r) }() - select { - case err := <-c: - *reply = r - return err - case <-time.After(timeout): - // call timed out, continue - } - } - return utils.ErrTimedOut -} - -func (cp ConnectorPool) GetLCR(attr *AttrGetLcr, reply *LCRCost) error { - for _, con := range cp { - c := make(chan error, 1) - lcrCost := &LCRCost{} - - var timeout time.Duration - con.GetTimeout(0, &timeout) - - go func() { c <- con.GetLCR(attr, lcrCost) }() - select { - case err := <-c: - *reply = *lcrCost - return err - case <-time.After(timeout): - // call timed out, continue - } - } - return utils.ErrTimedOut -} - -func (cp ConnectorPool) GetTimeout(i int, d *time.Duration) error { - *d = 0 - return nil + return err } diff --git a/engine/responder_test.go b/engine/responder_test.go index 34b573151..9651382ec 100644 --- a/engine/responder_test.go +++ b/engine/responder_test.go @@ -18,6 +18,8 @@ along with this program. If not, see package engine import ( + "bytes" + "encoding/gob" "reflect" "testing" "time" @@ -151,10 +153,10 @@ func TestResponderGetSessionRuns(t *testing.T) { sesRuns := make([]*SessionRun, 0) eSRuns := []*SessionRun{ &SessionRun{DerivedCharger: extra1DC, - CallDescriptor: &CallDescriptor{CgrID: utils.Sha1("dsafdsaf", time.Date(2013, 11, 7, 8, 42, 26, 0, time.UTC).String()), RunID: "*default", Direction: "*out", Category: "0", + CallDescriptor: &CallDescriptor{CgrID: utils.Sha1("dsafdsaf", time.Date(2013, 11, 7, 8, 42, 26, 0, time.UTC).String()), RunID: "extra1", Direction: "*out", Category: "0", Tenant: "vdf", Subject: "rif", Account: "minitsboy", Destination: "0256", TimeStart: time.Date(2013, 11, 7, 8, 42, 26, 0, time.UTC), TimeEnd: time.Date(2013, 11, 7, 8, 42, 36, 0, time.UTC), TOR: utils.VOICE, ExtraFields: map[string]string{"field_extr1": "val_extr1", "fieldextr2": "valextr2"}}}, &SessionRun{DerivedCharger: extra2DC, - CallDescriptor: &CallDescriptor{CgrID: utils.Sha1("dsafdsaf", time.Date(2013, 11, 7, 8, 42, 26, 0, time.UTC).String()), 
RunID: "*default", Direction: "*out", Category: "call", + CallDescriptor: &CallDescriptor{CgrID: utils.Sha1("dsafdsaf", time.Date(2013, 11, 7, 8, 42, 26, 0, time.UTC).String()), RunID: "extra2", Direction: "*out", Category: "call", Tenant: "vdf", Subject: "ivo", Account: "ivo", Destination: "1002", TimeStart: time.Date(2013, 11, 7, 8, 42, 26, 0, time.UTC), TimeEnd: time.Date(2013, 11, 7, 8, 42, 36, 0, time.UTC), TOR: utils.VOICE, ExtraFields: map[string]string{"field_extr1": "val_extr1", "fieldextr2": "valextr2"}}}, &SessionRun{DerivedCharger: dfDC, CallDescriptor: &CallDescriptor{CgrID: utils.Sha1("dsafdsaf", time.Date(2013, 11, 7, 8, 42, 26, 0, time.UTC).String()), RunID: "*default", Direction: "*out", Category: "call", @@ -163,7 +165,7 @@ func TestResponderGetSessionRuns(t *testing.T) { t.Error(err) } else if !reflect.DeepEqual(eSRuns, sesRuns) { for _, sr := range sesRuns { - t.Logf("sr cd: %+v", sr.CallDescriptor) + t.Logf("sr cd: %s", utils.ToIJSON(sr.CallDescriptor)) } t.Errorf("Expecting: %+v, received: %+v", eSRuns, sesRuns) } @@ -289,7 +291,8 @@ func TestResponderGetLCR(t *testing.T) { } } danStatsId := "dan12_stats" - rsponder.Stats.AddQueue(&CdrStats{Id: danStatsId, Supplier: []string{"dan12"}, Metrics: []string{ASR, PDD, ACD, TCD, ACC, TCC, DDC}}, nil) + var r int + rsponder.Stats.Call("CDRStatsV1.AddQueue", &CdrStats{Id: danStatsId, Supplier: []string{"dan12"}, Metrics: []string{ASR, PDD, ACD, TCD, ACC, TCC, DDC}}, &r) danRpfl := &RatingProfile{Id: "*out:tenant12:call:dan12", RatingPlanActivations: RatingPlanActivations{&RatingPlanActivation{ ActivationTime: time.Date(2015, 01, 01, 8, 0, 0, 0, time.UTC), @@ -299,7 +302,7 @@ func TestResponderGetLCR(t *testing.T) { }}, } rifStatsId := "rif12_stats" - rsponder.Stats.AddQueue(&CdrStats{Id: rifStatsId, Supplier: []string{"rif12"}, Metrics: []string{ASR, PDD, ACD, TCD, ACC, TCC, DDC}}, nil) + rsponder.Stats.Call("CDRStatsV1.AddQueue", &CdrStats{Id: rifStatsId, Supplier: []string{"rif12"}, Metrics: 
[]string{ASR, PDD, ACD, TCD, ACC, TCC, DDC}}, &r) rifRpfl := &RatingProfile{Id: "*out:tenant12:call:rif12", RatingPlanActivations: RatingPlanActivations{&RatingPlanActivation{ ActivationTime: time.Date(2015, 01, 01, 8, 0, 0, 0, time.UTC), @@ -309,7 +312,7 @@ func TestResponderGetLCR(t *testing.T) { }}, } ivoStatsId := "ivo12_stats" - rsponder.Stats.AddQueue(&CdrStats{Id: ivoStatsId, Supplier: []string{"ivo12"}, Metrics: []string{ASR, PDD, ACD, TCD, ACC, TCC, DDC}}, nil) + rsponder.Stats.Call("CDRStatsV1.AddQueue", &CdrStats{Id: ivoStatsId, Supplier: []string{"ivo12"}, Metrics: []string{ASR, PDD, ACD, TCD, ACC, TCC, DDC}}, &r) ivoRpfl := &RatingProfile{Id: "*out:tenant12:call:ivo12", RatingPlanActivations: RatingPlanActivations{&RatingPlanActivation{ ActivationTime: time.Date(2015, 01, 01, 8, 0, 0, 0, time.UTC), @@ -489,10 +492,12 @@ func TestResponderGetLCR(t *testing.T) { } else if !reflect.DeepEqual(eQTLcr.SupplierCosts, lcrQT.SupplierCosts) { t.Errorf("Expecting: %+v, received: %+v", eQTLcr.SupplierCosts, lcrQT.SupplierCosts) } + cdr := &CDR{Supplier: "rif12", AnswerTime: time.Now(), Usage: 3 * time.Minute, Cost: 1} - rsponder.Stats.AppendCDR(cdr, nil) + rsponder.Stats.Call("CDRStatsV1.AppendCDR", cdr, &r) cdr = &CDR{Supplier: "dan12", AnswerTime: time.Now(), Usage: 5 * time.Minute, Cost: 2} - rsponder.Stats.AppendCDR(cdr, nil) + rsponder.Stats.Call("CDRStatsV1.AppendCDR", cdr, &r) + eQTLcr = &LCRCost{ Entry: &LCREntry{DestinationId: utils.ANY, RPCategory: "call", Strategy: LCR_STRATEGY_QOS_THRESHOLD, StrategyParams: "35;;;;4m;;;;;;;;;", Weight: 10.0}, SupplierCosts: []*LCRSupplierCost{ @@ -538,3 +543,96 @@ func TestResponderGetLCR(t *testing.T) { t.Errorf("Expecting: %+v, received: %+v", eQosLcr.SupplierCosts, lcrQ.SupplierCosts) } } + +func TestResponderGobSMCost(t *testing.T) { + attr := AttrCDRSStoreSMCost{ + Cost: &SMCost{ + CGRID: "b783a8bcaa356570436983cd8a0e6de4993f9ba6", + RunID: "*default", + OriginHost: "", + OriginID: "testdatagrp_grp1", + 
CostSource: "SMR", + Usage: 1536, + CostDetails: &CallCost{ + Direction: "*out", + Category: "generic", + Tenant: "cgrates.org", + Subject: "1001", + Account: "1001", + Destination: "data", + TOR: "*data", + Cost: 0, + Timespans: TimeSpans{&TimeSpan{ + TimeStart: time.Date(2016, 1, 5, 12, 30, 10, 0, time.UTC), + TimeEnd: time.Date(2016, 1, 5, 12, 55, 46, 0, time.UTC), + Cost: 0, + RateInterval: &RateInterval{ + Timing: nil, + Rating: &RIRate{ + ConnectFee: 0, + RoundingMethod: "", + RoundingDecimals: 0, + MaxCost: 0, + MaxCostStrategy: "", + Rates: RateGroups{&Rate{ + GroupIntervalStart: 0, + Value: 0, + RateIncrement: 1 * time.Second, + RateUnit: 1 * time.Second, + }, + }, + }, + Weight: 0, + }, + DurationIndex: 0, + Increments: Increments{&Increment{ + Duration: 1 * time.Second, + Cost: 0, + BalanceInfo: &DebitInfo{ + Unit: &UnitInfo{ + UUID: "fa0aa280-2b76-4b5b-bb06-174f84b8c321", + ID: "", + Value: 100864, + DestinationID: "data", + Consumed: 1, + TOR: "*data", + RateInterval: nil, + }, + Monetary: nil, + AccountID: "cgrates.org:1001", + }, + CompressFactor: 1536, + }, + }, + RoundIncrement: nil, + MatchedSubject: "fa0aa280-2b76-4b5b-bb06-174f84b8c321", + MatchedPrefix: "data", + MatchedDestId: "*any", + RatingPlanId: "*none", + CompressFactor: 1, + }, + }, + RatedUsage: 1536, + }, + }, + CheckDuplicate: false, + } + + var network bytes.Buffer // Stand-in for a network connection + enc := gob.NewEncoder(&network) // Will write to network. + dec := gob.NewDecoder(&network) // Will read from network. + err := enc.Encode(attr) + if err != nil { + t.Error("encode error: ", err) + } + + // Decode (receive) and print the values. 
+ var q AttrCDRSStoreSMCost + err = dec.Decode(&q) + if err != nil { + t.Error("decode error: ", err) + } + if !reflect.DeepEqual(attr, q) { + t.Error("wrong transmission") + } +} diff --git a/engine/stats.go b/engine/stats.go index d07f72e41..571615ec6 100644 --- a/engine/stats.go +++ b/engine/stats.go @@ -20,11 +20,12 @@ package engine import ( "fmt" + "reflect" + "strings" "sync" "time" "github.com/cgrates/cgrates/utils" - "github.com/cgrates/rpcclient" ) type StatsInterface interface { @@ -303,54 +304,30 @@ func (s *Stats) Stop(int, *int) error { return nil } -type ProxyStats struct { - Client *rpcclient.RpcClient -} - -func NewProxyStats(addr string, attempts, reconnects int) (*ProxyStats, error) { - client, err := rpcclient.NewRpcClient("tcp", addr, attempts, reconnects, utils.GOB, nil) - if err != nil { - return nil, err +func (s *Stats) Call(serviceMethod string, args interface{}, reply interface{}) error { + parts := strings.Split(serviceMethod, ".") + if len(parts) != 2 { + return utils.ErrNotImplemented + } + // get method + method := reflect.ValueOf(s).MethodByName(parts[1]) + if !method.IsValid() { + return utils.ErrNotImplemented } - return &ProxyStats{Client: client}, nil -} -func (ps *ProxyStats) GetValues(sqID string, values *map[string]float64) error { - return ps.Client.Call("Stats.GetValues", sqID, values) -} + // construct the params + params := []reflect.Value{reflect.ValueOf(args), reflect.ValueOf(reply)} -func (ps *ProxyStats) AppendCDR(cdr *CDR, out *int) error { - return ps.Client.Call("Stats.AppendCDR", cdr, out) -} - -func (ps *ProxyStats) GetQueueIds(in int, ids *[]string) error { - return ps.Client.Call("Stats.GetQueueIds", in, ids) -} - -func (ps *ProxyStats) GetQueue(id string, sq *StatsQueue) error { - return ps.Client.Call("Stats.GetQueue", id, sq) -} - -func (ps *ProxyStats) GetQueueTriggers(id string, ats *ActionTriggers) error { - return ps.Client.Call("Stats.GetQueueTriggers", id, ats) -} - -func (ps *ProxyStats) AddQueue(cs 
*CdrStats, out *int) error { - return ps.Client.Call("Stats.AddQueue", cs, out) -} - -func (ps *ProxyStats) RemoveQueue(qID string, out *int) error { - return ps.Client.Call("Stats.RemoveQueue", qID, out) -} - -func (ps *ProxyStats) ReloadQueues(ids []string, out *int) error { - return ps.Client.Call("Stats.ReloadQueues", ids, out) -} - -func (ps *ProxyStats) ResetQueues(ids []string, out *int) error { - return ps.Client.Call("Stats.ResetQueues", ids, out) -} - -func (ps *ProxyStats) Stop(i int, r *int) error { - return ps.Client.Call("Stats.Stop", 0, i) + ret := method.Call(params) + if len(ret) != 1 { + return utils.ErrServerError + } + if ret[0].Interface() == nil { + return nil + } + err, ok := ret[0].Interface().(error) + if !ok { + return utils.ErrServerError + } + return err } diff --git a/engine/stats_queue.go b/engine/stats_queue.go index bfc07356d..6260fd1ee 100644 --- a/engine/stats_queue.go +++ b/engine/stats_queue.go @@ -56,6 +56,7 @@ var METRIC_TRIGGER_MAP = map[string]string{ type QCdr struct { SetupTime time.Time AnswerTime time.Time + EventTime time.Time Pdd time.Duration Usage time.Duration Cost float64 @@ -111,15 +112,19 @@ func (sq *StatsQueue) Load(saved *StatsQueue) { } } -func (sq *StatsQueue) AppendCDR(cdr *CDR) { +func (sq *StatsQueue) AppendCDR(cdr *CDR) *QCdr { sq.mux.Lock() defer sq.mux.Unlock() + var qcdr *QCdr if sq.conf.AcceptCdr(cdr) { - sq.appendQcdr(sq.simplifyCdr(cdr), true) + qcdr = sq.simplifyCdr(cdr) + sq.appendQcdr(qcdr, true) } + return qcdr } func (sq *StatsQueue) appendQcdr(qcdr *QCdr, runTrigger bool) { + qcdr.EventTime = time.Now() //used for TimeWindow sq.Cdrs = append(sq.Cdrs, qcdr) sq.addToMetrics(qcdr) sq.purgeObsoleteCdrs() @@ -151,6 +156,7 @@ func (sq *StatsQueue) appendQcdr(qcdr *QCdr, runTrigger bool) { } func (sq *StatsQueue) addToMetrics(cdr *QCdr) { + //log.Print("AddToMetrics: " + utils.ToIJSON(cdr)) for _, metric := range sq.metrics { metric.AddCdr(cdr) } @@ -184,15 +190,20 @@ func (sq *StatsQueue) 
purgeObsoleteCdrs() { } } if sq.conf.TimeWindow > 0 { + index := -1 for i, cdr := range sq.Cdrs { - if time.Now().Sub(cdr.SetupTime) > sq.conf.TimeWindow { + if time.Now().Sub(cdr.EventTime) > sq.conf.TimeWindow { sq.removeFromMetrics(cdr) + index = i continue + } + break + } + if index > -1 { + if index < len(sq.Cdrs)-1 { + sq.Cdrs = sq.Cdrs[index+1:] } else { - if i > 0 { - sq.Cdrs = sq.Cdrs[i:] - } - break + sq.Cdrs = make([]*QCdr, 0) } } } diff --git a/engine/stats_test.go b/engine/stats_test.go index def739f13..c618fb373 100644 --- a/engine/stats_test.go +++ b/engine/stats_test.go @@ -35,6 +35,7 @@ func TestStatsQueueInit(t *testing.T) { func TestStatsValue(t *testing.T) { sq := NewStatsQueue(&CdrStats{Metrics: []string{ASR, ACD, TCD, ACC, TCC}}) cdr := &CDR{ + SetupTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC), AnswerTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC), Usage: 10 * time.Second, Cost: 1, @@ -227,7 +228,7 @@ func TestStatsAppendCdr(t *testing.T) { if len(cdrStats.queues) != 2 || len(cdrStats.queues["CDRST1"].Cdrs) != 0 || len(cdrStats.queues["CDRST2"].Cdrs) != 1 { - t.Error("Error appending cdr to queue: ", len(cdrStats.queues)) + t.Error("Error appending cdr to queue: ", utils.ToIJSON(cdrStats.queues)) } } @@ -458,3 +459,98 @@ func TestStatsSaveRestoreQeue(t *testing.T) { t.Errorf("Expecting %+v got: %+v", sq.Cdrs[0], recovered.Cdrs[0]) } } + +func TestStatsPurgeTimeOne(t *testing.T) { + sq := NewStatsQueue(&CdrStats{Metrics: []string{ASR, ACD, TCD, ACC, TCC}, TimeWindow: 30 * time.Minute}) + cdr := &CDR{ + SetupTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC), + AnswerTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC), + Usage: 10 * time.Second, + Cost: 1, + } + qcdr := sq.AppendCDR(cdr) + qcdr.EventTime = qcdr.SetupTime + s := sq.GetStats() + if s[ASR] != -1 || + s[ACD] != -1 || + s[TCD] != -1 || + s[ACC] != -1 || + s[TCC] != -1 { + t.Errorf("Error getting stats: %+v", s) + } +} + +func TestStatsPurgeTime(t *testing.T) { + sq := 
NewStatsQueue(&CdrStats{Metrics: []string{ASR, ACD, TCD, ACC, TCC}, TimeWindow: 30 * time.Minute}) + cdr := &CDR{ + SetupTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC), + AnswerTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC), + Usage: 10 * time.Second, + Cost: 1, + } + qcdr := sq.AppendCDR(cdr) + qcdr.EventTime = qcdr.SetupTime + cdr.Cost = 2 + qcdr = sq.AppendCDR(cdr) + qcdr.EventTime = qcdr.SetupTime + cdr.Cost = 3 + qcdr = sq.AppendCDR(cdr) + qcdr.EventTime = qcdr.SetupTime + s := sq.GetStats() + if s[ASR] != -1 || + s[ACD] != -1 || + s[TCD] != -1 || + s[ACC] != -1 || + s[TCC] != -1 { + t.Errorf("Error getting stats: %+v", s) + } +} + +func TestStatsPurgeTimeFirst(t *testing.T) { + sq := NewStatsQueue(&CdrStats{Metrics: []string{ASR, ACD, TCD, ACC, TCC}, TimeWindow: 30 * time.Minute}) + cdr := &CDR{ + SetupTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC), + AnswerTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC), + Usage: 10 * time.Second, + Cost: 1, + } + qcdr := sq.AppendCDR(cdr) + cdr.Cost = 2 + cdr.SetupTime = time.Date(2024, 7, 14, 14, 25, 0, 0, time.UTC) + cdr.AnswerTime = time.Date(2024, 7, 14, 14, 25, 0, 0, time.UTC) + qcdr.EventTime = qcdr.SetupTime + sq.AppendCDR(cdr) + cdr.Cost = 3 + sq.AppendCDR(cdr) + s := sq.GetStats() + if s[ASR] != 100 || + s[ACD] != 10 || + s[TCD] != 20 || + s[ACC] != 2.5 || + s[TCC] != 5 { + t.Errorf("Error getting stats: %+v", s) + } +} + +func TestStatsPurgeLength(t *testing.T) { + sq := NewStatsQueue(&CdrStats{Metrics: []string{ASR, ACD, TCD, ACC, TCC}, QueueLength: 1}) + cdr := &CDR{ + SetupTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC), + AnswerTime: time.Date(2014, 7, 14, 14, 25, 0, 0, time.UTC), + Usage: 10 * time.Second, + Cost: 1, + } + sq.AppendCDR(cdr) + cdr.Cost = 2 + sq.AppendCDR(cdr) + cdr.Cost = 3 + sq.AppendCDR(cdr) + s := sq.GetStats() + if s[ASR] != 100 || + s[ACD] != 10 || + s[TCD] != 10 || + s[ACC] != 3 || + s[TCC] != 3 { + t.Errorf("Error getting stats: %+v", s) + } +} diff --git 
a/engine/storage_cdrs_it_test.go b/engine/storage_cdrs_it_test.go index f264ec237..656098885 100644 --- a/engine/storage_cdrs_it_test.go +++ b/engine/storage_cdrs_it_test.go @@ -19,10 +19,11 @@ along with this program. If not, see package engine import ( + "errors" "flag" "fmt" "path" - //"reflect" + "strconv" "testing" "time" @@ -222,13 +223,28 @@ func testSMCosts(cfg *config.CGRConfig) error { }, TOR: utils.VOICE, } - if err := cdrStorage.LogCallCost("164b0422fdc6a5117031b427439482c6a4f90e41", utils.META_DEFAULT, utils.UNIT_TEST, cc); err != nil { + if err := cdrStorage.SetSMCost(&SMCost{CGRID: "164b0422fdc6a5117031b427439482c6a4f90e41", RunID: utils.META_DEFAULT, OriginHost: "localhost", OriginID: "12345", + CostSource: utils.UNIT_TEST, CostDetails: cc}); err != nil { return err } - if rcvCC, err := cdrStorage.GetCallCostLog("164b0422fdc6a5117031b427439482c6a4f90e41", utils.META_DEFAULT); err != nil { + if rcvSMC, err := cdrStorage.GetSMCosts("164b0422fdc6a5117031b427439482c6a4f90e41", utils.META_DEFAULT, "", ""); err != nil { return err - } else if len(cc.Timespans) != len(rcvCC.Timespans) { // cc.Timespans[0].RateInterval.Rating.Rates[0], rcvCC.Timespans[0].RateInterval.Rating.Rates[0]) - return fmt.Errorf("Expecting: %+v, received: %+v", cc, rcvCC) + } else if len(rcvSMC) == 0 { + return errors.New("No SMCosts received") + } else if len(cc.Timespans) != len(rcvSMC[0].CostDetails.Timespans) { // cc.Timespans[0].RateInterval.Rating.Rates[0], rcvCC.Timespans[0].RateInterval.Rating.Rates[0]) + return fmt.Errorf("Expecting: %+v, received: %+s", cc, utils.ToIJSON(rcvSMC[0])) + } + // Test query per prefix + for i := 0; i < 3; i++ { + if err := cdrStorage.SetSMCost(&SMCost{CGRID: "164b0422fdc6a5117031b427439482c6a4f90e5" + strconv.Itoa(i), RunID: utils.META_DEFAULT, OriginHost: "localhost", OriginID: "abc" + strconv.Itoa(i), + CostSource: utils.UNIT_TEST, CostDetails: cc}); err != nil { + return err + } + } + if rcvSMC, err := cdrStorage.GetSMCosts("", 
utils.META_DEFAULT, "localhost", "abc"); err != nil { + return err + } else if len(rcvSMC) != 3 { + return fmt.Errorf("Expecting 3, received: %d", len(rcvSMC)) } return nil } diff --git a/engine/storage_interface.go b/engine/storage_interface.go index 8d082ae14..327be197f 100644 --- a/engine/storage_interface.go +++ b/engine/storage_interface.go @@ -33,7 +33,7 @@ import ( type Storage interface { Close() Flush(string) error - GetKeysForPrefix(string) ([]string, error) + GetKeysForPrefix(string, bool) ([]string, error) } // Interface for storage providers. @@ -59,6 +59,7 @@ type RatingStorage interface { SetDerivedChargers(string, *utils.DerivedChargers) error GetActions(string, bool) (Actions, error) SetActions(string, Actions) error + RemoveActions(string) error GetSharedGroup(string, bool) (*SharedGroup, error) SetSharedGroup(*SharedGroup) error GetActionTriggers(string) (ActionTriggers, error) @@ -92,13 +93,15 @@ type AccountingStorage interface { RemoveAlias(string) error GetLoadHistory(int, bool) ([]*LoadInstance, error) AddLoadHistory(*LoadInstance, int) error + GetStructVersion() (*StructVersion, error) + SetStructVersion(*StructVersion) error } type CdrStorage interface { Storage SetCDR(*CDR, bool) error - LogCallCost(cgrid, runid, source string, cc *CallCost) error - GetCallCostLog(cgrid, runid string) (*CallCost, error) + SetSMCost(smc *SMCost) error + GetSMCosts(cgrid, runid, originHost, originIDPrfx string) ([]*SMCost, error) GetCDRs(*utils.CDRsFilter, bool) ([]*CDR, int64, error) } @@ -202,6 +205,7 @@ func NewCodecMsgpackMarshaler() *CodecMsgpackMarshaler { cmm := &CodecMsgpackMarshaler{new(codec.MsgpackHandle)} mh := cmm.mh mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + mh.RawToString = true return cmm } diff --git a/engine/storage_map.go b/engine/storage_map.go index 0e77e3ae1..f9a747a5b 100644 --- a/engine/storage_map.go +++ b/engine/storage_map.go @@ -24,6 +24,7 @@ import ( "errors" "fmt" "io/ioutil" + "sync" "strings" "time" @@ -36,6 
+37,7 @@ type MapStorage struct { dict map[string][]byte tasks [][]byte ms Marshaler + mu sync.RWMutex } func NewMapStorage() (*MapStorage, error) { @@ -49,18 +51,25 @@ func NewMapStorageJson() (*MapStorage, error) { func (ms *MapStorage) Close() {} func (ms *MapStorage) Flush(ignore string) error { + ms.mu.Lock() + defer ms.mu.Unlock() ms.dict = make(map[string][]byte) return nil } -func (ms *MapStorage) GetKeysForPrefix(prefix string) ([]string, error) { - keysForPrefix := make([]string, 0) - for key := range ms.dict { - if strings.HasPrefix(key, prefix) { - keysForPrefix = append(keysForPrefix, key) +func (ms *MapStorage) GetKeysForPrefix(prefix string, skipCache bool) ([]string, error) { + ms.mu.RLock() + defer ms.mu.RUnlock() + if skipCache { + keysForPrefix := make([]string, 0) + for key := range ms.dict { + if strings.HasPrefix(key, prefix) { + keysForPrefix = append(keysForPrefix, key) + } } + return keysForPrefix, nil } - return keysForPrefix, nil + return cache2go.GetEntriesKeys(prefix), nil } func (ms *MapStorage) CacheRatingAll() error { @@ -253,6 +262,8 @@ func (ms *MapStorage) cacheAccounting(alsKeys []string) error { // Used to check if specific subject is stored using prefix key attached to entity func (ms *MapStorage) HasData(categ, subject string) (bool, error) { + ms.mu.RLock() + defer ms.mu.RUnlock() switch categ { case utils.DESTINATION_PREFIX, utils.RATING_PLAN_PREFIX, utils.RATING_PROFILE_PREFIX, utils.ACTION_PREFIX, utils.ACTION_PLAN_PREFIX, utils.ACCOUNT_PREFIX, utils.DERIVEDCHARGERS_PREFIX: _, exists := ms.dict[categ+subject] @@ -262,6 +273,8 @@ func (ms *MapStorage) HasData(categ, subject string) (bool, error) { } func (ms *MapStorage) GetRatingPlan(key string, skipCache bool) (rp *RatingPlan, err error) { + ms.mu.RLock() + defer ms.mu.RUnlock() key = utils.RATING_PLAN_PREFIX + key if !skipCache { if x, err := cache2go.Get(key); err == nil { @@ -291,6 +304,8 @@ func (ms *MapStorage) GetRatingPlan(key string, skipCache bool) (rp 
*RatingPlan, } func (ms *MapStorage) SetRatingPlan(rp *RatingPlan) (err error) { + ms.mu.Lock() + defer ms.mu.Unlock() result, err := ms.ms.Marshal(rp) var b bytes.Buffer w := zlib.NewWriter(&b) @@ -299,12 +314,14 @@ func (ms *MapStorage) SetRatingPlan(rp *RatingPlan) (err error) { ms.dict[utils.RATING_PLAN_PREFIX+rp.Id] = b.Bytes() response := 0 if historyScribe != nil { - go historyScribe.Record(rp.GetHistoryRecord(), &response) + go historyScribe.Call("HistoryV1.Record", rp.GetHistoryRecord(), &response) } return } func (ms *MapStorage) GetRatingProfile(key string, skipCache bool) (rpf *RatingProfile, err error) { + ms.mu.RLock() + defer ms.mu.RUnlock() key = utils.RATING_PROFILE_PREFIX + key if !skipCache { if x, err := cache2go.Get(key); err == nil { @@ -325,16 +342,20 @@ func (ms *MapStorage) GetRatingProfile(key string, skipCache bool) (rpf *RatingP } func (ms *MapStorage) SetRatingProfile(rpf *RatingProfile) (err error) { + ms.mu.Lock() + defer ms.mu.Unlock() result, err := ms.ms.Marshal(rpf) ms.dict[utils.RATING_PROFILE_PREFIX+rpf.Id] = result response := 0 if historyScribe != nil { - go historyScribe.Record(rpf.GetHistoryRecord(false), &response) + go historyScribe.Call("HistoryV1.Record", rpf.GetHistoryRecord(false), &response) } return } func (ms *MapStorage) RemoveRatingProfile(key string) (err error) { + ms.mu.Lock() + defer ms.mu.Unlock() for k := range ms.dict { if strings.HasPrefix(k, key) { delete(ms.dict, key) @@ -342,7 +363,7 @@ func (ms *MapStorage) RemoveRatingProfile(key string) (err error) { response := 0 rpf := &RatingProfile{Id: key} if historyScribe != nil { - go historyScribe.Record(rpf.GetHistoryRecord(true), &response) + go historyScribe.Call("HistoryV1.Record", rpf.GetHistoryRecord(true), &response) } } } @@ -350,6 +371,8 @@ func (ms *MapStorage) RemoveRatingProfile(key string) (err error) { } func (ms *MapStorage) GetLCR(key string, skipCache bool) (lcr *LCR, err error) { + ms.mu.RLock() + defer ms.mu.RUnlock() key = utils.LCR_PREFIX 
+ key if !skipCache { if x, err := cache2go.Get(key); err == nil { @@ -368,12 +391,16 @@ func (ms *MapStorage) GetLCR(key string, skipCache bool) (lcr *LCR, err error) { } func (ms *MapStorage) SetLCR(lcr *LCR) (err error) { + ms.mu.Lock() + defer ms.mu.Unlock() result, err := ms.ms.Marshal(lcr) ms.dict[utils.LCR_PREFIX+lcr.GetId()] = result return } func (ms *MapStorage) GetDestination(key string) (dest *Destination, err error) { + ms.mu.RLock() + defer ms.mu.RUnlock() key = utils.DESTINATION_PREFIX + key if values, ok := ms.dict[key]; ok { b := bytes.NewBuffer(values) @@ -399,6 +426,8 @@ func (ms *MapStorage) GetDestination(key string) (dest *Destination, err error) } func (ms *MapStorage) SetDestination(dest *Destination) (err error) { + ms.mu.Lock() + defer ms.mu.Unlock() result, err := ms.ms.Marshal(dest) var b bytes.Buffer w := zlib.NewWriter(&b) @@ -407,12 +436,14 @@ func (ms *MapStorage) SetDestination(dest *Destination) (err error) { ms.dict[utils.DESTINATION_PREFIX+dest.Id] = b.Bytes() response := 0 if historyScribe != nil { - go historyScribe.Record(dest.GetHistoryRecord(), &response) + go historyScribe.Call("HistoryV1.Record", dest.GetHistoryRecord(), &response) } return } func (ms *MapStorage) GetActions(key string, skipCache bool) (as Actions, err error) { + ms.mu.RLock() + defer ms.mu.RUnlock() key = utils.ACTION_PREFIX + key if !skipCache { if x, err := cache2go.Get(key); err == nil { @@ -431,12 +462,23 @@ func (ms *MapStorage) GetActions(key string, skipCache bool) (as Actions, err er } func (ms *MapStorage) SetActions(key string, as Actions) (err error) { + ms.mu.Lock() + defer ms.mu.Unlock() result, err := ms.ms.Marshal(&as) ms.dict[utils.ACTION_PREFIX+key] = result return } +func (ms *MapStorage) RemoveActions(key string) (err error) { + ms.mu.Lock() + defer ms.mu.Unlock() + delete(ms.dict, utils.ACTION_PREFIX+key) + return +} + func (ms *MapStorage) GetSharedGroup(key string, skipCache bool) (sg *SharedGroup, err error) { + ms.mu.RLock() + 
defer ms.mu.RUnlock() key = utils.SHARED_GROUP_PREFIX + key if !skipCache { if x, err := cache2go.Get(key); err == nil { @@ -457,12 +499,16 @@ func (ms *MapStorage) GetSharedGroup(key string, skipCache bool) (sg *SharedGrou } func (ms *MapStorage) SetSharedGroup(sg *SharedGroup) (err error) { + ms.mu.Lock() + defer ms.mu.Unlock() result, err := ms.ms.Marshal(sg) ms.dict[utils.SHARED_GROUP_PREFIX+sg.Id] = result return } func (ms *MapStorage) GetAccount(key string) (ub *Account, err error) { + ms.mu.RLock() + defer ms.mu.RUnlock() if values, ok := ms.dict[utils.ACCOUNT_PREFIX+key]; ok { ub = &Account{ID: key} err = ms.ms.Unmarshal(values, ub) @@ -485,17 +531,23 @@ func (ms *MapStorage) SetAccount(ub *Account) (err error) { ub = ac } } + ms.mu.Lock() + defer ms.mu.Unlock() result, err := ms.ms.Marshal(ub) ms.dict[utils.ACCOUNT_PREFIX+ub.ID] = result return } func (ms *MapStorage) RemoveAccount(key string) (err error) { + ms.mu.Lock() + defer ms.mu.Unlock() delete(ms.dict, utils.ACCOUNT_PREFIX+key) return } func (ms *MapStorage) GetCdrStatsQueue(key string) (sq *StatsQueue, err error) { + ms.mu.RLock() + defer ms.mu.RUnlock() if values, ok := ms.dict[utils.CDR_STATS_QUEUE_PREFIX+key]; ok { sq = &StatsQueue{} err = ms.ms.Unmarshal(values, sq) @@ -506,12 +558,16 @@ func (ms *MapStorage) GetCdrStatsQueue(key string) (sq *StatsQueue, err error) { } func (ms *MapStorage) SetCdrStatsQueue(sq *StatsQueue) (err error) { + ms.mu.Lock() + defer ms.mu.Unlock() result, err := ms.ms.Marshal(sq) ms.dict[utils.CDR_STATS_QUEUE_PREFIX+sq.GetId()] = result return } func (ms *MapStorage) GetSubscribers() (result map[string]*SubscriberData, err error) { + ms.mu.RLock() + defer ms.mu.RUnlock() result = make(map[string]*SubscriberData) for key, value := range ms.dict { if strings.HasPrefix(key, utils.PUBSUB_SUBSCRIBERS_PREFIX) { @@ -524,17 +580,23 @@ func (ms *MapStorage) GetSubscribers() (result map[string]*SubscriberData, err e return } func (ms *MapStorage) SetSubscriber(key string, sub 
*SubscriberData) (err error) { + ms.mu.Lock() + defer ms.mu.Unlock() result, err := ms.ms.Marshal(sub) ms.dict[utils.PUBSUB_SUBSCRIBERS_PREFIX+key] = result return } func (ms *MapStorage) RemoveSubscriber(key string) (err error) { + ms.mu.Lock() + defer ms.mu.Unlock() delete(ms.dict, utils.PUBSUB_SUBSCRIBERS_PREFIX+key) return } func (ms *MapStorage) SetUser(up *UserProfile) error { + ms.mu.Lock() + defer ms.mu.Unlock() result, err := ms.ms.Marshal(up) if err != nil { return err @@ -543,6 +605,8 @@ func (ms *MapStorage) SetUser(up *UserProfile) error { return nil } func (ms *MapStorage) GetUser(key string) (up *UserProfile, err error) { + ms.mu.RLock() + defer ms.mu.RUnlock() up = &UserProfile{} if values, ok := ms.dict[utils.USERS_PREFIX+key]; ok { err = ms.ms.Unmarshal(values, &up) @@ -553,6 +617,8 @@ func (ms *MapStorage) GetUser(key string) (up *UserProfile, err error) { } func (ms *MapStorage) GetUsers() (result []*UserProfile, err error) { + ms.mu.RLock() + defer ms.mu.RUnlock() for key, value := range ms.dict { if strings.HasPrefix(key, utils.USERS_PREFIX) { up := &UserProfile{} @@ -565,11 +631,15 @@ func (ms *MapStorage) GetUsers() (result []*UserProfile, err error) { } func (ms *MapStorage) RemoveUser(key string) error { + ms.mu.Lock() + defer ms.mu.Unlock() delete(ms.dict, utils.USERS_PREFIX+key) return nil } func (ms *MapStorage) SetAlias(al *Alias) error { + ms.mu.Lock() + defer ms.mu.Unlock() result, err := ms.ms.Marshal(al.Values) if err != nil { return err @@ -579,6 +649,8 @@ func (ms *MapStorage) SetAlias(al *Alias) error { } func (ms *MapStorage) GetAlias(key string, skipCache bool) (al *Alias, err error) { + ms.mu.RLock() + defer ms.mu.RUnlock() key = utils.ALIASES_PREFIX + key if !skipCache { if x, err := cache2go.Get(key); err == nil { @@ -604,6 +676,8 @@ func (ms *MapStorage) GetAlias(key string, skipCache bool) (al *Alias, err error } func (ms *MapStorage) RemoveAlias(key string) error { + ms.mu.Lock() + defer ms.mu.Unlock() al := &Alias{} 
al.SetId(key) key = utils.ALIASES_PREFIX + key @@ -619,14 +693,20 @@ func (ms *MapStorage) RemoveAlias(key string) error { } func (ms *MapStorage) GetLoadHistory(limitItems int, skipCache bool) ([]*LoadInstance, error) { + ms.mu.RLock() + defer ms.mu.RUnlock() return nil, nil } func (ms *MapStorage) AddLoadHistory(*LoadInstance, int) error { + ms.mu.Lock() + defer ms.mu.Unlock() return nil } func (ms *MapStorage) GetActionTriggers(key string) (atrs ActionTriggers, err error) { + ms.mu.RLock() + defer ms.mu.RUnlock() if values, ok := ms.dict[utils.ACTION_TRIGGER_PREFIX+key]; ok { err = ms.ms.Unmarshal(values, &atrs) } else { @@ -636,6 +716,8 @@ func (ms *MapStorage) GetActionTriggers(key string) (atrs ActionTriggers, err er } func (ms *MapStorage) SetActionTriggers(key string, atrs ActionTriggers) (err error) { + ms.mu.Lock() + defer ms.mu.Unlock() if len(atrs) == 0 { // delete the key delete(ms.dict, utils.ACTION_TRIGGER_PREFIX+key) @@ -647,6 +729,8 @@ func (ms *MapStorage) SetActionTriggers(key string, atrs ActionTriggers) (err er } func (ms *MapStorage) GetActionPlan(key string, skipCache bool) (ats *ActionPlan, err error) { + ms.mu.RLock() + defer ms.mu.RUnlock() key = utils.ACTION_PLAN_PREFIX + key if !skipCache { if x, err := cache2go.Get(key); err == nil { @@ -666,6 +750,8 @@ func (ms *MapStorage) GetActionPlan(key string, skipCache bool) (ats *ActionPlan func (ms *MapStorage) SetActionPlan(key string, ats *ActionPlan, overwrite bool) (err error) { if len(ats.ActionTimings) == 0 { + ms.mu.Lock() + defer ms.mu.Unlock() // delete the key delete(ms.dict, utils.ACTION_PLAN_PREFIX+key) cache2go.RemKey(utils.ACTION_PLAN_PREFIX + key) @@ -682,12 +768,16 @@ func (ms *MapStorage) SetActionPlan(key string, ats *ActionPlan, overwrite bool) } } } + ms.mu.Lock() + defer ms.mu.Unlock() result, err := ms.ms.Marshal(&ats) ms.dict[utils.ACTION_PLAN_PREFIX+key] = result return } func (ms *MapStorage) GetAllActionPlans() (ats map[string]*ActionPlan, err error) { + ms.mu.RLock() 
+ defer ms.mu.RUnlock() apls, err := cache2go.GetAllEntries(utils.ACTION_PLAN_PREFIX) if err != nil { return nil, err @@ -703,6 +793,8 @@ func (ms *MapStorage) GetAllActionPlans() (ats map[string]*ActionPlan, err error } func (ms *MapStorage) PushTask(t *Task) error { + ms.mu.Lock() + defer ms.mu.Unlock() result, err := ms.ms.Marshal(t) if err != nil { return err @@ -712,6 +804,8 @@ func (ms *MapStorage) PushTask(t *Task) error { } func (ms *MapStorage) PopTask() (t *Task, err error) { + ms.mu.Lock() + defer ms.mu.Unlock() if len(ms.tasks) > 0 { var values []byte values, ms.tasks = ms.tasks[0], ms.tasks[1:] @@ -724,6 +818,8 @@ func (ms *MapStorage) PopTask() (t *Task, err error) { } func (ms *MapStorage) GetDerivedChargers(key string, skipCache bool) (dcs *utils.DerivedChargers, err error) { + ms.mu.RLock() + defer ms.mu.RUnlock() key = utils.DERIVEDCHARGERS_PREFIX + key if !skipCache { if x, err := cache2go.Get(key); err == nil { @@ -742,6 +838,8 @@ func (ms *MapStorage) GetDerivedChargers(key string, skipCache bool) (dcs *utils } func (ms *MapStorage) SetDerivedChargers(key string, dcs *utils.DerivedChargers) error { + ms.mu.Lock() + defer ms.mu.Unlock() if dcs == nil || len(dcs.Chargers) == 0 { delete(ms.dict, utils.DERIVEDCHARGERS_PREFIX+key) cache2go.RemKey(utils.DERIVEDCHARGERS_PREFIX + key) @@ -753,12 +851,16 @@ func (ms *MapStorage) SetDerivedChargers(key string, dcs *utils.DerivedChargers) } func (ms *MapStorage) SetCdrStats(cs *CdrStats) error { + ms.mu.Lock() + defer ms.mu.Unlock() result, err := ms.ms.Marshal(cs) ms.dict[utils.CDR_STATS_PREFIX+cs.Id] = result return err } func (ms *MapStorage) GetCdrStats(key string) (cs *CdrStats, err error) { + ms.mu.RLock() + defer ms.mu.RUnlock() if values, ok := ms.dict[utils.CDR_STATS_PREFIX+key]; ok { err = ms.ms.Unmarshal(values, &cs) } else { @@ -768,6 +870,8 @@ func (ms *MapStorage) GetCdrStats(key string) (cs *CdrStats, err error) { } func (ms *MapStorage) GetAllCdrStats() (css []*CdrStats, err error) { + 
ms.mu.RLock() + defer ms.mu.RUnlock() for key, value := range ms.dict { if !strings.HasPrefix(key, utils.CDR_STATS_PREFIX) { continue @@ -779,15 +883,19 @@ func (ms *MapStorage) GetAllCdrStats() (css []*CdrStats, err error) { return } -func (ms *MapStorage) LogCallCost(cgrid, source, runid string, cc *CallCost) error { - result, err := ms.ms.Marshal(cc) - ms.dict[utils.LOG_CALL_COST_PREFIX+source+runid+"_"+cgrid] = result +func (ms *MapStorage) SetSMCost(smCost *SMCost) error { + ms.mu.Lock() + defer ms.mu.Unlock() + result, err := ms.ms.Marshal(smCost) + ms.dict[utils.LOG_CALL_COST_PREFIX+smCost.CostSource+smCost.RunID+"_"+smCost.CGRID] = result return err } -func (ms *MapStorage) GetCallCostLog(cgrid, source, runid string) (cc *CallCost, err error) { +func (ms *MapStorage) GetSMCost(cgrid, source, runid, originHost, originID string) (smCost *SMCost, err error) { + ms.mu.RLock() + defer ms.mu.RUnlock() if values, ok := ms.dict[utils.LOG_CALL_COST_PREFIX+source+runid+"_"+cgrid]; ok { - err = ms.ms.Unmarshal(values, &cc) + err = ms.ms.Unmarshal(values, &smCost) } else { return nil, utils.ErrNotFound } @@ -795,6 +903,8 @@ func (ms *MapStorage) GetCallCostLog(cgrid, source, runid string) (cc *CallCost, } func (ms *MapStorage) LogActionTrigger(ubId, source string, at *ActionTrigger, as Actions) (err error) { + ms.mu.Lock() + defer ms.mu.Unlock() mat, err := ms.ms.Marshal(at) if err != nil { return @@ -808,6 +918,8 @@ func (ms *MapStorage) LogActionTrigger(ubId, source string, at *ActionTrigger, a } func (ms *MapStorage) LogActionTiming(source string, at *ActionTiming, as Actions) (err error) { + ms.mu.Lock() + defer ms.mu.Unlock() mat, err := ms.ms.Marshal(at) if err != nil { return @@ -819,3 +931,27 @@ func (ms *MapStorage) LogActionTiming(source string, at *ActionTiming, as Action ms.dict[utils.LOG_ACTION_TIMMING_PREFIX+source+"_"+time.Now().Format(time.RFC3339Nano)] = []byte(fmt.Sprintf("%s*%s", string(mat), string(mas))) return } + +func (ms *MapStorage) 
SetStructVersion(v *StructVersion) (err error) { + ms.mu.Lock() + defer ms.mu.Unlock() + var result []byte + result, err = ms.ms.Marshal(v) + if err != nil { + return + } + ms.dict[utils.VERSION_PREFIX+"struct"] = result + return +} + +func (ms *MapStorage) GetStructVersion() (rsv *StructVersion, err error) { + ms.mu.RLock() + defer ms.mu.RUnlock() + rsv = &StructVersion{} + if values, ok := ms.dict[utils.VERSION_PREFIX+"struct"]; ok { + err = ms.ms.Unmarshal(values, &rsv) + } else { + return nil, utils.ErrNotFound + } + return +} diff --git a/engine/storage_mongo_datadb.go b/engine/storage_mongo_datadb.go index 4fb29e84b..70fc91dc4 100644 --- a/engine/storage_mongo_datadb.go +++ b/engine/storage_mongo_datadb.go @@ -53,12 +53,15 @@ const ( colLogAtr = "action_trigger_logs" colLogApl = "action_plan_logs" colLogErr = "error_logs" + colVer = "versions" ) var ( CGRIDLow = strings.ToLower(utils.CGRID) RunIDLow = strings.ToLower(utils.MEDI_RUNID) OrderIDLow = strings.ToLower(utils.ORDERID) + OriginHostLow = strings.ToLower(utils.CDRHOST) + OriginIDLow = strings.ToLower(utils.ACCID) ToRLow = strings.ToLower(utils.TOR) CDRHostLow = strings.ToLower(utils.CDRHOST) CDRSourceLow = strings.ToLower(utils.CDRSOURCE) @@ -216,7 +219,7 @@ func NewMongoStorage(host, port, db, user, pass string, cdrsIndexes []string) (* } } index = mgo.Index{ - Key: []string{CGRIDLow, RunIDLow}, + Key: []string{CGRIDLow, RunIDLow, OriginIDLow}, Unique: true, DropDups: false, Background: false, @@ -237,6 +240,26 @@ func NewMongoStorage(host, port, db, user, pass string, cdrsIndexes []string) (* return nil, err } } + index = mgo.Index{ + Key: []string{CGRIDLow, RunIDLow}, + Unique: true, + DropDups: false, + Background: false, + Sparse: false, + } + if err = ndb.C(utils.TBLSMCosts).EnsureIndex(index); err != nil { + return nil, err + } + index = mgo.Index{ + Key: []string{OriginHostLow, OriginIDLow}, + Unique: false, + DropDups: false, + Background: false, + Sparse: false, + } + if err = 
ndb.C(utils.TBLSMCosts).EnsureIndex(index); err != nil { + return nil, err + } return &MongoStorage{db: ndb, session: session, ms: NewCodecMsgpackMarshaler()}, err } @@ -244,8 +267,66 @@ func (ms *MongoStorage) Close() { ms.session.Close() } -func (ms *MongoStorage) GetKeysForPrefix(prefix string) ([]string, error) { - return nil, nil +func (ms *MongoStorage) GetKeysForPrefix(prefix string, skipCache bool) ([]string, error) { + var category, subject string + length := len(utils.DESTINATION_PREFIX) + if len(prefix) >= length { + category = prefix[:length] // prefix lenght + subject = fmt.Sprintf("^%s", prefix[length:]) + } else { + return nil, fmt.Errorf("unsupported prefix in GetKeysForPrefix: %s", prefix) + } + var result []string + if skipCache { + keyResult := struct{ Key string }{} + idResult := struct{ Id string }{} + switch category { + case utils.DESTINATION_PREFIX: + iter := ms.db.C(colDst).Find(bson.M{"key": bson.M{"$regex": bson.RegEx{Pattern: subject}}}).Select(bson.M{"key": 1}).Iter() + for iter.Next(&keyResult) { + result = append(result, utils.DESTINATION_PREFIX+keyResult.Key) + } + return result, nil + case utils.RATING_PLAN_PREFIX: + iter := ms.db.C(colRpl).Find(bson.M{"key": bson.M{"$regex": bson.RegEx{Pattern: subject}}}).Select(bson.M{"key": 1}).Iter() + for iter.Next(&keyResult) { + result = append(result, utils.RATING_PLAN_PREFIX+keyResult.Key) + } + return result, nil + case utils.RATING_PROFILE_PREFIX: + iter := ms.db.C(colRpf).Find(bson.M{"id": bson.M{"$regex": bson.RegEx{Pattern: subject}}}).Select(bson.M{"id": 1}).Iter() + for iter.Next(&idResult) { + result = append(result, utils.RATING_PROFILE_PREFIX+idResult.Id) + } + return result, nil + case utils.ACTION_PREFIX: + iter := ms.db.C(colAct).Find(bson.M{"key": bson.M{"$regex": bson.RegEx{Pattern: subject}}}).Select(bson.M{"key": 1}).Iter() + for iter.Next(&keyResult) { + result = append(result, utils.ACTION_PREFIX+keyResult.Key) + } + return result, nil + case utils.ACTION_PLAN_PREFIX: + 
iter := ms.db.C(colApl).Find(bson.M{"key": bson.M{"$regex": bson.RegEx{Pattern: subject}}}).Select(bson.M{"key": 1}).Iter() + for iter.Next(&keyResult) { + result = append(result, utils.ACTION_PLAN_PREFIX+keyResult.Key) + } + return result, nil + case utils.ACTION_TRIGGER_PREFIX: + iter := ms.db.C(colAtr).Find(bson.M{"key": bson.M{"$regex": bson.RegEx{Pattern: subject}}}).Select(bson.M{"key": 1}).Iter() + for iter.Next(&keyResult) { + result = append(result, utils.ACTION_TRIGGER_PREFIX+keyResult.Key) + } + return result, nil + case utils.ACCOUNT_PREFIX: + iter := ms.db.C(colAcc).Find(bson.M{"id": bson.M{"$regex": bson.RegEx{Pattern: subject}}}).Select(bson.M{"id": 1}).Iter() + for iter.Next(&idResult) { + result = append(result, utils.ACCOUNT_PREFIX+idResult.Id) + } + return result, nil + } + return result, fmt.Errorf("unsupported prefix in GetKeysForPrefix: %s", prefix) + } + return cache2go.GetEntriesKeys(prefix), nil } func (ms *MongoStorage) Flush(ignore string) (err error) { @@ -627,7 +708,7 @@ func (ms *MongoStorage) HasData(category, subject string) (bool, error) { count, err := ms.db.C(colAcc).Find(bson.M{"id": subject}).Count() return count > 0, err } - return false, errors.New("Unsupported category in HasData") + return false, errors.New("unsupported category in HasData") } func (ms *MongoStorage) GetRatingPlan(key string, skipCache bool) (rp *RatingPlan, err error) { @@ -679,7 +760,7 @@ func (ms *MongoStorage) SetRatingPlan(rp *RatingPlan) error { }{Key: rp.Id, Value: b.Bytes()}) if err == nil && historyScribe != nil { var response int - historyScribe.Record(rp.GetHistoryRecord(), &response) + historyScribe.Call("HistoryV1.Record", rp.GetHistoryRecord(), &response) } return err } @@ -704,7 +785,7 @@ func (ms *MongoStorage) SetRatingProfile(rp *RatingProfile) error { _, err := ms.db.C(colRpf).Upsert(bson.M{"id": rp.Id}, rp) if err == nil && historyScribe != nil { var response int - historyScribe.Record(rp.GetHistoryRecord(false), &response) + 
historyScribe.Call("HistoryV1.Record", rp.GetHistoryRecord(false), &response) } return err } @@ -720,7 +801,7 @@ func (ms *MongoStorage) RemoveRatingProfile(key string) error { rpf := &RatingProfile{Id: result.Id} if historyScribe != nil { var response int - go historyScribe.Record(rpf.GetHistoryRecord(true), &response) + go historyScribe.Call("HistoryV1.Record", rpf.GetHistoryRecord(true), &response) } } return iter.Close() @@ -802,7 +883,7 @@ func (ms *MongoStorage) SetDestination(dest *Destination) (err error) { }{Key: dest.Id, Value: b.Bytes()}) if err == nil && historyScribe != nil { var response int - historyScribe.Record(dest.GetHistoryRecord(), &response) + historyScribe.Call("HistoryV1.Record", dest.GetHistoryRecord(), &response) } return } @@ -835,6 +916,10 @@ func (ms *MongoStorage) SetActions(key string, as Actions) error { return err } +func (ms *MongoStorage) RemoveActions(key string) error { + return ms.db.C(colAct).Remove(bson.M{"key": key}) +} + func (ms *MongoStorage) GetSharedGroup(key string, skipCache bool) (sg *SharedGroup, err error) { if !skipCache { if x, err := cache2go.Get(utils.SHARED_GROUP_PREFIX + key); err == nil { @@ -1283,3 +1368,25 @@ func (ms *MongoStorage) GetAllCdrStats() (css []*CdrStats, err error) { err = iter.Close() return } + +func (ms *MongoStorage) SetStructVersion(v *StructVersion) (err error) { + _, err = ms.db.C(colVer).Upsert(bson.M{"key": utils.VERSION_PREFIX + "struct"}, &struct { + Key string + Value *StructVersion + }{utils.VERSION_PREFIX + "struct", v}) + return +} + +func (ms *MongoStorage) GetStructVersion() (rsv *StructVersion, err error) { + var result struct { + Key string + Value StructVersion + } + + err = ms.db.C(colVer).Find(bson.M{"key": utils.VERSION_PREFIX + "struct"}).One(&result) + if err == mgo.ErrNotFound { + rsv = nil + } + rsv = &result.Value + return +} diff --git a/engine/storage_mongo_stordb.go b/engine/storage_mongo_stordb.go index 0e0090def..4edd9ae8e 100644 --- 
a/engine/storage_mongo_stordb.go +++ b/engine/storage_mongo_stordb.go @@ -1,6 +1,7 @@ package engine import ( + "fmt" "regexp" "strings" "time" @@ -698,16 +699,25 @@ func (ms *MongoStorage) LogActionTiming(source string, at *ActionTiming, as Acti }{at, as, time.Now(), source}) } -func (ms *MongoStorage) LogCallCost(cgrid, runid, source string, cc *CallCost) error { - return ms.db.C(utils.TBLSMCosts).Insert(&SMCost{CGRID: cgrid, RunID: runid, CostSource: source, CostDetails: cc}) +func (ms *MongoStorage) SetSMCost(smc *SMCost) error { + return ms.db.C(utils.TBLSMCosts).Insert(smc) } -func (ms *MongoStorage) GetCallCostLog(cgrid, runid string) (cc *CallCost, err error) { - var result SMCost - if err = ms.db.C(utils.TBLSMCosts).Find(bson.M{CGRIDLow: cgrid, RunIDLow: runid}).One(&result); err != nil { +func (ms *MongoStorage) GetSMCosts(cgrid, runid, originHost, originIDPrefix string) (smcs []*SMCost, err error) { + filter := bson.M{CGRIDLow: cgrid, RunIDLow: runid} + if originIDPrefix != "" { + filter = bson.M{OriginIDLow: bson.M{"$regex": bson.RegEx{Pattern: fmt.Sprintf("^%s", originIDPrefix)}}, OriginHostLow: originHost, RunIDLow: runid} + } + // Execute query + iter := ms.db.C(utils.TBLSMCosts).Find(filter).Iter() + var smCost SMCost + for iter.Next(&smCost) { + smcs = append(smcs, &smCost) + } + if err := iter.Err(); err != nil { return nil, err } - return result.CostDetails, nil + return smcs, nil } func (ms *MongoStorage) SetCDR(cdr *CDR, allowUpdate bool) (err error) { diff --git a/engine/storage_redis.go b/engine/storage_redis.go index 52d4bb3a7..be591ff48 100644 --- a/engine/storage_redis.go +++ b/engine/storage_redis.go @@ -84,12 +84,15 @@ func (rs *RedisStorage) Flush(ignore string) error { return rs.db.Cmd("FLUSHDB").Err } -func (rs *RedisStorage) GetKeysForPrefix(prefix string) ([]string, error) { - r := rs.db.Cmd("KEYS", prefix+"*") - if r.Err != nil { - return nil, r.Err +func (rs *RedisStorage) GetKeysForPrefix(prefix string, skipCache bool) ([]string, 
error) { + if skipCache { + r := rs.db.Cmd("KEYS", prefix+"*") + if r.Err != nil { + return nil, r.Err + } + return r.List() } - return r.List() + return cache2go.GetEntriesKeys(prefix), nil } func (rs *RedisStorage) CacheRatingAll() error { @@ -439,7 +442,7 @@ func (rs *RedisStorage) SetRatingPlan(rp *RatingPlan) (err error) { err = rs.db.Cmd("SET", utils.RATING_PLAN_PREFIX+rp.Id, b.Bytes()).Err if err == nil && historyScribe != nil { response := 0 - go historyScribe.Record(rp.GetHistoryRecord(), &response) + go historyScribe.Call("HistoryV1.Record", rp.GetHistoryRecord(), &response) } return } @@ -468,7 +471,7 @@ func (rs *RedisStorage) SetRatingProfile(rpf *RatingProfile) (err error) { err = rs.db.Cmd("SET", utils.RATING_PROFILE_PREFIX+rpf.Id, result).Err if err == nil && historyScribe != nil { response := 0 - go historyScribe.Record(rpf.GetHistoryRecord(false), &response) + go historyScribe.Call("HistoryV1.Record", rpf.GetHistoryRecord(false), &response) } return } @@ -491,7 +494,7 @@ func (rs *RedisStorage) RemoveRatingProfile(key string) error { rpf := &RatingProfile{Id: key} if historyScribe != nil { response := 0 - go historyScribe.Record(rpf.GetHistoryRecord(true), &response) + go historyScribe.Call("HistoryV1.Record", rpf.GetHistoryRecord(true), &response) } } return nil @@ -559,7 +562,7 @@ func (rs *RedisStorage) SetDestination(dest *Destination) (err error) { err = rs.db.Cmd("SET", utils.DESTINATION_PREFIX+dest.Id, b.Bytes()).Err if err == nil && historyScribe != nil { response := 0 - go historyScribe.Record(dest.GetHistoryRecord(), &response) + go historyScribe.Call("HistoryV1.Record", dest.GetHistoryRecord(), &response) } return } @@ -587,6 +590,11 @@ func (rs *RedisStorage) SetActions(key string, as Actions) (err error) { return } +func (rs *RedisStorage) RemoveActions(key string) (err error) { + err = rs.db.Cmd("DEL", utils.ACTION_PREFIX+key).Err + return +} + func (rs *RedisStorage) GetSharedGroup(key string, skipCache bool) (sg *SharedGroup, err 
error) { key = utils.SHARED_GROUP_PREFIX + key if !skipCache { @@ -1090,3 +1098,21 @@ func (rs *RedisStorage) LogActionTiming(source string, at *ActionTiming, as Acti } return rs.db.Cmd("SET", utils.LOG_ACTION_TIMMING_PREFIX+source+"_"+time.Now().Format(time.RFC3339Nano), []byte(fmt.Sprintf("%v*%v", string(mat), string(mas)))).Err } + +func (rs *RedisStorage) SetStructVersion(v *StructVersion) (err error) { + var result []byte + result, err = rs.ms.Marshal(v) + if err != nil { + return + } + return rs.db.Cmd("SET", utils.VERSION_PREFIX+"struct", result).Err +} + +func (rs *RedisStorage) GetStructVersion() (rsv *StructVersion, err error) { + var values []byte + rsv = &StructVersion{} + if values, err = rs.db.Cmd("GET", utils.VERSION_PREFIX+"struct").Bytes(); err == nil { + err = rs.ms.Unmarshal(values, &rsv) + } + return +} diff --git a/engine/storage_sql.go b/engine/storage_sql.go index e7796e34f..5c8ab5528 100644 --- a/engine/storage_sql.go +++ b/engine/storage_sql.go @@ -25,7 +25,6 @@ import ( "fmt" "io/ioutil" "path" - "strconv" "strings" "time" @@ -35,7 +34,7 @@ import ( type SQLStorage struct { Db *sql.DB - db gorm.DB + db *gorm.DB } func (self *SQLStorage) Close() { @@ -55,7 +54,7 @@ func (self *SQLStorage) Flush(scriptsPath string) (err error) { return nil } -func (self *SQLStorage) GetKeysForPrefix(prefix string) ([]string, error) { +func (self *SQLStorage) GetKeysForPrefix(prefix string, skipCache bool) ([]string, error) { return nil, utils.ErrNotImplemented } @@ -569,21 +568,24 @@ func (self *SQLStorage) SetTpAccountActions(aas []TpAccountAction) error { return nil } -func (self *SQLStorage) LogCallCost(cgrid, runid, source string, cc *CallCost) error { - if cc == nil { +func (self *SQLStorage) SetSMCost(smc *SMCost) error { + if smc.CostDetails == nil { return nil } - tss, err := json.Marshal(cc) + tss, err := json.Marshal(smc.CostDetails) if err != nil { utils.Logger.Err(fmt.Sprintf("Error marshalling timespans to json: %v", err)) return err } tx := 
self.db.Begin() cd := &TBLSMCosts{ - Cgrid: cgrid, - RunID: runid, - CostSource: source, + Cgrid: smc.CGRID, + RunID: smc.RunID, + OriginHost: smc.OriginHost, + OriginID: smc.OriginID, + CostSource: smc.CostSource, CostDetails: string(tss), + Usage: smc.Usage, CreatedAt: time.Now(), } if tx.Save(cd).Error != nil { // Check further since error does not properly reflect duplicates here (sql: no rows in result set) @@ -594,19 +596,37 @@ func (self *SQLStorage) LogCallCost(cgrid, runid, source string, cc *CallCost) e return nil } -func (self *SQLStorage) GetCallCostLog(cgrid, runid string) (*CallCost, error) { - var tpCostDetail TBLSMCosts - if err := self.db.Where(&TBLSMCosts{Cgrid: cgrid, RunID: runid}).First(&tpCostDetail).Error; err != nil { +// GetSMCosts is used to retrieve one or multiple SMCosts based on filter +func (self *SQLStorage) GetSMCosts(cgrid, runid, originHost, originIDPrefix string) ([]*SMCost, error) { + var smCosts []*SMCost + q := self.db.Where(&TBLSMCosts{Cgrid: cgrid, RunID: runid}) + if originIDPrefix != "" { + q = self.db.Where(&TBLSMCosts{OriginHost: originHost, RunID: runid}).Where(fmt.Sprintf("origin_id LIKE '%s%%'", originIDPrefix)) + } + results := make([]*TBLSMCosts, 0) + if err := q.Find(&results).Error; err != nil { return nil, err } - if len(tpCostDetail.CostDetails) == 0 { - return nil, nil // No costs returned + for _, result := range results { + if len(result.CostDetails) == 0 { + continue + } + smc := &SMCost{ + CGRID: result.Cgrid, + RunID: result.RunID, + OriginHost: result.OriginHost, + OriginID: result.OriginID, + CostSource: result.CostSource, + Usage: result.Usage, + CostDetails: &CallCost{}, + } + if err := json.Unmarshal([]byte(result.CostDetails), smc.CostDetails); err != nil { + return nil, err + } + smCosts = append(smCosts, smc) } - var cc CallCost - if err := json.Unmarshal([]byte(tpCostDetail.CostDetails), &cc); err != nil { - return nil, err - } - return &cc, nil + + return smCosts, nil } func (self *SQLStorage) 
LogActionTrigger(ubId, source string, at *ActionTrigger, as Actions) (err error) { @@ -655,8 +675,8 @@ func (self *SQLStorage) SetCDR(cdr *CDR, allowUpdate bool) error { return saved.Error } tx = self.db.Begin() - updated := tx.Model(TBLCDRs{}).Where(&TBLCDRs{Cgrid: cdr.CGRID, RunID: cdr.RunID}).Updates( - &TBLCDRs{ + updated := tx.Model(&TBLCDRs{}).Where(&TBLCDRs{Cgrid: cdr.CGRID, RunID: cdr.RunID, OriginID: cdr.OriginID}).Updates( + TBLCDRs{ OriginHost: cdr.OriginHost, Source: cdr.Source, OriginID: cdr.OriginID, @@ -698,9 +718,6 @@ func (self *SQLStorage) GetCDRs(qryFltr *utils.CDRsFilter, remove bool) ([]*CDR, q := self.db.Table(utils.TBL_CDRS).Select("*") if qryFltr.Unscoped { q = q.Unscoped() - } else { - // Query filter - q = q.Where("(deleted_at IS NULL OR deleted_at <= '0001-01-02')") // Soft deletes } // Add filters, use in to replace the high number of ORs if len(qryFltr.CGRIDs) != 0 { @@ -937,18 +954,20 @@ func (self *SQLStorage) GetCDRs(qryFltr *utils.CDRsFilter, remove bool) ([]*CDR, q.Find(&results) for _, result := range results { - var extraFieldsMp map[string]string - if err := json.Unmarshal([]byte(result.ExtraFields), &extraFieldsMp); err != nil { - return nil, 0, fmt.Errorf("JSON unmarshal error for cgrid: %s, runid: %v, error: %s", result.Cgrid, result.RunID, err.Error()) + extraFieldsMp := make(map[string]string) + if result.ExtraFields != "" { + if err := json.Unmarshal([]byte(result.ExtraFields), &extraFieldsMp); err != nil { + return nil, 0, fmt.Errorf("JSON unmarshal error for cgrid: %s, runid: %v, error: %s", result.Cgrid, result.RunID, err.Error()) + } } var callCost CallCost - if len(result.CostDetails) != 0 { + if result.CostDetails != "" { if err := json.Unmarshal([]byte(result.CostDetails), &callCost); err != nil { return nil, 0, fmt.Errorf("JSON unmarshal callcost error for cgrid: %s, runid: %v, error: %s", result.Cgrid, result.RunID, err.Error()) } } - usageDur, _ := time.ParseDuration(strconv.FormatFloat(result.Usage, 'f', -1, 64) 
+ "s") - pddDur, _ := time.ParseDuration(strconv.FormatFloat(result.Pdd, 'f', -1, 64) + "s") + usageDur := time.Duration(result.Usage * utils.NANO_MULTIPLIER) + pddDur := time.Duration(result.Pdd * utils.NANO_MULTIPLIER) storCdr := &CDR{ CGRID: result.Cgrid, RunID: result.RunID, diff --git a/engine/storage_test.go b/engine/storage_test.go index cdb9d6802..06c40e0d1 100644 --- a/engine/storage_test.go +++ b/engine/storage_test.go @@ -274,7 +274,7 @@ func TestDifferentUuid(t *testing.T) { func TestStorageTask(t *testing.T) { // clean previous unused tasks - for i := 0; i < 18; i++ { + for i := 0; i < 20; i++ { ratingStorage.PopTask() } @@ -303,7 +303,7 @@ func TestStorageTask(t *testing.T) { t.Error("Error poping task: ", task, err) } if task, err := ratingStorage.PopTask(); err == nil && task != nil { - t.Errorf("Error poping task %+v, %v: ", task, err) + t.Errorf("Error poping task %+v, %v ", task, err) } } diff --git a/engine/storage_utils.go b/engine/storage_utils.go index 8994020fd..464431a74 100644 --- a/engine/storage_utils.go +++ b/engine/storage_utils.go @@ -156,6 +156,14 @@ func ConfigureCdrStorage(db_type, host, port, name, user, pass string, maxConn, type SMCost struct { CGRID string RunID string + OriginHost string + OriginID string CostSource string + Usage float64 CostDetails *CallCost } + +type AttrCDRSStoreSMCost struct { + Cost *SMCost + CheckDuplicate bool +} diff --git a/engine/timespans.go b/engine/timespans.go index 8a716f307..33bc79e8d 100644 --- a/engine/timespans.go +++ b/engine/timespans.go @@ -612,6 +612,26 @@ func (ts *TimeSpan) SplitByRatingPlan(rp *RatingInfo) (newTs *TimeSpan) { return } +// Splits the given timespan on activation period's activation time. 
+func (ts *TimeSpan) SplitByDay() (newTs *TimeSpan) { + if ts.TimeStart.Day() == ts.TimeEnd.Day() || utils.TimeIs0h(ts.TimeEnd) { + return + } + + splitDate := ts.TimeStart.AddDate(0, 0, 1) + splitDate = time.Date(splitDate.Year(), splitDate.Month(), splitDate.Day(), 0, 0, 0, 0, splitDate.Location()) + newTs = &TimeSpan{ + TimeStart: splitDate, + TimeEnd: ts.TimeEnd, + } + newTs.copyRatingInfo(ts) + newTs.DurationIndex = ts.DurationIndex + ts.TimeEnd = splitDate + ts.SetNewDurationIndex(newTs) + // Logger.Debug(fmt.Sprintf("RP SPLITTING: %+v %+v", ts, newTs)) + return +} + // Returns the starting time of this timespan func (ts *TimeSpan) GetGroupStart() time.Duration { s := ts.DurationIndex - ts.GetDuration() diff --git a/engine/tp_reader.go b/engine/tp_reader.go index bf662a335..4eaf2e81f 100644 --- a/engine/tp_reader.go +++ b/engine/tp_reader.go @@ -433,7 +433,7 @@ func (tpr *TpReader) LoadLCRs() (err error) { } } if !found && tpr.ratingStorage != nil { - if keys, err := tpr.ratingStorage.GetKeysForPrefix(utils.RATING_PROFILE_PREFIX + ratingProfileSearchKey); err != nil { + if keys, err := tpr.ratingStorage.GetKeysForPrefix(utils.RATING_PROFILE_PREFIX+ratingProfileSearchKey, true); err != nil { return fmt.Errorf("[LCR] error querying ratingDb %s", err.Error()) } else if len(keys) != 0 { found = true @@ -514,7 +514,7 @@ func (tpr *TpReader) LoadActions() (err error) { } } acts[idx] = &Action{ - Id: tag + strconv.Itoa(idx), + Id: tag, ActionType: tpact.Identifier, //BalanceType: tpact.BalanceType, Weight: tpact.Weight, @@ -531,11 +531,11 @@ func (tpr *TpReader) LoadActions() (err error) { } if tpact.Units != "" && tpact.Units != utils.ANY { - u, err := strconv.ParseFloat(tpact.Units, 64) + vf, err := utils.ParseBalanceFilterValue(tpact.Units) if err != nil { return err } - acts[idx].Balance.Value = utils.Float64Pointer(u) + acts[idx].Balance.Value = vf } if tpact.BalanceWeight != "" && tpact.BalanceWeight != utils.ANY { @@ -990,7 +990,7 @@ func (tpr *TpReader) 
LoadAccountActionsFiltered(qriedAA *TpAccountAction) error } } acts[idx] = &Action{ - Id: tag + strconv.Itoa(idx), + Id: tag, ActionType: tpact.Identifier, //BalanceType: tpact.BalanceType, Weight: tpact.Weight, @@ -1007,11 +1007,11 @@ func (tpr *TpReader) LoadAccountActionsFiltered(qriedAA *TpAccountAction) error } if tpact.Units != "" && tpact.Units != utils.ANY { - u, err := strconv.ParseFloat(tpact.Units, 64) + vf, err := utils.ParseBalanceFilterValue(tpact.Units) if err != nil { return err } - acts[idx].Balance.Value = utils.Float64Pointer(u) + acts[idx].Balance.Value = vf } if tpact.BalanceWeight != "" && tpact.BalanceWeight != utils.ANY { @@ -1338,7 +1338,7 @@ func (tpr *TpReader) LoadCdrStatsFiltered(tag string, save bool) (err error) { } } acts[idx] = &Action{ - Id: tag + strconv.Itoa(idx), + Id: tag, ActionType: tpact.Identifier, //BalanceType: tpact.BalanceType, Weight: tpact.Weight, @@ -1355,11 +1355,11 @@ func (tpr *TpReader) LoadCdrStatsFiltered(tag string, save bool) (err error) { } if tpact.Units != "" && tpact.Units != utils.ANY { - u, err := strconv.ParseFloat(tpact.Units, 64) + vf, err := utils.ParseBalanceFilterValue(tpact.Units) if err != nil { return err } - acts[idx].Balance.Value = utils.Float64Pointer(u) + acts[idx].Balance.Value = vf } if tpact.BalanceWeight != "" && tpact.BalanceWeight != utils.ANY { diff --git a/engine/users.go b/engine/users.go index 66d573de2..d49f14335 100644 --- a/engine/users.go +++ b/engine/users.go @@ -2,12 +2,12 @@ package engine import ( "fmt" + "reflect" "sort" "strings" "sync" "github.com/cgrates/cgrates/utils" - "github.com/cgrates/rpcclient" ) type UserProfile struct { @@ -139,21 +139,21 @@ func (um *UserMap) ReloadUsers(in string, reply *string) error { return nil } -func (um *UserMap) SetUser(up UserProfile, reply *string) error { +func (um *UserMap) SetUser(up *UserProfile, reply *string) error { um.mu.Lock() defer um.mu.Unlock() - if err := um.accountingDb.SetUser(&up); err != nil { + if err := 
um.accountingDb.SetUser(up); err != nil { *reply = err.Error() return err } um.table[up.GetId()] = up.Profile um.properties[up.GetId()] = &prop{weight: up.Weight, masked: up.Masked} - um.addIndex(&up, um.indexKeys) + um.addIndex(up, um.indexKeys) *reply = utils.OK return nil } -func (um *UserMap) RemoveUser(up UserProfile, reply *string) error { +func (um *UserMap) RemoveUser(up *UserProfile, reply *string) error { um.mu.Lock() defer um.mu.Unlock() if err := um.accountingDb.RemoveUser(up.GetId()); err != nil { @@ -162,12 +162,12 @@ func (um *UserMap) RemoveUser(up UserProfile, reply *string) error { } delete(um.table, up.GetId()) delete(um.properties, up.GetId()) - um.deleteIndex(&up) + um.deleteIndex(up) *reply = utils.OK return nil } -func (um *UserMap) UpdateUser(up UserProfile, reply *string) error { +func (um *UserMap) UpdateUser(up *UserProfile, reply *string) error { um.mu.Lock() defer um.mu.Unlock() m, found := um.table[up.GetId()] @@ -212,7 +212,7 @@ func (um *UserMap) UpdateUser(up UserProfile, reply *string) error { return nil } -func (um *UserMap) GetUsers(up UserProfile, results *UserProfiles) error { +func (um *UserMap) GetUsers(up *UserProfile, results *UserProfiles) error { um.mu.RLock() defer um.mu.RUnlock() table := um.table // no index @@ -402,44 +402,32 @@ func (um *UserMap) GetIndexes(in string, reply *map[string][]string) error { return nil } -type ProxyUserService struct { - Client *rpcclient.RpcClient -} - -func NewProxyUserService(addr string, attempts, reconnects int) (*ProxyUserService, error) { - client, err := rpcclient.NewRpcClient("tcp", addr, attempts, reconnects, utils.GOB, nil) - if err != nil { - return nil, err +func (um *UserMap) Call(serviceMethod string, args interface{}, reply interface{}) error { + parts := strings.Split(serviceMethod, ".") + if len(parts) != 2 { + return utils.ErrNotImplemented + } + // get method + method := reflect.ValueOf(um).MethodByName(parts[1]) + if !method.IsValid() { + return 
utils.ErrNotImplemented } - return &ProxyUserService{Client: client}, nil -} -func (ps *ProxyUserService) SetUser(ud UserProfile, reply *string) error { - return ps.Client.Call("UsersV1.SetUser", ud, reply) -} + // construct the params + params := []reflect.Value{reflect.ValueOf(args), reflect.ValueOf(reply)} -func (ps *ProxyUserService) RemoveUser(ud UserProfile, reply *string) error { - return ps.Client.Call("UsersV1.RemoveUser", ud, reply) -} - -func (ps *ProxyUserService) UpdateUser(ud UserProfile, reply *string) error { - return ps.Client.Call("UsersV1.UpdateUser", ud, reply) -} - -func (ps *ProxyUserService) GetUsers(ud UserProfile, users *UserProfiles) error { - return ps.Client.Call("UsersV1.GetUsers", ud, users) -} - -func (ps *ProxyUserService) AddIndex(indexes []string, reply *string) error { - return ps.Client.Call("UsersV1.AddIndex", indexes, reply) -} - -func (ps *ProxyUserService) GetIndexes(in string, reply *map[string][]string) error { - return ps.Client.Call("UsersV1.AddIndex", in, reply) -} - -func (ps *ProxyUserService) ReloadUsers(in string, reply *string) error { - return ps.Client.Call("UsersV1.ReloadUsers", in, reply) + ret := method.Call(params) + if len(ret) != 1 { + return utils.ErrServerError + } + if ret[0].Interface() == nil { + return nil + } + err, ok := ret[0].Interface().(error) + if !ok { + return utils.ErrServerError + } + return err } // extraFields - Field name in the interface containing extraFields information @@ -484,7 +472,7 @@ func LoadUserProfile(in interface{}, extraFields string) error { } } ups := UserProfiles{} - if err := userService.GetUsers(*up, &ups); err != nil { + if err := userService.Call("UsersV1.GetUsers", up, &ups); err != nil { return err } if len(ups) > 0 { diff --git a/engine/users_test.go b/engine/users_test.go index 442b22411..fa91da244 100644 --- a/engine/users_test.go +++ b/engine/users_test.go @@ -36,7 +36,7 @@ var testMap2 = UserMap{ func TestUsersAdd(t *testing.T) { tm := 
newUserMap(accountingStorage, nil) var r string - up := UserProfile{ + up := &UserProfile{ Tenant: "test", UserName: "user", Profile: map[string]string{ @@ -57,7 +57,7 @@ func TestUsersAdd(t *testing.T) { func TestUsersUpdate(t *testing.T) { tm := newUserMap(accountingStorage, nil) var r string - up := UserProfile{ + up := &UserProfile{ Tenant: "test", UserName: "user", Profile: map[string]string{ @@ -88,7 +88,7 @@ func TestUsersUpdate(t *testing.T) { func TestUsersUpdateNotFound(t *testing.T) { tm := newUserMap(accountingStorage, nil) var r string - up := UserProfile{ + up := &UserProfile{ Tenant: "test", UserName: "user", Profile: map[string]string{ @@ -106,12 +106,12 @@ func TestUsersUpdateNotFound(t *testing.T) { func TestUsersUpdateInit(t *testing.T) { tm := newUserMap(accountingStorage, nil) var r string - up := UserProfile{ + up := &UserProfile{ Tenant: "test", UserName: "user", } tm.SetUser(up, &r) - up = UserProfile{ + up = &UserProfile{ Tenant: "test", UserName: "user", Profile: map[string]string{ @@ -132,7 +132,7 @@ func TestUsersUpdateInit(t *testing.T) { func TestUsersRemove(t *testing.T) { tm := newUserMap(accountingStorage, nil) var r string - up := UserProfile{ + up := &UserProfile{ Tenant: "test", UserName: "user", Profile: map[string]string{ @@ -158,7 +158,7 @@ func TestUsersRemove(t *testing.T) { } func TestUsersGetFull(t *testing.T) { - up := UserProfile{ + up := &UserProfile{ Tenant: "test", UserName: "user", Profile: map[string]string{ @@ -173,7 +173,7 @@ func TestUsersGetFull(t *testing.T) { } func TestUsersGetFullMasked(t *testing.T) { - up := UserProfile{ + up := &UserProfile{ Tenant: "test", } results := UserProfiles{} @@ -184,7 +184,7 @@ func TestUsersGetFullMasked(t *testing.T) { } func TestUsersGetFullUnMasked(t *testing.T) { - up := UserProfile{ + up := &UserProfile{ Tenant: "test", Masked: true, } @@ -199,7 +199,7 @@ func TestUsersGetFullUnMasked(t *testing.T) { } func TestUsersGetTenant(t *testing.T) { - up := UserProfile{ + up := 
&UserProfile{ Tenant: "testX", UserName: "user", Profile: map[string]string{ @@ -214,7 +214,7 @@ func TestUsersGetTenant(t *testing.T) { } func TestUsersGetUserName(t *testing.T) { - up := UserProfile{ + up := &UserProfile{ Tenant: "test", UserName: "userX", Profile: map[string]string{ @@ -229,7 +229,7 @@ func TestUsersGetUserName(t *testing.T) { } func TestUsersGetNotFoundProfile(t *testing.T) { - up := UserProfile{ + up := &UserProfile{ Tenant: "test", UserName: "user", Profile: map[string]string{ @@ -244,7 +244,7 @@ func TestUsersGetNotFoundProfile(t *testing.T) { } func TestUsersGetMissingTenant(t *testing.T) { - up := UserProfile{ + up := &UserProfile{ UserName: "user", Profile: map[string]string{ "t": "v", @@ -258,7 +258,7 @@ func TestUsersGetMissingTenant(t *testing.T) { } func TestUsersGetMissingUserName(t *testing.T) { - up := UserProfile{ + up := &UserProfile{ Tenant: "test", Profile: map[string]string{ "t": "v", @@ -272,7 +272,7 @@ func TestUsersGetMissingUserName(t *testing.T) { } func TestUsersGetMissingId(t *testing.T) { - up := UserProfile{ + up := &UserProfile{ Profile: map[string]string{ "t": "v", }, @@ -285,7 +285,7 @@ func TestUsersGetMissingId(t *testing.T) { } func TestUsersGetMissingIdTwo(t *testing.T) { - up := UserProfile{ + up := &UserProfile{ Profile: map[string]string{ "t": "v", "x": "y", @@ -299,7 +299,7 @@ func TestUsersGetMissingIdTwo(t *testing.T) { } func TestUsersGetMissingIdTwoSort(t *testing.T) { - up := UserProfile{ + up := &UserProfile{ Profile: map[string]string{ "t": "v", "x": "y", @@ -316,7 +316,7 @@ func TestUsersGetMissingIdTwoSort(t *testing.T) { } func TestUsersGetMissingIdTwoSortWeight(t *testing.T) { - up := UserProfile{ + up := &UserProfile{ Profile: map[string]string{ "a": "b", "c": "d", @@ -367,7 +367,7 @@ func TestUsersGetFullindex(t *testing.T) { var r string testMap.index = make(map[string]map[string]bool) // reset index testMap.AddIndex([]string{"t", "x", "UserName", "Tenant"}, &r) - up := UserProfile{ + up := 
&UserProfile{ Tenant: "test", UserName: "user", Profile: map[string]string{ @@ -385,7 +385,7 @@ func TestUsersGetTenantindex(t *testing.T) { var r string testMap.index = make(map[string]map[string]bool) // reset index testMap.AddIndex([]string{"t", "x", "UserName", "Tenant"}, &r) - up := UserProfile{ + up := &UserProfile{ Tenant: "testX", UserName: "user", Profile: map[string]string{ @@ -403,7 +403,7 @@ func TestUsersGetUserNameindex(t *testing.T) { var r string testMap.index = make(map[string]map[string]bool) // reset index testMap.AddIndex([]string{"t", "x", "UserName", "Tenant"}, &r) - up := UserProfile{ + up := &UserProfile{ Tenant: "test", UserName: "userX", Profile: map[string]string{ @@ -421,7 +421,7 @@ func TestUsersGetNotFoundProfileindex(t *testing.T) { var r string testMap.index = make(map[string]map[string]bool) // reset index testMap.AddIndex([]string{"t", "x", "UserName", "Tenant"}, &r) - up := UserProfile{ + up := &UserProfile{ Tenant: "test", UserName: "user", Profile: map[string]string{ @@ -439,7 +439,7 @@ func TestUsersGetMissingTenantindex(t *testing.T) { var r string testMap.index = make(map[string]map[string]bool) // reset index testMap.AddIndex([]string{"t", "x", "UserName", "Tenant"}, &r) - up := UserProfile{ + up := &UserProfile{ UserName: "user", Profile: map[string]string{ "t": "v", @@ -456,7 +456,7 @@ func TestUsersGetMissingUserNameindex(t *testing.T) { var r string testMap.index = make(map[string]map[string]bool) // reset index testMap.AddIndex([]string{"t", "x", "UserName", "Tenant"}, &r) - up := UserProfile{ + up := &UserProfile{ Tenant: "test", Profile: map[string]string{ "t": "v", @@ -473,7 +473,7 @@ func TestUsersGetMissingIdindex(t *testing.T) { var r string testMap.index = make(map[string]map[string]bool) // reset index testMap.AddIndex([]string{"t", "x", "UserName", "Tenant"}, &r) - up := UserProfile{ + up := &UserProfile{ Profile: map[string]string{ "t": "v", }, @@ -489,7 +489,7 @@ func TestUsersGetMissingIdTwoINdex(t 
*testing.T) { var r string testMap.index = make(map[string]map[string]bool) // reset index testMap.AddIndex([]string{"t", "x", "UserName", "Tenant"}, &r) - up := UserProfile{ + up := &UserProfile{ Profile: map[string]string{ "t": "v", "x": "y", @@ -509,7 +509,7 @@ func TestUsersAddUpdateRemoveIndexes(t *testing.T) { if len(tm.index) != 0 { t.Error("error adding indexes: ", tm.index) } - tm.SetUser(UserProfile{ + tm.SetUser(&UserProfile{ Tenant: "test", UserName: "user", Profile: map[string]string{ @@ -519,7 +519,7 @@ func TestUsersAddUpdateRemoveIndexes(t *testing.T) { if len(tm.index) != 1 || !tm.index["t:v"]["test:user"] { t.Error("error adding indexes: ", tm.index) } - tm.SetUser(UserProfile{ + tm.SetUser(&UserProfile{ Tenant: "test", UserName: "best", Profile: map[string]string{ @@ -531,7 +531,7 @@ func TestUsersAddUpdateRemoveIndexes(t *testing.T) { !tm.index["t:v"]["test:best"] { t.Error("error adding indexes: ", tm.index) } - tm.UpdateUser(UserProfile{ + tm.UpdateUser(&UserProfile{ Tenant: "test", UserName: "best", Profile: map[string]string{ @@ -543,7 +543,7 @@ func TestUsersAddUpdateRemoveIndexes(t *testing.T) { !tm.index["t:v1"]["test:best"] { t.Error("error adding indexes: ", tm.index) } - tm.UpdateUser(UserProfile{ + tm.UpdateUser(&UserProfile{ Tenant: "test", UserName: "best", Profile: map[string]string{ @@ -555,7 +555,7 @@ func TestUsersAddUpdateRemoveIndexes(t *testing.T) { !tm.index["t:v"]["test:best"] { t.Error("error adding indexes: ", tm.index) } - tm.RemoveUser(UserProfile{ + tm.RemoveUser(&UserProfile{ Tenant: "test", UserName: "best", Profile: map[string]string{ @@ -567,7 +567,7 @@ func TestUsersAddUpdateRemoveIndexes(t *testing.T) { tm.index["t:v"]["test:best"] { t.Error("error adding indexes: ", tm.index) } - tm.RemoveUser(UserProfile{ + tm.RemoveUser(&UserProfile{ Tenant: "test", UserName: "user", Profile: map[string]string{ diff --git a/engine/version.go b/engine/version.go new file mode 100644 index 000000000..08849db5c --- /dev/null +++ 
b/engine/version.go @@ -0,0 +1,211 @@ +package engine + +import ( + "errors" + "fmt" + + "github.com/cgrates/cgrates/utils" +) + +func CheckVersion() error { + // get current db version + dbVersion, err := accountingStorage.GetStructVersion() + if err != nil { + if lhList, err := accountingStorage.GetLoadHistory(1, true); err != nil || len(lhList) == 0 { + // no data, write version + if err := accountingStorage.SetStructVersion(CurrentVersion); err != nil { + utils.Logger.Warning(fmt.Sprintf("Could not write current version to db: %v", err)) + } + } else { + // has data but no version => run migration + msg := "Could not detect data structures version: run appropriate migration" + utils.Logger.Crit(msg) + return errors.New(msg) + } + } else { + // comparing versions + if len(CurrentVersion.CompareAndMigrate(dbVersion)) > 0 { + // write the new values + msg := "Migration needed: please backup cgr data and run cgr-cloader -migrate" + utils.Logger.Crit(msg) + return errors.New(msg) + } + } + return nil +} + +var ( + CurrentVersion = &StructVersion{ + Destinations: "1", + RatingPlans: "1", + RatingProfiles: "1", + Lcrs: "1", + DerivedChargers: "1", + Actions: "1", + ActionPlans: "1", + ActionTriggers: "1", + SharedGroups: "1", + Accounts: "1", + CdrStats: "1", + Users: "1", + Alias: "1", + PubSubs: "1", + LoadHistory: "1", + Cdrs: "1", + SMCosts: "1", + } +) + +type StructVersion struct { + // rating + Destinations string + RatingPlans string + RatingProfiles string + Lcrs string + DerivedChargers string + Actions string + ActionPlans string + ActionTriggers string + SharedGroups string + // accounting + Accounts string + CdrStats string + Users string + Alias string + PubSubs string + LoadHistory string + // cdr + Cdrs string + SMCosts string +} + +type MigrationInfo struct { + Prefix string + DbVersion string + CurrentVersion string +} + +func (sv *StructVersion) CompareAndMigrate(dbVer *StructVersion) []*MigrationInfo { + var migrationInfoList []*MigrationInfo + if 
sv.Destinations != dbVer.Destinations { + migrationInfoList = append(migrationInfoList, &MigrationInfo{ + Prefix: utils.DESTINATION_PREFIX, + DbVersion: dbVer.Destinations, + CurrentVersion: CurrentVersion.Destinations, + }) + + } + if sv.RatingPlans != dbVer.RatingPlans { + migrationInfoList = append(migrationInfoList, &MigrationInfo{ + Prefix: utils.RATING_PLAN_PREFIX, + DbVersion: dbVer.RatingPlans, + CurrentVersion: CurrentVersion.RatingPlans, + }) + } + if sv.RatingProfiles != dbVer.RatingProfiles { + migrationInfoList = append(migrationInfoList, &MigrationInfo{ + Prefix: utils.RATING_PROFILE_PREFIX, + DbVersion: dbVer.RatingProfiles, + CurrentVersion: CurrentVersion.RatingProfiles, + }) + } + if sv.Lcrs != dbVer.Lcrs { + migrationInfoList = append(migrationInfoList, &MigrationInfo{ + Prefix: utils.LCR_PREFIX, + DbVersion: dbVer.Lcrs, + CurrentVersion: CurrentVersion.Lcrs, + }) + } + if sv.DerivedChargers != dbVer.DerivedChargers { + migrationInfoList = append(migrationInfoList, &MigrationInfo{ + Prefix: utils.DERIVEDCHARGERS_PREFIX, + DbVersion: dbVer.DerivedChargers, + CurrentVersion: CurrentVersion.DerivedChargers, + }) + } + if sv.Actions != dbVer.Actions { + migrationInfoList = append(migrationInfoList, &MigrationInfo{ + Prefix: utils.ACTION_PREFIX, + DbVersion: dbVer.Actions, + CurrentVersion: CurrentVersion.Actions, + }) + } + if sv.ActionPlans != dbVer.ActionPlans { + migrationInfoList = append(migrationInfoList, &MigrationInfo{ + Prefix: utils.ACTION_PLAN_PREFIX, + DbVersion: dbVer.ActionPlans, + CurrentVersion: CurrentVersion.ActionPlans, + }) + } + if sv.ActionTriggers != dbVer.ActionTriggers { + migrationInfoList = append(migrationInfoList, &MigrationInfo{ + Prefix: utils.ACTION_TRIGGER_PREFIX, + DbVersion: dbVer.ActionTriggers, + CurrentVersion: CurrentVersion.ActionTriggers, + }) + } + if sv.SharedGroups != dbVer.SharedGroups { + migrationInfoList = append(migrationInfoList, &MigrationInfo{ + Prefix: utils.SHARED_GROUP_PREFIX, + DbVersion: 
dbVer.SharedGroups, + CurrentVersion: CurrentVersion.SharedGroups, + }) + } + if sv.Accounts != dbVer.Accounts { + migrationInfoList = append(migrationInfoList, &MigrationInfo{ + Prefix: utils.ACCOUNT_PREFIX, + DbVersion: dbVer.Accounts, + CurrentVersion: CurrentVersion.Accounts, + }) + } + if sv.CdrStats != dbVer.CdrStats { + migrationInfoList = append(migrationInfoList, &MigrationInfo{ + Prefix: utils.CDR_STATS_PREFIX, + DbVersion: dbVer.CdrStats, + CurrentVersion: CurrentVersion.CdrStats, + }) + } + if sv.Users != dbVer.Users { + migrationInfoList = append(migrationInfoList, &MigrationInfo{ + Prefix: utils.USERS_PREFIX, + DbVersion: dbVer.Users, + CurrentVersion: CurrentVersion.Users, + }) + } + if sv.Alias != dbVer.Alias { + migrationInfoList = append(migrationInfoList, &MigrationInfo{ + Prefix: utils.ALIASES_PREFIX, + DbVersion: dbVer.Alias, + CurrentVersion: CurrentVersion.Alias, + }) + } + if sv.PubSubs != dbVer.PubSubs { + migrationInfoList = append(migrationInfoList, &MigrationInfo{ + Prefix: utils.PUBSUB_SUBSCRIBERS_PREFIX, + DbVersion: dbVer.PubSubs, + CurrentVersion: CurrentVersion.PubSubs, + }) + } + if sv.LoadHistory != dbVer.LoadHistory { + migrationInfoList = append(migrationInfoList, &MigrationInfo{ + Prefix: utils.LOADINST_KEY, + DbVersion: dbVer.LoadHistory, + CurrentVersion: CurrentVersion.LoadHistory, + }) + } + if sv.Cdrs != dbVer.Cdrs { + migrationInfoList = append(migrationInfoList, &MigrationInfo{ + Prefix: utils.CDRS_SOURCE, + DbVersion: dbVer.RatingPlans, + CurrentVersion: CurrentVersion.RatingPlans, + }) + } + if sv.SMCosts != dbVer.SMCosts { + migrationInfoList = append(migrationInfoList, &MigrationInfo{ + Prefix: utils.SMG, + DbVersion: dbVer.SMCosts, + CurrentVersion: CurrentVersion.SMCosts, + }) + } + return migrationInfoList +} diff --git a/general_tests/rpcclient_it_test.go b/general_tests/rpcclient_it_test.go new file mode 100644 index 000000000..987b30662 --- /dev/null +++ b/general_tests/rpcclient_it_test.go @@ -0,0 +1,154 @@ 
+/* +Real-time Charging System for Telecom & ISP environments +Copyright (C) ITsysCOM GmbH + +This program is free software: you can Storagetribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITH*out ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see +*/ + +package general_tests + +import ( + "os/exec" + "path" + "testing" + "time" + + "github.com/cgrates/cgrates/config" + "github.com/cgrates/cgrates/engine" + "github.com/cgrates/cgrates/utils" + "github.com/cgrates/rpcclient" +) + +var rpcITCfgPath1, rpcITCfgPath2 string +var rpcITCfg1, rpcITCfg2 *config.CGRConfig +var rpcRAL1, rpcRAL2 *rpcclient.RpcClient +var rpcPoolFirst *rpcclient.RpcClientPool +var ral1, ral2 *exec.Cmd +var err error +var ral1ID, ral2ID string + +func TestRPCITInitCfg(t *testing.T) { + if !*testIntegration { + return + } + rpcITCfgPath1 = path.Join(*dataDir, "conf", "samples", "multiral1") + rpcITCfgPath2 = path.Join(*dataDir, "conf", "samples", "multiral2") + // Init config first + rpcITCfg1, err = config.NewCGRConfigFromFolder(rpcITCfgPath1) + if err != nil { + t.Error(err) + } + rpcITCfg2, err = config.NewCGRConfigFromFolder(rpcITCfgPath2) + if err != nil { + t.Error(err) + } +} + +func TestRPCITStartEngine(t *testing.T) { + if !*testIntegration { + return + } + if ral1, err = engine.StopStartEngine(rpcITCfgPath1, *waitRater); err != nil { + t.Fatal(err) + } + if ral2, err = engine.StartEngine(rpcITCfgPath2, *waitRater); err != nil { + t.Fatal(err) + } +} + +// Connect rpc client to rater +func TestRPCITRpcConnPool(t *testing.T) { + if !*testIntegration { + 
return + } + rpcPoolFirst = rpcclient.NewRpcClientPool(rpcclient.POOL_FIRST) + rpcRAL1, err = rpcclient.NewRpcClient("tcp", rpcITCfg1.RPCJSONListen, 3, 1, rpcclient.JSON_RPC, nil) + if err != nil { + t.Fatal(err) + } + rpcPoolFirst.AddClient(rpcRAL1) + rpcRAL2, err = rpcclient.NewRpcClient("tcp", rpcITCfg2.RPCJSONListen, 3, 1, rpcclient.JSON_RPC, nil) + if err != nil { + t.Fatal(err) + } + rpcPoolFirst.AddClient(rpcRAL2) +} + +// Connect rpc client to rater +func TestRPCITStatusFirstInitial(t *testing.T) { + if !*testIntegration { + return + } + var status map[string]interface{} + if err := rpcPoolFirst.Call("Responder.Status", "", &status); err != nil { + t.Error(err) + } else if status[utils.InstanceID].(string) == "" { + t.Error("Empty InstanceID received") + } else { + ral1ID = status[utils.InstanceID].(string) + } + if err := rpcPoolFirst.Call("Responder.Status", "", &status); err != nil { // Make sure second time we land on the same instance + t.Error(err) + } else if status[utils.InstanceID].(string) != ral1ID { + t.Errorf("Expecting: %s, received: %s", ral1ID, status[utils.InstanceID].(string)) + } +} + +// Connect rpc client to rater +func TestRPCITStatusFirstFailover(t *testing.T) { + if !*testIntegration { + return + } + if err := ral1.Process.Kill(); err != nil { // Kill the first RAL + t.Error(err) + } + time.Sleep(time.Duration(*waitRater) * time.Millisecond) + var status map[string]interface{} + if err := rpcPoolFirst.Call("Responder.Status", "", &status); err != nil { + t.Error(err) + } else if status[utils.InstanceID].(string) == "" { + t.Error("Empty InstanceID received") + } else { + ral1ID = status[utils.InstanceID].(string) + } + if err := rpcPoolFirst.Call("Responder.Status", "", &status); err != nil { // Make sure second time we land on the same instance + t.Error(err) + } else if status[utils.InstanceID].(string) != ral1ID { + t.Errorf("Expecting: %s, received: %s", ral1ID, status[utils.InstanceID].(string)) + } else { + ral2ID = 
status[utils.InstanceID].(string) + } +} + +func TestRPCITStatusFirstFailback(t *testing.T) { + if !*testIntegration { + return + } + if ral1, err = engine.StartEngine(rpcITCfgPath1, *waitRater); err != nil { + t.Fatal(err) + } + var status map[string]interface{} + if err := rpcPoolFirst.Call("Responder.Status", "", &status); err != nil { + t.Error(err) + } else if status[utils.InstanceID].(string) == ral2ID { + t.Error("Should receive new ID") + } else { + ral1ID = status[utils.InstanceID].(string) + } + if err := rpcPoolFirst.Call("Responder.Status", "", &status); err != nil { // Make sure second time we land on the same instance + t.Error(err) + } else if status[utils.InstanceID].(string) != ral1ID { + t.Errorf("Expecting: %s, received: %s", ral1ID, status[utils.InstanceID].(string)) + } +} diff --git a/general_tests/tp_it_test.go b/general_tests/tp_it_test.go new file mode 100644 index 000000000..0a1681ec1 --- /dev/null +++ b/general_tests/tp_it_test.go @@ -0,0 +1,397 @@ +package general_tests + +import ( + "net/rpc" + "net/rpc/jsonrpc" + "path" + "testing" + "time" + + "github.com/cgrates/cgrates/apier/v1" + "github.com/cgrates/cgrates/apier/v2" + "github.com/cgrates/cgrates/config" + "github.com/cgrates/cgrates/engine" + "github.com/cgrates/cgrates/utils" +) + +var tpCfgPath string +var tpCfg *config.CGRConfig +var tpRPC *rpc.Client +var tpLoadInst engine.LoadInstance // Share load information between tests + +func TestTpInitCfg(t *testing.T) { + if !*testIntegration { + return + } + tpCfgPath = path.Join(*dataDir, "conf", "samples", "tutlocal") + // Init config first + var err error + tpCfg, err = config.NewCGRConfigFromFolder(tpCfgPath) + if err != nil { + t.Error(err) + } + tpCfg.DataFolderPath = *dataDir // Share DataFolderPath through config towards StoreDb for Flush() + config.SetCgrConfig(tpCfg) +} + +// Remove data in both rating and accounting db +func TestTpResetDataDb(t *testing.T) { + if !*testIntegration { + return + } + if err := 
engine.InitDataDb(tpCfg); err != nil { + t.Fatal(err) + } +} + +// Wipe out the cdr database +func TestTpResetStorDb(t *testing.T) { + if !*testIntegration { + return + } + if err := engine.InitStorDb(tpCfg); err != nil { + t.Fatal(err) + } +} + +// Start CGR Engine +func TestTpStartEngine(t *testing.T) { + if !*testIntegration { + return + } + if _, err := engine.StopStartEngine(tpCfgPath, *waitRater); err != nil { + t.Fatal(err) + } +} + +// Connect rpc client to rater +func TestTpRpcConn(t *testing.T) { + if !*testIntegration { + return + } + var err error + tpRPC, err = jsonrpc.Dial("tcp", tpCfg.RPCJSONListen) // We connect over JSON so we can also troubleshoot if needed + if err != nil { + t.Fatal(err) + } +} + +// Load the tariff plan, creating accounts and their balances +func TestTpLoadTariffPlanFromFolder(t *testing.T) { + if !*testIntegration { + return + } + attrs := &utils.AttrLoadTpFromFolder{FolderPath: path.Join(*dataDir, "tariffplans", "testtp")} + if err := tpRPC.Call("ApierV2.LoadTariffPlanFromFolder", attrs, &tpLoadInst); err != nil { + t.Error(err) + } else if tpLoadInst.LoadId == "" { + t.Error("Empty loadId received, loadInstance: ", tpLoadInst) + } + time.Sleep(time.Duration(*waitRater) * time.Millisecond) // Give time for scheduler to execute topups +} + +func TestTpBalanceCounter(t *testing.T) { + if !*testIntegration { + return + } + tStart := time.Date(2016, 3, 31, 0, 0, 0, 0, time.UTC) + cd := engine.CallDescriptor{ + Direction: "*out", + Category: "call", + Tenant: "cgrates.org", + Subject: "1001", + Destination: "+49", + DurationIndex: 0, + TimeStart: tStart, + TimeEnd: tStart.Add(time.Duration(20) * time.Second), + } + var cc engine.CallCost + if err := tpRPC.Call("Responder.Debit", cd, &cc); err != nil { + t.Error("Got error on Responder.GetCost: ", err.Error()) + } else if cc.GetDuration() != 20*time.Second { + t.Errorf("Calling Responder.MaxDebit got callcost: %v", cc.GetDuration()) + } + var acnt *engine.Account + attrs := 
&utils.AttrGetAccount{Tenant: "cgrates.org", Account: "1001"} + if err := tpRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error("Got error on ApierV2.GetAccount: ", err.Error()) + } else if acnt.UnitCounters[utils.MONETARY][1].Counters[0].Value != 20.0 { + t.Errorf("Calling ApierV2.GetBalance received: %s", utils.ToIJSON(acnt)) + } +} + +func TestTpActionTriggers(t *testing.T) { + if !*testIntegration { + return + } + var atrs engine.ActionTriggers + if err := tpRPC.Call("ApierV1.GetActionTriggers", v1.AttrGetActionTriggers{GroupIDs: []string{}}, &atrs); err != nil { + t.Error("Got error on ApierV1.GetActionTriggers: ", err.Error()) + } else if len(atrs) != 9 { + t.Errorf("Calling v1.GetActionTriggers got: %v", atrs) + } + var reply string + if err := tpRPC.Call("ApierV1.SetActionTrigger", v1.AttrSetActionTrigger{ + GroupID: "TestATR", + UniqueID: "Unique atr id", + BalanceID: utils.StringPointer("BID1"), + }, &reply); err != nil { + t.Error("Got error on ApierV1.SetActionTrigger: ", err.Error()) + } else if reply != utils.OK { + t.Errorf("Calling v1.SetActionTrigger got: %v", reply) + } + + if err := tpRPC.Call("ApierV1.GetActionTriggers", v1.AttrGetActionTriggers{GroupIDs: []string{}}, &atrs); err != nil { + t.Error("Got error on ApierV1.GetActionTriggers: ", err.Error()) + } else if len(atrs) != 10 { + t.Errorf("Calling v1.GetActionTriggers got: %v", atrs) + } + if err := tpRPC.Call("ApierV1.GetActionTriggers", v1.AttrGetActionTriggers{GroupIDs: []string{"TestATR"}}, &atrs); err != nil { + t.Error("Got error on ApierV1.GetActionTriggers: ", err.Error()) + } else if len(atrs) != 1 { + t.Errorf("Calling v1.GetActionTriggers got: %v", atrs) + } + if atrs[0].ID != "TestATR" || + atrs[0].UniqueID != "Unique atr id" || + *atrs[0].Balance.ID != "BID1" { + t.Error("Wrong action trigger set: ", utils.ToIJSON(atrs[0])) + } +} + +func TestTpZeroCost(t *testing.T) { + if !*testIntegration { + return + } + var acnt *engine.Account + attrs := 
&utils.AttrGetAccount{Tenant: "cgrates.org", Account: "1012"} + if err := tpRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error("Got error on ApierV2.GetAccount: ", err.Error()) + } + balanceValueBefore := acnt.BalanceMap[utils.MONETARY][0].Value + tStart := time.Date(2016, 3, 31, 0, 0, 0, 0, time.UTC) + cd := engine.CallDescriptor{ + Direction: "*out", + Category: "call", + Tenant: "cgrates.org", + Subject: "free", + Account: "1012", + Destination: "+49", + DurationIndex: 0, + TimeStart: tStart, + TimeEnd: tStart.Add(time.Duration(20) * time.Second), + } + var cc engine.CallCost + if err := tpRPC.Call("Responder.Debit", cd, &cc); err != nil { + t.Error("Got error on Responder.Debit: ", err.Error()) + } else if cc.GetDuration() != 20*time.Second { + t.Errorf("Calling Responder.MaxDebit got callcost: %v", utils.ToIJSON(cc)) + } + if err := tpRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error("Got error on ApierV2.GetAccount: ", err.Error()) + } else if acnt.BalanceMap[utils.MONETARY][0].Value != balanceValueBefore { + t.Errorf("Calling ApierV2.GetAccount received: %s", utils.ToIJSON(acnt)) + } +} + +func TestTpZeroNegativeCost(t *testing.T) { + if !*testIntegration { + return + } + tStart := time.Date(2016, 3, 31, 0, 0, 0, 0, time.UTC) + cd := engine.CallDescriptor{ + Direction: "*out", + Category: "call", + Tenant: "cgrates.org", + Subject: "free", + Account: "1013", + Destination: "+4915", + DurationIndex: 0, + TimeStart: tStart, + TimeEnd: tStart.Add(time.Duration(20) * time.Second), + } + var cc engine.CallCost + if err := tpRPC.Call("Responder.Debit", cd, &cc); err != nil { + t.Error("Got error on Responder.GetCost: ", err.Error()) + } else if cc.GetDuration() != 20*time.Second { + t.Errorf("Calling Responder.MaxDebit got callcost: %v", utils.ToIJSON(cc)) + } + var acnt engine.Account + attrs := &utils.AttrGetAccount{Tenant: "cgrates.org", Account: "1013"} + if err := tpRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil 
{ + t.Error("Got error on ApierV2.GetAccount: ", err.Error()) + } else if acnt.BalanceMap[utils.VOICE][0].Value != 100.0 { + t.Errorf("Calling ApierV2.GetAccount received: %s", utils.ToIJSON(acnt)) + } +} + +func TestTpExecuteActionCgrRpc(t *testing.T) { + if !*testIntegration { + return + } + var reply string + if err := tpRPC.Call("ApierV2.ExecuteAction", utils.AttrExecuteAction{ActionsId: "RPC"}, &reply); err != nil { + t.Error("Got error on ApierV2.ExecuteAction: ", err.Error()) + } else if reply != utils.OK { + t.Errorf("Calling ExecuteAction got reply: %s", reply) + } + var acnt engine.Account + attrs := &utils.AttrGetAccount{Tenant: "cgrates.org", Account: "rpc"} + if err := tpRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error("Got error on ApierV2.GetAccount: ", err.Error()) + } +} + +func TestTpCreateExecuteActionMatch(t *testing.T) { + if !*testIntegration { + return + } + var reply string + if err := tpRPC.Call("ApierV2.SetActions", utils.AttrSetActions{ + ActionsId: "PAYMENT_2056bd2fe137082970f97102b64e42fd", + Actions: []*utils.TPAction{ + &utils.TPAction{ + BalanceType: "*monetary", + Directions: "*out", + Identifier: "*topup", + RatingSubject: "", + Units: "10.500000", + Weight: 10, + }, + }, + }, &reply); err != nil { + t.Error("Got error on ApierV2.SetActions: ", err.Error()) + } else if reply != utils.OK { + t.Errorf("Calling ApierV2.SetActions got reply: %s", reply) + } + if err := tpRPC.Call("ApierV2.ExecuteAction", utils.AttrExecuteAction{ + Tenant: "cgrates.org", + Account: "1015", + ActionsId: "PAYMENT_2056bd2fe137082970f97102b64e42fd", + }, &reply); err != nil { + t.Error("Got error on ApierV2.ExecuteAction: ", err.Error()) + } else if reply != utils.OK { + t.Errorf("Calling ExecuteAction got reply: %s", reply) + } + if err := tpRPC.Call("ApierV2.ExecuteAction", utils.AttrExecuteAction{ + Tenant: "cgrates.org", + Account: "1015", + ActionsId: "PAYMENT_2056bd2fe137082970f97102b64e42fd", + }, &reply); err != nil { + 
t.Error("Got error on ApierV2.ExecuteAction: ", err.Error()) + } else if reply != utils.OK { + t.Errorf("Calling ExecuteAction got reply: %s", reply) + } + var acnt engine.Account + attrs := &utils.AttrGetAccount{Tenant: "cgrates.org", Account: "1015"} + if err := tpRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error("Got error on ApierV2.GetAccount: ", err.Error()) + } + if len(acnt.BalanceMap) != 1 || + len(acnt.BalanceMap[utils.MONETARY]) != 1 || + acnt.BalanceMap[utils.MONETARY].GetTotalValue() != 21 { + t.Error("error matching previous created balance: ", utils.ToIJSON(acnt.BalanceMap)) + } +} + +func TestTpSetRemActions(t *testing.T) { + if !*testIntegration { + return + } + var reply string + if err := tpRPC.Call("ApierV2.SetActions", utils.AttrSetActions{ + ActionsId: "TO_BE_DELETED", + Actions: []*utils.TPAction{ + &utils.TPAction{ + BalanceType: "*monetary", + Directions: "*out", + Identifier: "*topup", + RatingSubject: "", + Units: "10.500000", + Weight: 10, + }, + }, + }, &reply); err != nil { + t.Error("Got error on ApierV2.SetActions: ", err.Error()) + } else if reply != utils.OK { + t.Errorf("Calling ApierV2.SetActions got reply: %s", reply) + } + actionsMap := make(map[string]engine.Actions) + if err := tpRPC.Call("ApierV2.GetActions", v2.AttrGetActions{ + ActionIDs: []string{"PAYMENT_2056bd2fe137082970f97102b64e42fd"}, + }, &actionsMap); err != nil { + t.Error("Got error on ApierV2.GetActions: ", err.Error()) + } else if len(actionsMap) != 1 { + t.Errorf("Calling ApierV2.GetActions got reply: %s", utils.ToIJSON(actionsMap)) + } + if err := tpRPC.Call("ApierV2.RemActions", v1.AttrRemActions{ + ActionIDs: []string{"PAYMENT_2056bd2fe137082970f97102b64e42fd"}, + }, &reply); err != nil { + t.Error("Got error on ApierV2.RemActions: ", err.Error()) + } else if reply != utils.OK { + t.Errorf("Calling ApierV2.RemActions got reply: %s", reply) + } + if err := tpRPC.Call("ApierV2.GetActions", v2.AttrGetActions{ + ActionIDs: 
[]string{"PAYMENT_2056bd2fe137082970f97102b64e42fd"}, + }, &actionsMap); err == nil { + t.Error("no error on ApierV2.GetActions: ", err) + } +} + +func TestTpRemActionsRefenced(t *testing.T) { + if !*testIntegration { + return + } + + // no more reference check for sake of speed! + + actionsMap := make(map[string]engine.Actions) + if err := tpRPC.Call("ApierV2.GetActions", v2.AttrGetActions{ + ActionIDs: []string{"TOPUP_VOICE"}, + }, &actionsMap); err != nil { + t.Error("Got error on ApierV2.GetActions: ", err.Error()) + } else if len(actionsMap) != 1 { + t.Errorf("Calling ApierV2.GetActions got reply: %s", utils.ToIJSON(actionsMap)) + } + var reply string + if err := tpRPC.Call("ApierV2.RemActions", v1.AttrRemActions{ + ActionIDs: []string{"TOPUP_VOICE"}, + }, &reply); err != nil { + t.Error("Error on ApierV2.RemActions: ", err.Error()) + } else if reply != utils.OK { + t.Errorf("Calling ApierV2.RemActions got reply: %s", reply) + } + if err := tpRPC.Call("ApierV2.GetActions", v2.AttrGetActions{ + ActionIDs: []string{"PAYMENT_2056bd2fe137082970f97102b64e42fd"}, + }, &actionsMap); err == nil { + t.Error("no error on ApierV2.GetActions: ", err) + } +} + +func TestApierResetAccountActionTriggers(t *testing.T) { + if !*testIntegration { + return + } + var acnt engine.Account + attrs := &utils.AttrGetAccount{Tenant: "cgrates.org", Account: "1005"} + if err := tpRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.ActionTriggers[0].Executed == true { + t.Errorf("wrong action trigger executed flag: %s", utils.ToIJSON(acnt.ActionTriggers)) + } + var reply string + if err := tpRPC.Call("ApierV2.ResetAccountActionTriggers", v1.AttrResetAccountActionTriggers{ + Tenant: "cgrates.org", + Account: "1005", + GroupID: "STANDARD_TRIGGERS", + Executed: true, + }, &reply); err != nil { + t.Error("Error on ApierV2.ResetAccountActionTriggers: ", err.Error()) + } else if reply != utils.OK { + t.Errorf("Calling ApierV2.ResetAccountActionTriggers got 
reply: %s", reply) + } + if err := tpRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.ActionTriggers[0].Executed == false { + t.Errorf("wrong action trigger executed flag: %s", utils.ToIJSON(acnt.ActionTriggers)) + } +} diff --git a/general_tests/tut_smgeneric_it_test.go b/general_tests/tut_smgeneric_it_test.go index 889686b1f..0c72cada8 100644 --- a/general_tests/tut_smgeneric_it_test.go +++ b/general_tests/tut_smgeneric_it_test.go @@ -114,7 +114,7 @@ func TestTutSMGCacheStats(t *testing.T) { } var rcvStats *utils.CacheStats - expectedStats := &utils.CacheStats{Destinations: 4, RatingPlans: 4, RatingProfiles: 9, Actions: 7, ActionPlans: 4, SharedGroups: 1, Aliases: 1, + expectedStats := &utils.CacheStats{Destinations: 4, RatingPlans: 4, RatingProfiles: 9, Actions: 8, ActionPlans: 4, SharedGroups: 1, Aliases: 1, DerivedChargers: 1, LcrProfiles: 5, CdrStats: 6, Users: 3, LastLoadId: smgLoadInst.LoadId, LastLoadTime: smgLoadInst.LoadTime.Format(time.RFC3339)} var args utils.AttrCacheStats if err := tutSMGRpc.Call("ApierV2.GetCacheStats", args, &rcvStats); err != nil { diff --git a/general_tests/tutorial_local_test.go b/general_tests/tutorial_local_test.go index 5d4f08f58..d57ce2fdd 100644 --- a/general_tests/tutorial_local_test.go +++ b/general_tests/tutorial_local_test.go @@ -115,7 +115,7 @@ func TestTutLocalCacheStats(t *testing.T) { } var rcvStats *utils.CacheStats - expectedStats := &utils.CacheStats{Destinations: 4, RatingPlans: 4, RatingProfiles: 9, Actions: 7, ActionPlans: 4, SharedGroups: 1, Aliases: 1, + expectedStats := &utils.CacheStats{Destinations: 4, RatingPlans: 4, RatingProfiles: 9, Actions: 8, ActionPlans: 4, SharedGroups: 1, Aliases: 1, DerivedChargers: 1, LcrProfiles: 5, CdrStats: 6, Users: 3, LastLoadId: loadInst.LoadId, LastLoadTime: loadInst.LoadTime.Format(time.RFC3339)} var args utils.AttrCacheStats if err := tutLocalRpc.Call("ApierV2.GetCacheStats", args, &rcvStats); err != nil { @@ -427,6 +427,7 @@ 
func TestTutLocalMaxDebit(t *testing.T) { TimeStart: tStart, TimeEnd: tStart.Add(time.Duration(120) * time.Second), } + cd.CgrID = "1" if err := tutLocalRpc.Call("Responder.MaxDebit", cd, &cc); err != nil { t.Error("Got error on Responder.GetCost: ", err.Error()) } else if cc.GetDuration() == 120 { @@ -443,6 +444,7 @@ func TestTutLocalMaxDebit(t *testing.T) { TimeStart: tStart, TimeEnd: tStart.Add(time.Duration(120) * time.Second), } + cd.CgrID = "2" if err := tutLocalRpc.Call("Responder.MaxDebit", cd, &cc); err != nil { t.Error("Got error on Responder.GetCost: ", err.Error()) } else if cc.GetDuration() != time.Duration(62)*time.Second { // We have as strategy *dsconnect @@ -543,7 +545,7 @@ func TestTutLocalProcessExternalCdr(t *testing.T) { Usage: "1", PDD: "7.0", ExtraFields: map[string]string{"field_extr1": "val_extr1", "fieldextr2": "valextr2"}, } var reply string - if err := tutLocalRpc.Call("CdrsV2.ProcessExternalCdr", cdr, &reply); err != nil { + if err := tutLocalRpc.Call("CdrsV1.ProcessExternalCdr", cdr, &reply); err != nil { t.Error("Unexpected error: ", err.Error()) } else if reply != utils.OK { t.Error("Unexpected reply received: ", reply) @@ -563,7 +565,7 @@ func TestTutLocalProcessExternalCdrUP(t *testing.T) { ExtraFields: map[string]string{"Cli": "+4986517174964", "fieldextr2": "valextr2", "SysUserName": utils.USERS}, } var reply string - if err := tutLocalRpc.Call("CdrsV2.ProcessExternalCdr", cdr, &reply); err != nil { + if err := tutLocalRpc.Call("CdrsV1.ProcessExternalCdr", cdr, &reply); err != nil { t.Error("Unexpected error: ", err.Error()) } else if reply != utils.OK { t.Error("Unexpected reply received: ", reply) @@ -643,7 +645,7 @@ func TestTutLocalCostErrors(t *testing.T) { Usage: "1", PDD: "7.0", ExtraFields: map[string]string{"field_extr1": "val_extr1", "fieldextr2": "valextr2"}, } var reply string - if err := tutLocalRpc.Call("CdrsV2.ProcessExternalCdr", cdr, &reply); err != nil { + if err := tutLocalRpc.Call("CdrsV1.ProcessExternalCdr", 
cdr, &reply); err != nil { t.Error("Unexpected error: ", err.Error()) } else if reply != utils.OK { t.Error("Unexpected reply received: ", reply) @@ -669,7 +671,7 @@ func TestTutLocalCostErrors(t *testing.T) { SetupTime: "2014-08-04T13:00:00Z", AnswerTime: "2014-08-04T13:00:07Z", Usage: "1", PDD: "7.0", ExtraFields: map[string]string{"field_extr1": "val_extr1", "fieldextr2": "valextr2"}, } - if err := tutLocalRpc.Call("CdrsV2.ProcessExternalCdr", cdr2, &reply); err != nil { + if err := tutLocalRpc.Call("CdrsV1.ProcessExternalCdr", cdr2, &reply); err != nil { t.Error("Unexpected error: ", err.Error()) } else if reply != utils.OK { t.Error("Unexpected reply received: ", reply) @@ -694,7 +696,7 @@ func TestTutLocalCostErrors(t *testing.T) { SetupTime: "2014-08-04T13:00:00Z", AnswerTime: "2014-08-04T13:00:07Z", Usage: "1", PDD: "7.0", ExtraFields: map[string]string{"field_extr1": "val_extr1", "fieldextr2": "valextr2"}, } - if err := tutLocalRpc.Call("CdrsV2.ProcessExternalCdr", cdr3, &reply); err != nil { + if err := tutLocalRpc.Call("CdrsV1.ProcessExternalCdr", cdr3, &reply); err != nil { t.Error("Unexpected error: ", err.Error()) } else if reply != utils.OK { t.Error("Unexpected reply received: ", reply) @@ -716,7 +718,7 @@ func TestTutLocalCostErrors(t *testing.T) { } // Make sure queueids were created -func TestTutFsCallsCdrStats(t *testing.T) { +func TestTutLocalCdrStats(t *testing.T) { if !*testLocal { return } @@ -884,7 +886,7 @@ func TestTutLocalLcrQos(t *testing.T) { ExtraFields: map[string]string{"field_extr1": "val_extr1", "fieldextr2": "valextr2"}} var reply string for _, cdr := range []*engine.CDR{testCdr1, testCdr2} { - if err := tutLocalRpc.Call("CdrsV2.ProcessCdr", cdr, &reply); err != nil { + if err := tutLocalRpc.Call("CdrsV1.ProcessCdr", cdr, &reply); err != nil { t.Error("Unexpected error: ", err.Error()) } else if reply != utils.OK { t.Error("Unexpected reply received: ", reply) @@ -914,7 +916,7 @@ func TestTutLocalLcrQos(t *testing.T) { Direction: 
"*out", Tenant: "cgrates.org", Category: "call", Account: "1003", Subject: "1003", Destination: "1004", SetupTime: time.Date(2014, 12, 7, 8, 42, 24, 0, time.UTC), AnswerTime: time.Date(2014, 12, 7, 8, 42, 26, 0, time.UTC), Usage: time.Duration(180) * time.Second, Supplier: "suppl2"} - if err := tutLocalRpc.Call("CdrsV2.ProcessCdr", testCdr3, &reply); err != nil { + if err := tutLocalRpc.Call("CdrsV1.ProcessCdr", testCdr3, &reply); err != nil { t.Error("Unexpected error: ", err.Error()) } else if reply != utils.OK { t.Error("Unexpected reply received: ", reply) @@ -981,7 +983,7 @@ func TestTutLocalLcrQosThreshold(t *testing.T) { SetupTime: time.Date(2014, 12, 7, 8, 42, 24, 0, time.UTC), AnswerTime: time.Date(2014, 12, 7, 8, 42, 26, 0, time.UTC), Usage: time.Duration(60) * time.Second, Supplier: "suppl2"} var reply string - if err := tutLocalRpc.Call("CdrsV2.ProcessCdr", testCdr4, &reply); err != nil { // Should drop ACD under the 2m required by threshold, removing suppl2 from lcr + if err := tutLocalRpc.Call("CdrsV1.ProcessCdr", testCdr4, &reply); err != nil { // Should drop ACD under the 2m required by threshold, removing suppl2 from lcr t.Error("Unexpected error: ", err.Error()) } else if reply != utils.OK { t.Error("Unexpected reply received: ", reply) @@ -1045,7 +1047,7 @@ func TestTutLocalLcrQosThreshold(t *testing.T) { Direction: "*out", Tenant: "cgrates.org", Category: "call", Account: "1003", Subject: "1003", Destination: "1004", SetupTime: time.Date(2014, 12, 7, 8, 42, 24, 0, time.UTC), AnswerTime: time.Date(2014, 12, 7, 8, 42, 26, 0, time.UTC), Usage: time.Duration(1) * time.Second, Supplier: "suppl2"} - if err := tutLocalRpc.Call("CdrsV2.ProcessCdr", testCdr5, &reply); err != nil { // Should drop ACD under the 1m required by threshold, removing suppl2 from lcr + if err := tutLocalRpc.Call("CdrsV1.ProcessCdr", testCdr5, &reply); err != nil { // Should drop ACD under the 1m required by threshold, removing suppl2 from lcr t.Error("Unexpected error: ", 
err.Error()) } else if reply != utils.OK { t.Error("Unexpected reply received: ", reply) @@ -1290,6 +1292,101 @@ func TestTutLocalCdrStatsAfter(t *testing.T) { } */ +func TestTutLocalPrepaidCDRWithSMCost(t *testing.T) { + if !*testLocal { + return + } + cdr := &engine.CDR{CGRID: utils.Sha1("testprepaid1", time.Date(2016, 4, 6, 13, 29, 24, 0, time.UTC).String()), + ToR: utils.VOICE, OriginID: "testprepaid1", OriginHost: "192.168.1.1", Source: "TEST_PREPAID_CDR_SMCOST1", RequestType: utils.META_PREPAID, + Direction: utils.OUT, Tenant: "cgrates.org", Category: "call", Account: "1001", Subject: "1001", Destination: "1003", + SetupTime: time.Date(2016, 4, 6, 13, 29, 24, 0, time.UTC), AnswerTime: time.Date(2016, 4, 6, 13, 30, 0, 0, time.UTC), + Usage: time.Duration(90) * time.Second, Supplier: "suppl1", + ExtraFields: map[string]string{"field_extr1": "val_extr1", "fieldextr2": "valextr2"}} + smCost := &engine.SMCost{CGRID: cdr.CGRID, + RunID: utils.META_DEFAULT, + OriginHost: cdr.OriginHost, + OriginID: cdr.OriginID, + CostSource: "TestTutLocalPrepaidCDRWithSMCost", + Usage: cdr.Usage.Seconds(), + CostDetails: &engine.CallCost{ + Direction: utils.OUT, + Destination: "1003", + Timespans: []*engine.TimeSpan{ + &engine.TimeSpan{ + TimeStart: time.Date(2016, 4, 6, 13, 30, 0, 0, time.UTC).Local(), // MongoDB saves timestamps in local timezone + TimeEnd: time.Date(2016, 4, 6, 13, 31, 30, 0, time.UTC).Local(), + DurationIndex: 0, + RateInterval: &engine.RateInterval{ + Rating: &engine.RIRate{Rates: engine.RateGroups{ + &engine.Rate{GroupIntervalStart: 0, Value: 0.01, RateIncrement: 10 * time.Second, RateUnit: time.Second}}}}, + }, + }, + TOR: utils.VOICE, + }, + } + var reply string + if err := tutLocalRpc.Call("CdrsV1.StoreSMCost", &engine.AttrCDRSStoreSMCost{Cost: smCost}, &reply); err != nil { + t.Error("Unexpected error: ", err.Error()) + } else if reply != utils.OK { + t.Error("Unexpected reply received: ", reply) + } + if err := tutLocalRpc.Call("CdrsV1.ProcessCdr", cdr, 
&reply); err != nil { + t.Error("Unexpected error: ", err.Error()) + } else if reply != utils.OK { + t.Error("Unexpected reply received: ", reply) + } + time.Sleep(time.Duration(*waitRater) * time.Millisecond) // Give time for CDR to be processed + var cdrs []*engine.ExternalCDR + req := utils.RPCCDRsFilter{RunIDs: []string{utils.META_DEFAULT}, CGRIDs: []string{cdr.CGRID}} + if err := tutLocalRpc.Call("ApierV2.GetCdrs", req, &cdrs); err != nil { + t.Error("Unexpected error: ", err.Error()) + } else if len(cdrs) != 1 { + t.Error("Unexpected number of CDRs returned: ", len(cdrs)) + } else { + if cdrs[0].OriginID != cdr.OriginID { + t.Errorf("Unexpected OriginID for Cdr received: %+v", cdrs[0]) + } + if cdrs[0].Cost != 0.9 { + t.Errorf("Unexpected Cost for Cdr received: %+v", cdrs[0]) + } + } +} + +func TestTutLocalPrepaidCDRWithoutSMCost(t *testing.T) { + if !*testLocal { + return + } + cdr := &engine.CDR{CGRID: utils.Sha1("testprepaid2", time.Date(2016, 4, 6, 13, 29, 24, 0, time.UTC).String()), + ToR: utils.VOICE, OriginID: "testprepaid2", OriginHost: "192.168.1.1", Source: "TEST_PREPAID_CDR_NO_SMCOST1", RequestType: utils.META_PREPAID, + Direction: utils.OUT, Tenant: "cgrates.org", Category: "call", Account: "1001", Subject: "1001", Destination: "1003", + SetupTime: time.Date(2016, 4, 6, 13, 29, 24, 0, time.UTC), AnswerTime: time.Date(2016, 4, 6, 13, 30, 0, 0, time.UTC), + Usage: time.Duration(90) * time.Second, Supplier: "suppl1", + ExtraFields: map[string]string{"field_extr1": "val_extr1", "fieldextr2": "valextr2"}} + var reply string + if err := tutLocalRpc.Call("CdrsV1.ProcessCdr", cdr, &reply); err != nil { + t.Error("Unexpected error: ", err.Error()) + } else if reply != utils.OK { + t.Error("Unexpected reply received: ", reply) + } + /* + time.Sleep(time.Duration(7000) * time.Millisecond) // Give time for CDR to be processed + var cdrs []*engine.ExternalCDR + req := utils.RPCCDRsFilter{RunIDs: []string{utils.META_DEFAULT}, CGRIDs: []string{cdr.CGRID}} + if 
err := tutLocalRpc.Call("ApierV2.GetCdrs", req, &cdrs); err != nil { + t.Error("Unexpected error: ", err.Error()) + } else if len(cdrs) != 1 { + t.Error("Unexpected number of CDRs returned: ", len(cdrs)) + } else { + if cdrs[0].OriginID != cdr.OriginID { + t.Errorf("Unexpected OriginID for Cdr received: %+v", cdrs[0]) + } + if cdrs[0].Cost != 0.9 { + t.Errorf("Unexpected Cost for Cdr received: %+v", cdrs[0]) + } + } + */ +} + func TestTutLocalStopCgrEngine(t *testing.T) { if !*testLocal { return diff --git a/glide.lock b/glide.lock index fa156bc51..69a1eef99 100644 --- a/glide.lock +++ b/glide.lock @@ -1,8 +1,8 @@ -hash: c4e3a1bdd7452ec3af195e09b8b3b1b9a61e36edfad557aeb01686706019c352 -updated: 2016-03-09T00:08:37.493018177+02:00 +hash: 5c488630d1d32687b7a3c3b22c47ceaf7eb3cffb764799706728a6accbcd3ff5 +updated: 2016-04-26T18:00:43.651987521+03:00 imports: - name: github.com/cenkalti/hub - version: 57d753b5f4856e77b3cf8ecce78c97215a7d324d + version: b864404b5f990410d56858a1b0a6fac23a85443f - name: github.com/cenkalti/rpc2 version: 2d1be381ce47537e9e076b2b76dc70933162e4e9 - name: github.com/cgrates/fsock @@ -12,7 +12,7 @@ imports: - name: github.com/cgrates/osipsdagram version: 3d6beed663452471dec3ca194137a30d379d9e8f - name: github.com/cgrates/rpcclient - version: 79661b1e514823a9ac93b2b9e97e037ee190ba47 + version: 9a6185f8a2093ce10f1a08242b0d757f24795800 - name: github.com/DisposaBoy/JsonConfigReader version: 33a99fdf1d5ee1f79b5077e9c06f955ad356d5f4 - name: github.com/fiorix/go-diameter @@ -26,39 +26,43 @@ imports: - diam/sm/smparser - diam/sm/smpeer - name: github.com/go-sql-driver/mysql - version: 0f2db9e6c9cff80a97ca5c2c5096242cc1554e16 + version: 7ebe0a500653eeb1859664bed5e48dec1e164e73 - name: github.com/gorhill/cronexpr - version: a557574d6c024ed6e36acc8b610f5f211c91568a + version: f0984319b44273e83de132089ae42b1810f4933b - name: github.com/jinzhu/gorm - version: 2f7811c55f286c55cfc3a2aefb5c4049b9cd5214 + version: 5174cc5c242a728b435ea2be8a2f7f998e15429b - 
name: github.com/jinzhu/inflection version: 3272df6c21d04180007eb3349844c89a3856bc25 - name: github.com/kr/pty version: f7ee69f31298ecbe5d2b349c711e2547a617d398 - name: github.com/lib/pq - version: 165a3529e799da61ab10faed1fabff3662d6193f + version: 3cd0097429be7d611bb644ef85b42bfb102ceea4 subpackages: - oid - name: github.com/mediocregopher/radix.v2 - version: 7bdaf7c45ec452ca691ab20535471e24460f0876 + version: 74e50e64194d2d2f4836212451c28b127f9d7fa1 subpackages: - pool - redis - name: github.com/peterh/liner - version: ad1edfd30321d8f006ccf05f1e0524adeb943060 + version: 49ca65981c3cd7db64145977af1d186e9d317afa - name: github.com/ugorji/go - version: 187fa0f8af224437e08ecb3f208c4d1a94859a61 + version: a396ed22fc049df733440d90efe17475e3929ccb subpackages: - codec - name: golang.org/x/net - version: a4bbce9fcae005b22ae5443f6af064d80a6f5a55 + version: b797637b7aeeed133049c7281bfa31dcc9ca42d6 subpackages: - websocket - context +- name: golang.org/x/sys + version: f64b50fbea64174967a8882830d621a18ee1548e + subpackages: + - unix - name: gopkg.in/fsnotify.v1 - version: 875cf421b32f8f1b31bd43776297876d01542279 + version: 30411dbcefb7a1da7e84f75530ad3abe4011b4f8 - name: gopkg.in/mgo.v2 - version: d90005c5262a3463800497ea5a89aed5fe22c886 + version: b6e2fa371e64216a45e61072a96d4e3859f169da subpackages: - bson - internal/sasl diff --git a/history/file_scribe.go b/history/file_scribe.go index 8d023ba09..e6b3d31c7 100644 --- a/history/file_scribe.go +++ b/history/file_scribe.go @@ -28,8 +28,12 @@ import ( "os" "os/exec" "path/filepath" + "reflect" + "strings" "sync" "time" + + "github.com/cgrates/cgrates/utils" ) type FileScribe struct { @@ -170,3 +174,31 @@ func (s *FileScribe) save(filename string) error { f.Close() return s.gitCommit() } + +func (s *FileScribe) Call(serviceMethod string, args interface{}, reply interface{}) error { + parts := strings.Split(serviceMethod, ".") + if len(parts) != 2 { + return utils.ErrNotImplemented + } + // get method + method := 
reflect.ValueOf(s).MethodByName(parts[1]) + if !method.IsValid() { + return utils.ErrNotImplemented + } + + // construct the params + params := []reflect.Value{reflect.ValueOf(args), reflect.ValueOf(reply)} + + ret := method.Call(params) + if len(ret) != 1 { + return utils.ErrServerError + } + if ret[0].Interface() == nil { + return nil + } + err, ok := ret[0].Interface().(error) + if !ok { + return utils.ErrServerError + } + return err +} diff --git a/history/mock_scribe.go b/history/mock_scribe.go index 1192f9cb4..a529bef51 100644 --- a/history/mock_scribe.go +++ b/history/mock_scribe.go @@ -21,7 +21,11 @@ package history import ( "bufio" "bytes" + "reflect" + "strings" "sync" + + "github.com/cgrates/cgrates/utils" ) type MockScribe struct { @@ -64,3 +68,31 @@ func (s *MockScribe) GetBuffer(fn string) *bytes.Buffer { defer s.mu.Unlock() return s.BufMap[fn] } + +func (s *MockScribe) Call(serviceMethod string, args interface{}, reply interface{}) error { + parts := strings.Split(serviceMethod, ".") + if len(parts) != 2 { + return utils.ErrNotImplemented + } + // get method + method := reflect.ValueOf(s).MethodByName(parts[1]) + if !method.IsValid() { + return utils.ErrNotImplemented + } + + // construct the params + params := []reflect.Value{reflect.ValueOf(args), reflect.ValueOf(reply)} + + ret := method.Call(params) + if len(ret) != 1 { + return utils.ErrServerError + } + if ret[0].Interface() == nil { + return nil + } + err, ok := ret[0].Interface().(error) + if !ok { + return utils.ErrServerError + } + return err +} diff --git a/history/proxy_scribe.go b/history/proxy_scribe.go deleted file mode 100644 index 3b00ebc0a..000000000 --- a/history/proxy_scribe.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Rating system designed to be used in VoIP Carriers World -Copyright (C) 2012-2015 ITsysCOM - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either 
version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see -*/ - -package history - -import ( - "github.com/cgrates/cgrates/utils" - "github.com/cgrates/rpcclient" -) - -type ProxyScribe struct { - Client *rpcclient.RpcClient -} - -func NewProxyScribe(addr string, attempts, reconnects int) (*ProxyScribe, error) { - client, err := rpcclient.NewRpcClient("tcp", addr, attempts, reconnects, utils.GOB, nil) - if err != nil { - return nil, err - } - return &ProxyScribe{Client: client}, nil -} - -func (ps *ProxyScribe) Record(rec Record, out *int) error { - return ps.Client.Call("Scribe.Record", rec, out) -} diff --git a/history/scribe.go b/history/scribe.go index 819bada44..2a6a1f3ba 100644 --- a/history/scribe.go +++ b/history/scribe.go @@ -30,10 +30,6 @@ const ( RATING_PROFILES_FN = "rating_profiles.json" ) -type Scribe interface { - Record(Record, *int) error -} - type Record struct { Id string Filename string diff --git a/packages/squeeze/rules b/packages/squeeze/rules index b813db9c6..34115a611 100755 --- a/packages/squeeze/rules +++ b/packages/squeeze/rules @@ -5,7 +5,6 @@ export DH_VERBOSE=1 export GOPATH=$(CURDIR) -export GO15VENDOREXPERIMENT=1 PKGDIR=debian/cgrates SRCDIR=src/github.com/cgrates/cgrates @@ -24,8 +23,8 @@ binary-arch: clean dh_installdirs mkdir -p src/github.com/cgrates ln -sf $(CURDIR) src/github.com/cgrates - go get -v github.com/Masterminds/glide - $(GOPATH)/bin/glide install + go get -u -v github.com/Masterminds/glide + $(GOPATH)/bin/glide install --force exec $(CURDIR)/build.sh mkdir -p $(PKGDIR)/usr/bin cp $(GOPATH)/bin/cgr-* $(PKGDIR)/usr/bin/ diff --git a/sessionmanager/data_it_test.go 
b/sessionmanager/data_it_test.go new file mode 100644 index 000000000..ee73c7f38 --- /dev/null +++ b/sessionmanager/data_it_test.go @@ -0,0 +1,959 @@ +package sessionmanager + +import ( + "net/rpc/jsonrpc" + "path" + "testing" + "time" + + "github.com/cgrates/cgrates/config" + "github.com/cgrates/cgrates/engine" + "github.com/cgrates/cgrates/utils" +) + +func TestSMGDataInitCfg(t *testing.T) { + if !*testIntegration { + return + } + daCfgPath = path.Join(*dataDir, "conf", "samples", "smg") + // Init config first + var err error + daCfg, err = config.NewCGRConfigFromFolder(daCfgPath) + if err != nil { + t.Error(err) + } + daCfg.DataFolderPath = *dataDir // Share DataFolderPath through config towards StoreDb for Flush() + config.SetCgrConfig(daCfg) +} + +// Remove data in both rating and accounting db +func TestSMGDataResetDataDb(t *testing.T) { + if !*testIntegration { + return + } + if err := engine.InitDataDb(daCfg); err != nil { + t.Fatal(err) + } +} + +// Wipe out the cdr database +func TestSMGDataResetStorDb(t *testing.T) { + if !*testIntegration { + return + } + if err := engine.InitStorDb(daCfg); err != nil { + t.Fatal(err) + } +} + +// Start CGR Engine +func TestSMGDataStartEngine(t *testing.T) { + if !*testIntegration { + return + } + if _, err := engine.StopStartEngine(daCfgPath, *waitRater); err != nil { + t.Fatal(err) + } +} + +// Connect rpc client to rater +func TestSMGDataApierRpcConn(t *testing.T) { + if !*testIntegration { + return + } + var err error + smgRPC, err = jsonrpc.Dial("tcp", daCfg.RPCJSONListen) // We connect over JSON so we can also troubleshoot if needed + if err != nil { + t.Fatal(err) + } +} + +// Load the tariff plan, creating accounts and their balances +func TestSMGDataTPFromFolder(t *testing.T) { + if !*testIntegration { + return + } + attrs := &utils.AttrLoadTpFromFolder{FolderPath: path.Join(*dataDir, "tariffplans", "testtp")} + var loadInst engine.LoadInstance + if err := smgRPC.Call("ApierV2.LoadTariffPlanFromFolder", attrs, 
&loadInst); err != nil { + t.Error(err) + } + time.Sleep(time.Duration(*waitRater) * time.Millisecond) // Give time for scheduler to execute topups +} + +func TestSMGDataLastUsedData(t *testing.T) { + if !*testIntegration { + return + } + var acnt *engine.Account + attrs := &utils.AttrGetAccount{Tenant: "cgrates.org", Account: "1010"} + eAcntVal := 50000000000.000000 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + smgEv := SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123491", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1010", + utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.SETUP_TIME: "2016-01-05 18:30:59", + utils.ANSWER_TIME: "2016-01-05 18:31:05", + utils.USAGE: "1048576", + } + var maxUsage float64 + if err := smgRPC.Call("SMGenericV1.SessionStart", smgEv, &maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 1.048576e+06 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 49998945280.000000 //1054720 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + smgEv = SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123491", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1010", + utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.SETUP_TIME: "2016-01-05 18:30:59", + utils.ANSWER_TIME: "2016-01-05 18:31:05", + utils.USAGE: "1048576", + 
utils.LastUsed: "20000", + } + if err := smgRPC.Call("SMGenericV1.SessionUpdate", smgEv, &maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 1.048576e+06 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 49998924800.000000 //20480 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + smgEv = SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123491", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1010", + utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.SETUP_TIME: "2016-01-05 18:30:59", + utils.ANSWER_TIME: "2016-01-05 18:31:05", + utils.LastUsed: "0", + } + var rpl string + if err = smgRPC.Call("SMGenericV1.SessionEnd", smgEv, &rpl); err != nil || rpl != utils.OK { + t.Error(err) + } + eAcntVal = 49999979520.000000 //20480 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } +} + +func TestSMGDataLastUsedMultipleData(t *testing.T) { + if !*testIntegration { + return + } + var acnt *engine.Account + attrs := &utils.AttrGetAccount{Tenant: "cgrates.org", Account: "1010"} + eAcntVal := 49999979520.000000 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + smgEv := SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123492", + utils.DIRECTION: utils.OUT, + 
utils.ACCOUNT: "1010", + utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.SETUP_TIME: "2016-01-05 18:30:50", + utils.ANSWER_TIME: "2016-01-05 18:31:05", + utils.USAGE: "1048576", + } + var maxUsage float64 + if err := smgRPC.Call("SMGenericV1.SessionStart", smgEv, &maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 1.048576e+06 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 49998924800.000000 // 1054720 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + aSessions := make([]*ActiveSession, 0) + if err := smgRPC.Call("SMGenericV1.ActiveSessions", utils.AttrSMGGetActiveSessions{}, &aSessions); err != nil { + t.Error(err) + } else if len(aSessions) != 1 || aSessions[0].Usage.Seconds() != 1048576 { + t.Errorf("wrong active sessions: %f", aSessions[0].Usage.Seconds()) + } + smgEv = SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123492", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1010", + utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.USAGE: "1048576", + utils.LastUsed: "20000", + } + if err := smgRPC.Call("SMGenericV1.SessionUpdate", smgEv, &maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 1.048576e+06 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 49998904320.000000 // 20480 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + if err := 
smgRPC.Call("SMGenericV1.ActiveSessions", utils.AttrSMGGetActiveSessions{}, &aSessions); err != nil { + t.Error(err) + } else if len(aSessions) != 1 || aSessions[0].Usage.Seconds() != 1068576 { + t.Errorf("wrong active sessions: %f", aSessions[0].Usage.Seconds()) + } + smgEv = SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123492", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1010", + utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.USAGE: "1048576", + utils.LastUsed: "20000", + } + if err := smgRPC.Call("SMGenericV1.SessionUpdate", smgEv, &maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 1.048576e+06 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 49998883840.000000 // 20480 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + if err := smgRPC.Call("SMGenericV1.ActiveSessions", utils.AttrSMGGetActiveSessions{}, &aSessions); err != nil { + t.Error(err) + } else if len(aSessions) != 1 || aSessions[0].Usage.Seconds() != 1088576 { + t.Errorf("wrong active sessions: %f", aSessions[0].Usage.Seconds()) + } + smgEv = SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123492", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1010", + utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.USAGE: "1048576", + utils.LastUsed: "20000", + } + if err := smgRPC.Call("SMGenericV1.SessionUpdate", smgEv, &maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 1.048576e+06 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 49998863360.000000 // 20480 + if err := 
smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + if err := smgRPC.Call("SMGenericV1.ActiveSessions", utils.AttrSMGGetActiveSessions{}, &aSessions); err != nil { + t.Error(err) + } else if len(aSessions) != 1 || aSessions[0].Usage.Seconds() != 1108576 { + t.Errorf("wrong active sessions: %f", aSessions[0].Usage.Seconds()) + } + smgEv = SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123492", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1010", + utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.USAGE: "1048576", + utils.LastUsed: "20000", + } + if err := smgRPC.Call("SMGenericV1.SessionUpdate", smgEv, &maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 1.048576e+06 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 49998842880.000000 // 20480 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + if err := smgRPC.Call("SMGenericV1.ActiveSessions", utils.AttrSMGGetActiveSessions{}, &aSessions); err != nil { + t.Error(err) + } else if len(aSessions) != 1 || aSessions[0].Usage.Seconds() != 1128576 { + t.Errorf("wrong active sessions: %f", aSessions[0].Usage.Seconds()) + } + smgEv = SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123492", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1010", + utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.LastUsed: "0", + } + var 
rpl string + if err = smgRPC.Call("SMGenericV1.SessionEnd", smgEv, &rpl); err != nil || rpl != utils.OK { + t.Error(err) + } + eAcntVal = 49999897600.000000 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + if err := smgRPC.Call("SMGenericV1.ActiveSessions", utils.AttrSMGGetActiveSessions{}, &aSessions); err != nil { + t.Error(err) + } else if len(aSessions) != 0 { + t.Errorf("wrong active sessions: %+v", aSessions) + } +} + +func TestSMGDataDerivedChargingNoCredit(t *testing.T) { + if !*testIntegration { + return + } + var acnt *engine.Account + attrs := &utils.AttrGetAccount{Tenant: "cgrates.org", Account: "1011"} + eAcntVal := 50000.0 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.VOICE].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.VOICE].GetTotalValue()) + } + smgEv := SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.VOICE, + utils.ACCID: "1234967", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1011", + utils.SUBJECT: "1011", + utils.DESTINATION: "+49", + utils.CATEGORY: "call", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.SETUP_TIME: "2016-01-05 18:30:49", + utils.ANSWER_TIME: "2016-01-05 18:31:05", + utils.USAGE: "100", + } + var maxUsage float64 + if err := smgRPC.Call("SMGenericV1.SessionStart", smgEv, &maxUsage); err != nil { + t.Error(err) + } + // the second derived charging run has no credit + + if maxUsage != 0 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 50000.0 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.VOICE].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, 
received: %f", eAcntVal, acnt.BalanceMap[utils.VOICE].GetTotalValue()) + } +} + +func TestSMGDataTTLExpired(t *testing.T) { + if !*testIntegration { + return + } + var acnt *engine.Account + attrs := &utils.AttrGetAccount{Tenant: "cgrates.org", Account: "1010"} + eAcntVal := 49999897600.000000 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + smgEv := SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123494", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1010", + utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.SETUP_TIME: "2016-01-05 18:30:52", + utils.ANSWER_TIME: "2016-01-05 18:31:05", + utils.USAGE: "1048576", + } + var maxUsage float64 + if err := smgRPC.Call("SMGenericV1.SessionStart", smgEv, &maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 1.048576e+06 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 49998842880.000000 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + time.Sleep(50 * time.Millisecond) + eAcntVal = 49998842880.000000 //1054720 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } +} + +func TestSMGDataTTLExpiredMultiUpdates(t *testing.T) { + if !*testIntegration { + return + } + var acnt *engine.Account + attrs := &utils.AttrGetAccount{Tenant: "cgrates.org", 
Account: "1010"} + eAcntVal := 49998842880.000000 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + smgEv := SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123495", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1010", + utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.SETUP_TIME: "2016-01-05 18:30:53", + utils.ANSWER_TIME: "2016-01-05 18:31:05", + utils.USAGE: "1048576", + } + var maxUsage float64 + if err := smgRPC.Call("SMGenericV1.SessionStart", smgEv, &maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 1.048576e+06 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 49997788160.000000 //1054720 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + aSessions := make([]*ActiveSession, 0) + if err := smgRPC.Call("SMGenericV1.ActiveSessions", utils.AttrSMGGetActiveSessions{}, &aSessions); err != nil { + t.Error(err) + } else if len(aSessions) != 1 || aSessions[0].Usage.Seconds() != 1048576 { + t.Errorf("wrong active sessions: %f", aSessions[0].Usage.Seconds()) + } + + smgEv = SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123495", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1010", + utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.USAGE: "1048576", + utils.LastUsed: "20000", + } + if err := smgRPC.Call("SMGenericV1.SessionUpdate", smgEv, 
&maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 1.048576e+06 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 49997767680.000000 // 20480 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + + time.Sleep(50 * time.Millisecond) + eAcntVal = 49997767680.000000 //0 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + if err := smgRPC.Call("SMGenericV1.ActiveSessions", utils.AttrSMGGetActiveSessions{}, &aSessions); err != nil { + t.Error(err) + } else if len(aSessions) != 0 { + t.Errorf("wrong active sessions: %+v", aSessions) + } +} + +func TestSMGDataMultipleDataNoUsage(t *testing.T) { + if !*testIntegration { + return + } + var acnt *engine.Account + attrs := &utils.AttrGetAccount{Tenant: "cgrates.org", Account: "1010"} + eAcntVal := 49997767680.000000 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + smgEv := SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123496", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1010", + utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.SETUP_TIME: "2016-01-05 18:30:54", + utils.ANSWER_TIME: "2016-01-05 18:31:05", + utils.USAGE: "1048576", + } + var maxUsage float64 + if err := smgRPC.Call("SMGenericV1.SessionStart", smgEv, &maxUsage); err != nil { + 
t.Error(err) + } + if maxUsage != 1.048576e+06 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 49996712960.000000 // 1054720 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + aSessions := make([]*ActiveSession, 0) + if err := smgRPC.Call("SMGenericV1.ActiveSessions", utils.AttrSMGGetActiveSessions{}, &aSessions); err != nil { + t.Error(err) + } else if len(aSessions) != 1 || aSessions[0].Usage.Seconds() != 1048576 { + t.Errorf("wrong active sessions: %f", aSessions[0].Usage.Seconds()) + } + smgEv = SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123496", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1010", + utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.USAGE: "1048576", + utils.LastUsed: "0", + } + if err := smgRPC.Call("SMGenericV1.SessionUpdate", smgEv, &maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 1.048576e+06 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 49996712960.000000 // 0 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + if err := smgRPC.Call("SMGenericV1.ActiveSessions", utils.AttrSMGGetActiveSessions{}, &aSessions); err != nil { + t.Error(err) + } else if len(aSessions) != 1 || aSessions[0].Usage.Seconds() != 1048576 { + t.Errorf("wrong active sessions: %f", aSessions[0].Usage.Seconds()) + } + smgEv = SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123496", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1010", 
+ utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.USAGE: "1048576", + utils.LastUsed: "0", + } + if err := smgRPC.Call("SMGenericV1.SessionUpdate", smgEv, &maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 1.048576e+06 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 49996712960.000000 // 0 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + if err := smgRPC.Call("SMGenericV1.ActiveSessions", utils.AttrSMGGetActiveSessions{}, &aSessions); err != nil { + t.Error(err) + } else if len(aSessions) != 1 || aSessions[0].Usage.Seconds() != 1048576 { + t.Errorf("wrong active sessions: %f", aSessions[0].Usage.Seconds()) + } + smgEv = SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123496", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1010", + utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.USAGE: "1048576", + utils.LastUsed: "0", + } + if err := smgRPC.Call("SMGenericV1.SessionUpdate", smgEv, &maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 1.048576e+06 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 49996712960.000000 // 0 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + if err := smgRPC.Call("SMGenericV1.ActiveSessions", utils.AttrSMGGetActiveSessions{}, &aSessions); err != nil { + t.Error(err) + } else if len(aSessions) != 1 || aSessions[0].Usage.Seconds() != 1048576 
{ + t.Errorf("wrong active sessions: %f", aSessions[0].Usage.Seconds()) + } + smgEv = SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123496", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1010", + utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.USAGE: "1048576", + utils.LastUsed: "0", + } + if err := smgRPC.Call("SMGenericV1.SessionUpdate", smgEv, &maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 1.048576e+06 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 49996712960.000000 // 0 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + if err := smgRPC.Call("SMGenericV1.ActiveSessions", utils.AttrSMGGetActiveSessions{}, &aSessions); err != nil { + t.Error(err) + } else if len(aSessions) != 1 || aSessions[0].Usage.Seconds() != 1048576 { + t.Errorf("wrong active sessions: %f", aSessions[0].Usage.Seconds()) + } + smgEv = SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123496", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1010", + utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.LastUsed: "0", + } + var rpl string + if err = smgRPC.Call("SMGenericV1.SessionEnd", smgEv, &rpl); err != nil || rpl != utils.OK { + t.Error(err) + } + eAcntVal = 49997767680.000000 // refunded + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + if err := 
smgRPC.Call("SMGenericV1.ActiveSessions", utils.AttrSMGGetActiveSessions{}, &aSessions); err != nil { + t.Error(err) + } else if len(aSessions) != 0 { + t.Errorf("wrong active sessions: %+v", aSessions) + } +} + +func TestSMGDataMultipleDataConstantUsage(t *testing.T) { + if !*testIntegration { + return + } + var acnt *engine.Account + attrs := &utils.AttrGetAccount{Tenant: "cgrates.org", Account: "1010"} + eAcntVal := 49997767680.000000 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + smgEv := SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123497", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1010", + utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.SETUP_TIME: "2016-01-05 18:30:55", + utils.ANSWER_TIME: "2016-01-05 18:31:05", + utils.USAGE: "1048576", + } + var maxUsage float64 + if err := smgRPC.Call("SMGenericV1.SessionStart", smgEv, &maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 1.048576e+06 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 49996712960.000000 // 1054720 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + aSessions := make([]*ActiveSession, 0) + if err := smgRPC.Call("SMGenericV1.ActiveSessions", utils.AttrSMGGetActiveSessions{}, &aSessions); err != nil { + t.Error(err) + } else if len(aSessions) != 1 || aSessions[0].Usage.Seconds() != 1048576 { + t.Errorf("wrong active sessions: %f", aSessions[0].Usage.Seconds()) + } + + smgEv = SMGenericEvent{ + 
utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123497", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1010", + utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.USAGE: "1048576", + utils.LastUsed: "600", + } + if err := smgRPC.Call("SMGenericV1.SessionUpdate", smgEv, &maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 1.048576e+06 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 49996712960.000000 // 0 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + if err := smgRPC.Call("SMGenericV1.ActiveSessions", utils.AttrSMGGetActiveSessions{}, &aSessions); err != nil { + t.Error(err) + } else if len(aSessions) != 1 || aSessions[0].Usage.Seconds() != 1049176 { + t.Errorf("wrong active sessions: %f", aSessions[0].Usage.Seconds()) + } + smgEv = SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123497", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1010", + utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.USAGE: "1048576", + utils.LastUsed: "600", + } + if err := smgRPC.Call("SMGenericV1.SessionUpdate", smgEv, &maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 1.048576e+06 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 49996712960.000000 // 0 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + if err := smgRPC.Call("SMGenericV1.ActiveSessions", 
utils.AttrSMGGetActiveSessions{}, &aSessions); err != nil { + t.Error(err) + } else if len(aSessions) != 1 || aSessions[0].Usage.Seconds() != 1049776 { + t.Errorf("wrong active sessions: %f", aSessions[0].Usage.Seconds()) + } + smgEv = SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123497", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1010", + utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.USAGE: "1048576", + utils.LastUsed: "600", + } + if err := smgRPC.Call("SMGenericV1.SessionUpdate", smgEv, &maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 1.048576e+06 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 49996712960.000000 // 0 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + if err := smgRPC.Call("SMGenericV1.ActiveSessions", utils.AttrSMGGetActiveSessions{}, &aSessions); err != nil { + t.Error(err) + } else if len(aSessions) != 1 || aSessions[0].Usage.Seconds() != 1050376 { + t.Errorf("wrong active sessions: %f", aSessions[0].Usage.Seconds()) + } + smgEv = SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123497", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1010", + utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.USAGE: "1048576", + utils.LastUsed: "600", + } + if err := smgRPC.Call("SMGenericV1.SessionUpdate", smgEv, &maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 1.048576e+06 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 49996712960.000000 // 0 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != 
nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + if err := smgRPC.Call("SMGenericV1.ActiveSessions", utils.AttrSMGGetActiveSessions{}, &aSessions); err != nil { + t.Error(err) + } else if len(aSessions) != 1 || aSessions[0].Usage.Seconds() != 1050976 { + t.Errorf("wrong active sessions: %f", aSessions[0].Usage.Seconds()) + } + smgEv = SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.DATA, + utils.ACCID: "123497", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1010", + utils.SUBJECT: "1010", + utils.DESTINATION: "222", + utils.CATEGORY: "data", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.LastUsed: "0", + } + var rpl string + if err = smgRPC.Call("SMGenericV1.SessionEnd", smgEv, &rpl); err != nil || rpl != utils.OK { + t.Error(err) + } + eAcntVal = 49997757440.000000 // 10240 (from the start) + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.DATA].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.DATA].GetTotalValue()) + } + if err := smgRPC.Call("SMGenericV1.ActiveSessions", utils.AttrSMGGetActiveSessions{}, &aSessions); err != nil { + t.Error(err) + } else if len(aSessions) != 0 { + t.Errorf("wrong active sessions: %f", aSessions[0].Usage.Seconds()) + } +} diff --git a/sessionmanager/fsevent.go b/sessionmanager/fsevent.go index 8634be250..4ba9bdd4c 100644 --- a/sessionmanager/fsevent.go +++ b/sessionmanager/fsevent.go @@ -373,6 +373,7 @@ func (fsev FSEvent) ComputeLcr() bool { // Converts into CallDescriptor due to responder interface needs func (fsev FSEvent) AsCallDescriptor() (*engine.CallDescriptor, error) { lcrReq := &engine.LcrRequest{ + Direction: fsev.GetDirection(utils.META_DEFAULT), Tenant: fsev.GetTenant(utils.META_DEFAULT), Category: 
fsev.GetCategory(utils.META_DEFAULT), diff --git a/sessionmanager/fssessionmanager.go b/sessionmanager/fssessionmanager.go index 2d504888b..f18ee52ec 100644 --- a/sessionmanager/fssessionmanager.go +++ b/sessionmanager/fssessionmanager.go @@ -30,9 +30,10 @@ import ( "github.com/cgrates/cgrates/engine" "github.com/cgrates/cgrates/utils" "github.com/cgrates/fsock" + "github.com/cgrates/rpcclient" ) -func NewFSSessionManager(smFsConfig *config.SmFsConfig, rater, cdrs engine.Connector, timezone string) *FSSessionManager { +func NewFSSessionManager(smFsConfig *config.SmFsConfig, rater, cdrs rpcclient.RpcClientConnection, timezone string) *FSSessionManager { return &FSSessionManager{ cfg: smFsConfig, conns: make(map[string]*fsock.FSock), @@ -50,10 +51,11 @@ type FSSessionManager struct { cfg *config.SmFsConfig conns map[string]*fsock.FSock // Keep the list here for connection management purposes senderPools map[string]*fsock.FSockPool // Keep sender pools here - rater engine.Connector - cdrsrv engine.Connector - sessions *Sessions - timezone string + rater rpcclient.RpcClientConnection + cdrsrv rpcclient.RpcClientConnection + + sessions *Sessions + timezone string } func (sm *FSSessionManager) createHandlers() map[string][]func(string, string) { @@ -98,6 +100,7 @@ func (sm *FSSessionManager) setCgrLcr(ev engine.Event, connId string) error { return err } cd := &engine.CallDescriptor{ + CgrID: ev.GetCgrId(sm.Timezone()), Direction: ev.GetDirection(utils.META_DEFAULT), Tenant: ev.GetTenant(utils.META_DEFAULT), Category: ev.GetCategory(utils.META_DEFAULT), @@ -107,7 +110,7 @@ func (sm *FSSessionManager) setCgrLcr(ev engine.Event, connId string) error { TimeStart: startTime, TimeEnd: startTime.Add(config.CgrConfig().MaxCallDuration), } - if err := sm.rater.GetLCR(&engine.AttrGetLcr{CallDescriptor: cd}, &lcrCost); err != nil { + if err := sm.rater.Call("Responder.GetLCR", &engine.AttrGetLcr{CallDescriptor: cd}, &lcrCost); err != nil { return err } supps := []string{} @@ -131,7 
+134,7 @@ func (sm *FSSessionManager) onChannelPark(ev engine.Event, connId string) { return } var maxCallDuration float64 // This will be the maximum duration this channel will be allowed to last - if err := sm.rater.GetDerivedMaxSessionTime(ev.AsStoredCdr(config.CgrConfig().DefaultTimezone), &maxCallDuration); err != nil { + if err := sm.rater.Call("Responder.GetDerivedMaxSessionTime", ev.AsStoredCdr(config.CgrConfig().DefaultTimezone), &maxCallDuration); err != nil { utils.Logger.Err(fmt.Sprintf(" Could not get max session time for %s, error: %s", ev.GetUUID(), err.Error())) } if maxCallDuration != -1 { // For calls different than unlimited, set limits @@ -146,13 +149,14 @@ func (sm *FSSessionManager) onChannelPark(ev engine.Event, connId string) { // ComputeLcr if ev.ComputeLcr() { cd, err := fsev.AsCallDescriptor() + cd.CgrID = fsev.GetCgrId(sm.Timezone()) if err != nil { utils.Logger.Info(fmt.Sprintf(" LCR_PREPROCESS_ERROR: %s", err.Error())) sm.unparkCall(ev.GetUUID(), connId, ev.GetCallDestNr(utils.META_DEFAULT), SYSTEM_ERROR) return } var lcr engine.LCRCost - if err = sm.Rater().GetLCR(&engine.AttrGetLcr{CallDescriptor: cd}, &lcr); err != nil { + if err = sm.Rater().Call("Responder.GetLCR", &engine.AttrGetLcr{CallDescriptor: cd}, &lcr); err != nil { utils.Logger.Info(fmt.Sprintf(" LCR_API_ERROR: %s", err.Error())) sm.unparkCall(ev.GetUUID(), connId, ev.GetCallDestNr(utils.META_DEFAULT), SYSTEM_ERROR) } @@ -227,9 +231,9 @@ func (sm *FSSessionManager) onChannelHangupComplete(ev engine.Event) { func (sm *FSSessionManager) Connect() error { eventFilters := map[string]string{"Call-Direction": "inbound"} errChan := make(chan error) - for _, connCfg := range sm.cfg.Connections { + for _, connCfg := range sm.cfg.EventSocketConns { connId := utils.GenUUID() - fSock, err := fsock.NewFSock(connCfg.Server, connCfg.Password, connCfg.Reconnects, sm.createHandlers(), eventFilters, utils.Logger.(*syslog.Writer), connId) + fSock, err := fsock.NewFSock(connCfg.Address, 
connCfg.Password, connCfg.Reconnects, sm.createHandlers(), eventFilters, utils.Logger.(*syslog.Writer), connId) if err != nil { return err } else if !fSock.Connected() { @@ -242,7 +246,7 @@ func (sm *FSSessionManager) Connect() error { errChan <- err } }() - if fsSenderPool, err := fsock.NewFSockPool(5, connCfg.Server, connCfg.Password, 1, sm.cfg.MaxWaitConnection, + if fsSenderPool, err := fsock.NewFSockPool(5, connCfg.Address, connCfg.Password, 1, sm.cfg.MaxWaitConnection, make(map[string][]func(string, string)), make(map[string]string), utils.Logger.(*syslog.Writer), connId); err != nil { return fmt.Errorf("Cannot connect FreeSWITCH senders pool, error: %s", err.Error()) } else if fsSenderPool == nil { @@ -294,7 +298,7 @@ func (sm *FSSessionManager) DisconnectSession(ev engine.Event, connId, notify st func (sm *FSSessionManager) ProcessCdr(storedCdr *engine.CDR) error { var reply string - if err := sm.cdrsrv.ProcessCdr(storedCdr, &reply); err != nil { + if err := sm.cdrsrv.Call("CdrServer.ProcessCdr", storedCdr, &reply); err != nil { utils.Logger.Err(fmt.Sprintf(" Failed processing CDR, cgrid: %s, accid: %s, error: <%s>", storedCdr.CGRID, storedCdr.OriginID, err.Error())) } return nil @@ -304,11 +308,11 @@ func (sm *FSSessionManager) DebitInterval() time.Duration { return sm.cfg.DebitInterval } -func (sm *FSSessionManager) CdrSrv() engine.Connector { +func (sm *FSSessionManager) CdrSrv() rpcclient.RpcClientConnection { return sm.cdrsrv } -func (sm *FSSessionManager) Rater() engine.Connector { +func (sm *FSSessionManager) Rater() rpcclient.RpcClientConnection { return sm.rater } diff --git a/sessionmanager/kamailiosm.go b/sessionmanager/kamailiosm.go index fc5946307..af7c5fb53 100644 --- a/sessionmanager/kamailiosm.go +++ b/sessionmanager/kamailiosm.go @@ -29,17 +29,18 @@ import ( "github.com/cgrates/cgrates/engine" "github.com/cgrates/cgrates/utils" "github.com/cgrates/kamevapi" + "github.com/cgrates/rpcclient" ) -func NewKamailioSessionManager(smKamCfg 
*config.SmKamConfig, rater, cdrsrv engine.Connector, timezone string) (*KamailioSessionManager, error) { +func NewKamailioSessionManager(smKamCfg *config.SmKamConfig, rater, cdrsrv rpcclient.RpcClientConnection, timezone string) (*KamailioSessionManager, error) { ksm := &KamailioSessionManager{cfg: smKamCfg, rater: rater, cdrsrv: cdrsrv, timezone: timezone, conns: make(map[string]*kamevapi.KamEvapi), sessions: NewSessions()} return ksm, nil } type KamailioSessionManager struct { cfg *config.SmKamConfig - rater engine.Connector - cdrsrv engine.Connector + rater rpcclient.RpcClientConnection + cdrsrv rpcclient.RpcClientConnection timezone string conns map[string]*kamevapi.KamEvapi sessions *Sessions @@ -64,7 +65,7 @@ func (self *KamailioSessionManager) onCgrAuth(evData []byte, connId string) { } var remainingDuration float64 var errMaxSession error - if errMaxSession = self.rater.GetDerivedMaxSessionTime(kev.AsStoredCdr(self.Timezone()), &remainingDuration); errMaxSession != nil { + if errMaxSession = self.rater.Call("Responder.GetDerivedMaxSessionTime", kev.AsStoredCdr(self.Timezone()), &remainingDuration); errMaxSession != nil { utils.Logger.Err(fmt.Sprintf(" Could not get max session time, error: %s", errMaxSession.Error())) } var supplStr string @@ -102,12 +103,13 @@ func (self *KamailioSessionManager) onCgrLcrReq(evData []byte, connId string) { func (self *KamailioSessionManager) getSuppliers(kev KamEvent) (string, error) { cd, err := kev.AsCallDescriptor() + cd.CgrID = kev.GetCgrId(self.timezone) if err != nil { utils.Logger.Info(fmt.Sprintf(" LCR_PREPROCESS_ERROR error: %s", err.Error())) return "", errors.New("LCR_PREPROCESS_ERROR") } var lcr engine.LCRCost - if err = self.Rater().GetLCR(&engine.AttrGetLcr{CallDescriptor: cd}, &lcr); err != nil { + if err = self.Rater().Call("Responder.GetLCR", &engine.AttrGetLcr{CallDescriptor: cd}, &lcr); err != nil { utils.Logger.Info(fmt.Sprintf(" LCR_API_ERROR error: %s", err.Error())) return "", 
errors.New("LCR_API_ERROR") } @@ -168,9 +170,9 @@ func (self *KamailioSessionManager) Connect() error { regexp.MustCompile("CGR_CALL_END"): []func([]byte, string){self.onCallEnd}, } errChan := make(chan error) - for _, connCfg := range self.cfg.Connections { + for _, connCfg := range self.cfg.EvapiConns { connId := utils.GenUUID() - if self.conns[connId], err = kamevapi.NewKamEvapi(connCfg.EvapiAddr, connId, connCfg.Reconnects, eventHandlers, utils.Logger.(*syslog.Writer)); err != nil { + if self.conns[connId], err = kamevapi.NewKamEvapi(connCfg.Address, connId, connCfg.Reconnects, eventHandlers, utils.Logger.(*syslog.Writer)); err != nil { return err } go func() { // Start reading in own goroutine, return on error @@ -196,10 +198,10 @@ func (self *KamailioSessionManager) DisconnectSession(ev engine.Event, connId, n func (self *KamailioSessionManager) DebitInterval() time.Duration { return self.cfg.DebitInterval } -func (self *KamailioSessionManager) CdrSrv() engine.Connector { +func (self *KamailioSessionManager) CdrSrv() rpcclient.RpcClientConnection { return self.cdrsrv } -func (self *KamailioSessionManager) Rater() engine.Connector { +func (self *KamailioSessionManager) Rater() rpcclient.RpcClientConnection { return self.rater } @@ -208,7 +210,7 @@ func (self *KamailioSessionManager) ProcessCdr(cdr *engine.CDR) error { return nil } var reply string - if err := self.cdrsrv.ProcessCdr(cdr, &reply); err != nil { + if err := self.cdrsrv.Call("CdrServer.ProcessCdr", cdr, &reply); err != nil { utils.Logger.Err(fmt.Sprintf(" Failed processing CDR, cgrid: %s, accid: %s, error: <%s>", cdr.CGRID, cdr.OriginID, err.Error())) } return nil diff --git a/sessionmanager/osipssm.go b/sessionmanager/osipssm.go index 05f905cdd..de785188f 100644 --- a/sessionmanager/osipssm.go +++ b/sessionmanager/osipssm.go @@ -29,6 +29,7 @@ import ( "github.com/cgrates/cgrates/engine" "github.com/cgrates/cgrates/utils" "github.com/cgrates/osipsdagram" + "github.com/cgrates/rpcclient" ) /* @@ 
-80,7 +81,7 @@ duration:: */ -func NewOSipsSessionManager(smOsipsCfg *config.SmOsipsConfig, reconnects int, rater, cdrsrv engine.Connector, timezone string) (*OsipsSessionManager, error) { +func NewOSipsSessionManager(smOsipsCfg *config.SmOsipsConfig, reconnects int, rater, cdrsrv rpcclient.RpcClientConnection, timezone string) (*OsipsSessionManager, error) { osm := &OsipsSessionManager{cfg: smOsipsCfg, reconnects: reconnects, rater: rater, cdrsrv: cdrsrv, timezone: timezone, cdrStartEvents: make(map[string]*OsipsEvent), sessions: NewSessions()} osm.eventHandlers = map[string][]func(*osipsdagram.OsipsEvent){ "E_OPENSIPS_START": []func(*osipsdagram.OsipsEvent){osm.onOpensipsStart}, // Raised when OpenSIPS starts so we can register our event handlers @@ -94,8 +95,8 @@ func NewOSipsSessionManager(smOsipsCfg *config.SmOsipsConfig, reconnects int, ra type OsipsSessionManager struct { cfg *config.SmOsipsConfig reconnects int - rater engine.Connector - cdrsrv engine.Connector + rater rpcclient.RpcClientConnection + cdrsrv rpcclient.RpcClientConnection timezone string eventHandlers map[string][]func(*osipsdagram.OsipsEvent) evSubscribeStop chan struct{} // Reference towards the channel controlling subscriptions, keep it as reference so we do not need to copy it @@ -130,12 +131,12 @@ func (osm *OsipsSessionManager) DebitInterval() time.Duration { } // Returns the connection to local cdr database, used by session to log it's final costs -func (osm *OsipsSessionManager) CdrSrv() engine.Connector { +func (osm *OsipsSessionManager) CdrSrv() rpcclient.RpcClientConnection { return osm.cdrsrv } // Returns connection to rater/controller -func (osm *OsipsSessionManager) Rater() engine.Connector { +func (osm *OsipsSessionManager) Rater() rpcclient.RpcClientConnection { return osm.rater } @@ -152,7 +153,7 @@ func (osm *OsipsSessionManager) Shutdown() error { // Process the CDR with CDRS component func (osm *OsipsSessionManager) ProcessCdr(storedCdr *engine.CDR) error { var reply 
string - return osm.cdrsrv.ProcessCdr(storedCdr, &reply) + return osm.cdrsrv.Call("CdrServer.ProcessCdr", storedCdr, &reply) } // Disconnects the session diff --git a/sessionmanager/session.go b/sessionmanager/session.go index 31942cfdf..ca7a07482 100644 --- a/sessionmanager/session.go +++ b/sessionmanager/session.go @@ -58,7 +58,7 @@ func NewSession(ev engine.Event, connId string, sm SessionManager) *Session { sessionManager: sm, connId: connId, } - if err := sm.Rater().GetSessionRuns(ev.AsStoredCdr(s.sessionManager.Timezone()), &s.sessionRuns); err != nil || len(s.sessionRuns) == 0 { + if err := sm.Rater().Call("Responder.GetSessionRuns", ev.AsStoredCdr(s.sessionManager.Timezone()), &s.sessionRuns); err != nil || len(s.sessionRuns) == 0 { return nil } for runIdx := range s.sessionRuns { @@ -70,7 +70,7 @@ func NewSession(ev engine.Event, connId string, sm SessionManager) *Session { // the debit loop method (to be stoped by sending somenthing on stopDebit channel) func (s *Session) debitLoop(runIdx int) { nextCd := s.sessionRuns[runIdx].CallDescriptor - nextCd.CgrID = s.eventStart.GetCgrId("") + nextCd.CgrID = s.eventStart.GetCgrId(s.sessionManager.Timezone()) index := 0.0 debitPeriod := s.sessionManager.DebitInterval() for { @@ -86,7 +86,7 @@ func (s *Session) debitLoop(runIdx int) { nextCd.LoopIndex = index nextCd.DurationIndex += debitPeriod // first presumed duration cc := new(engine.CallCost) - if err := s.sessionManager.Rater().MaxDebit(nextCd, cc); err != nil { + if err := s.sessionManager.Rater().Call("Responder.MaxDebit", nextCd, cc); err != nil { utils.Logger.Err(fmt.Sprintf("Could not complete debit opperation: %v", err)) if err.Error() == utils.ErrUnauthorizedDestination.Error() { s.sessionManager.DisconnectSession(s.eventStart, s.connId, UNAUTHORIZED_DESTINATION) @@ -195,12 +195,22 @@ func (s *Session) Refund(lastCC *engine.CallCost, hangupTime time.Time) error { // show only what was actualy refunded (stopped in timespan) // 
utils.Logger.Info(fmt.Sprintf("Refund duration: %v", initialRefundDuration-refundDuration)) if len(refundIncrements) > 0 { - cd := lastCC.CreateCallDescriptor() - cd.Increments = refundIncrements + cd := &engine.CallDescriptor{ + CgrID: s.eventStart.GetCgrId(s.sessionManager.Timezone()), + Direction: lastCC.Direction, + Tenant: lastCC.Tenant, + Category: lastCC.Category, + Subject: lastCC.Subject, + Account: lastCC.Account, + Destination: lastCC.Destination, + TOR: lastCC.TOR, + Increments: refundIncrements, + } cd.Increments.Compress() utils.Logger.Info(fmt.Sprintf("Refunding duration %v with cd: %+v", refundDuration, cd)) var response float64 - if err := s.sessionManager.Rater().RefundIncrements(cd, &response); err != nil { + err := s.sessionManager.Rater().Call("Responder.RefundIncrements", cd, &response) + if err != nil { return err } } @@ -235,22 +245,23 @@ func (s *Session) SaveOperations() { cd := firstCC.CreateCallDescriptor() cd.Increments = roundIncrements var response float64 - if err := s.sessionManager.Rater().RefundRounding(cd, &response); err != nil { + if err := s.sessionManager.Rater().Call("Responder.RefundRounding", cd, &response); err != nil { utils.Logger.Err(fmt.Sprintf(" ERROR failed to refund rounding: %v", err)) } } + smCost := &engine.SMCost{ + CGRID: s.eventStart.GetCgrId(s.sessionManager.Timezone()), + CostSource: utils.SESSION_MANAGER_SOURCE, + RunID: sr.DerivedCharger.RunID, + OriginHost: s.eventStart.GetOriginatorIP(utils.META_DEFAULT), + OriginID: s.eventStart.GetUUID(), + CostDetails: firstCC, + } var reply string - err := s.sessionManager.CdrSrv().LogCallCost(&engine.CallCostLog{ - CgrId: s.eventStart.GetCgrId(s.sessionManager.Timezone()), - Source: utils.SESSION_MANAGER_SOURCE, - RunId: sr.DerivedCharger.RunID, - CallCost: firstCC, - CheckDuplicate: true, - }, &reply) - // this is a protection against the case when the close event is missed for some reason - // when the cdr arrives to cdrserver because our callcost is not there it 
will be rated - // as postpaid. When the close event finally arives we have to refund everything - if err != nil { + if err := s.sessionManager.CdrSrv().Call("CdrServer.StoreSMCost", engine.AttrCDRSStoreSMCost{Cost: smCost, CheckDuplicate: true}, &reply); err != nil { + // this is a protection against the case when the close event is missed for some reason + // when the cdr arrives to cdrserver because our callcost is not there it will be rated + // as postpaid. When the close event finally arives we have to refund everything if err == utils.ErrExists { s.Refund(firstCC, firstCC.Timespans[0].TimeStart) } else { @@ -270,7 +281,7 @@ func (s *Session) AsActiveSessions() []*ActiveSession { aSession := &ActiveSession{ CgrId: s.eventStart.GetCgrId(s.sessionManager.Timezone()), TOR: utils.VOICE, - AccId: s.eventStart.GetUUID(), + OriginID: s.eventStart.GetUUID(), CdrHost: s.eventStart.GetOriginatorIP(utils.META_DEFAULT), CdrSource: "FS_" + s.eventStart.GetName(), ReqType: s.eventStart.GetReqType(utils.META_DEFAULT), @@ -307,7 +318,7 @@ func (s *Session) AsActiveSessions() []*ActiveSession { type ActiveSession struct { CgrId string TOR string // type of record, meta-field, should map to one of the TORs hardcoded inside the server <*voice|*data|*sms|*generic> - AccId string // represents the unique accounting id given by the telecom switch generating the CDR + OriginID string // represents the unique accounting id given by the telecom switch generating the CDR CdrHost string // represents the IP address of the host generating the CDR (automatically populated by the server) CdrSource string // formally identifies the source of the CDR (free form field) ReqType string // matching the supported request types by the **CGRateS**, accepted values are hardcoded in the server . 
diff --git a/sessionmanager/session_test.go b/sessionmanager/session_test.go index 209577806..f23b7d37f 100644 --- a/sessionmanager/session_test.go +++ b/sessionmanager/session_test.go @@ -80,34 +80,40 @@ func TestSessionNilSession(t *testing.T) { } */ -type MockConnector struct { +type MockRpcClient struct { refundCd *engine.CallDescriptor } -func (mc *MockConnector) GetCost(*engine.CallDescriptor, *engine.CallCost) error { return nil } -func (mc *MockConnector) Debit(*engine.CallDescriptor, *engine.CallCost) error { return nil } -func (mc *MockConnector) MaxDebit(*engine.CallDescriptor, *engine.CallCost) error { return nil } -func (mc *MockConnector) RefundIncrements(cd *engine.CallDescriptor, reply *float64) error { +func (mc *MockRpcClient) Call(methodName string, arg interface{}, reply interface{}) error { + if cd, ok := arg.(*engine.CallDescriptor); ok { + mc.refundCd = cd + } + return nil +} +func (mc *MockRpcClient) GetCost(*engine.CallDescriptor, *engine.CallCost) error { return nil } +func (mc *MockRpcClient) Debit(*engine.CallDescriptor, *engine.CallCost) error { return nil } +func (mc *MockRpcClient) MaxDebit(*engine.CallDescriptor, *engine.CallCost) error { return nil } +func (mc *MockRpcClient) RefundIncrements(cd *engine.CallDescriptor, reply *float64) error { mc.refundCd = cd return nil } -func (mc *MockConnector) RefundRounding(cd *engine.CallDescriptor, reply *float64) error { +func (mc *MockRpcClient) RefundRounding(cd *engine.CallDescriptor, reply *float64) error { return nil } -func (mc *MockConnector) GetMaxSessionTime(*engine.CallDescriptor, *float64) error { return nil } -func (mc *MockConnector) GetDerivedChargers(*utils.AttrDerivedChargers, *utils.DerivedChargers) error { +func (mc *MockRpcClient) GetMaxSessionTime(*engine.CallDescriptor, *float64) error { return nil } +func (mc *MockRpcClient) GetDerivedChargers(*utils.AttrDerivedChargers, *utils.DerivedChargers) error { return nil } -func (mc *MockConnector) 
GetDerivedMaxSessionTime(*engine.CDR, *float64) error { return nil } -func (mc *MockConnector) GetSessionRuns(*engine.CDR, *[]*engine.SessionRun) error { return nil } -func (mc *MockConnector) ProcessCdr(*engine.CDR, *string) error { return nil } -func (mc *MockConnector) LogCallCost(*engine.CallCostLog, *string) error { return nil } -func (mc *MockConnector) GetLCR(*engine.AttrGetLcr, *engine.LCRCost) error { return nil } -func (mc *MockConnector) GetTimeout(int, *time.Duration) error { return nil } +func (mc *MockRpcClient) GetDerivedMaxSessionTime(*engine.CDR, *float64) error { return nil } +func (mc *MockRpcClient) GetSessionRuns(*engine.CDR, *[]*engine.SessionRun) error { return nil } +func (mc *MockRpcClient) ProcessCdr(*engine.CDR, *string) error { return nil } +func (mc *MockRpcClient) StoreSMCost(engine.AttrCDRSStoreSMCost, *string) error { return nil } +func (mc *MockRpcClient) GetLCR(*engine.AttrGetLcr, *engine.LCRCost) error { return nil } +func (mc *MockRpcClient) GetTimeout(int, *time.Duration) error { return nil } func TestSessionRefund(t *testing.T) { - mc := &MockConnector{} - s := &Session{sessionManager: &FSSessionManager{rater: mc}} + mc := &MockRpcClient{} + s := &Session{sessionManager: &FSSessionManager{rater: mc, timezone: time.UTC.String()}, eventStart: FSEvent{SETUP_TIME: time.Now().Format(time.RFC3339)}} ts := &engine.TimeSpan{ TimeStart: time.Date(2015, 6, 10, 14, 7, 0, 0, time.UTC), TimeEnd: time.Date(2015, 6, 10, 14, 7, 30, 0, time.UTC), @@ -126,8 +132,8 @@ func TestSessionRefund(t *testing.T) { } func TestSessionRefundAll(t *testing.T) { - mc := &MockConnector{} - s := &Session{sessionManager: &FSSessionManager{rater: mc}} + mc := &MockRpcClient{} + s := &Session{sessionManager: &FSSessionManager{rater: mc, timezone: time.UTC.String()}, eventStart: FSEvent{SETUP_TIME: time.Now().Format(time.RFC3339)}} ts := &engine.TimeSpan{ TimeStart: time.Date(2015, 6, 10, 14, 7, 0, 0, time.UTC), TimeEnd: time.Date(2015, 6, 10, 14, 7, 30, 0, 
time.UTC), @@ -146,8 +152,8 @@ func TestSessionRefundAll(t *testing.T) { } func TestSessionRefundManyAll(t *testing.T) { - mc := &MockConnector{} - s := &Session{sessionManager: &FSSessionManager{rater: mc}} + mc := &MockRpcClient{} + s := &Session{sessionManager: &FSSessionManager{rater: mc, timezone: time.UTC.String()}, eventStart: FSEvent{SETUP_TIME: time.Now().Format(time.RFC3339)}} ts1 := &engine.TimeSpan{ TimeStart: time.Date(2015, 6, 10, 14, 7, 0, 0, time.UTC), TimeEnd: time.Date(2015, 6, 10, 14, 7, 30, 0, time.UTC), diff --git a/sessionmanager/sessionmanager.go b/sessionmanager/sessionmanager.go index 8d2155f11..7a9b5696b 100644 --- a/sessionmanager/sessionmanager.go +++ b/sessionmanager/sessionmanager.go @@ -22,11 +22,12 @@ import ( "time" "github.com/cgrates/cgrates/engine" + "github.com/cgrates/rpcclient" ) type SessionManager interface { - Rater() engine.Connector - CdrSrv() engine.Connector + Rater() rpcclient.RpcClientConnection + CdrSrv() rpcclient.RpcClientConnection DebitInterval() time.Duration DisconnectSession(engine.Event, string, string) error WarnSessionMinDuration(string, string) diff --git a/sessionmanager/sessions.go b/sessionmanager/sessions.go index 65e4a25c1..690a9127a 100644 --- a/sessionmanager/sessions.go +++ b/sessionmanager/sessions.go @@ -28,7 +28,7 @@ import ( func NewSessions() *Sessions { return &Sessions{ sessionsMux: new(sync.Mutex), - guard: engine.NewGuardianLock(), + guard: engine.Guardian, } } diff --git a/sessionmanager/smg_event.go b/sessionmanager/smg_event.go index 643c8693f..2d6f7855f 100644 --- a/sessionmanager/smg_event.go +++ b/sessionmanager/smg_event.go @@ -177,6 +177,54 @@ func (self SMGenericEvent) GetLastUsed(fieldName string) (time.Duration, error) return utils.ParseDurationWithSecs(result) } +// GetSessionTTL retrieves SessionTTL setting out of SMGenericEvent +func (self SMGenericEvent) GetSessionTTL() time.Duration { + valIf, hasVal := self[utils.SessionTTL] + if !hasVal { + return time.Duration(0) + } + 
ttlStr, converted := utils.ConvertIfaceToString(valIf) + if !converted { + return time.Duration(0) + } + ttl, _ := utils.ParseDurationWithSecs(ttlStr) + return ttl +} + +// GetSessionTTLLastUsed retrieves SessionTTLLastUsed setting out of SMGenericEvent +func (self SMGenericEvent) GetSessionTTLLastUsed() *time.Duration { + valIf, hasVal := self[utils.SessionTTLLastUsed] + if !hasVal { + return nil + } + ttlStr, converted := utils.ConvertIfaceToString(valIf) + if !converted { + return nil + } + if ttl, err := utils.ParseDurationWithSecs(ttlStr); err != nil { + return nil + } else { + return &ttl + } +} + +// GetSessionTTLUsage retrieves SessionTTLUsage setting out of SMGenericEvent +func (self SMGenericEvent) GetSessionTTLUsage() *time.Duration { + valIf, hasVal := self[utils.SessionTTLUsage] + if !hasVal { + return nil + } + ttlStr, converted := utils.ConvertIfaceToString(valIf) + if !converted { + return nil + } + if ttl, err := utils.ParseDurationWithSecs(ttlStr); err != nil { + return nil + } else { + return &ttl + } +} + func (self SMGenericEvent) GetMaxUsage(fieldName string, cfgMaxUsage time.Duration) (time.Duration, error) { if fieldName == utils.META_DEFAULT { fieldName = utils.USAGE @@ -228,7 +276,7 @@ func (self SMGenericEvent) GetCdrSource() string { func (self SMGenericEvent) GetExtraFields() map[string]string { extraFields := make(map[string]string) for key, val := range self { - primaryFields := append(utils.PrimaryCdrFields, utils.EVENT_NAME, utils.LastUsed) + primaryFields := append(utils.PrimaryCdrFields, utils.EVENT_NAME) if utils.IsSliceMember(primaryFields, key) { continue } @@ -238,6 +286,18 @@ func (self SMGenericEvent) GetExtraFields() map[string]string { return extraFields } +func (self SMGenericEvent) GetFieldAsString(fieldName string) (string, error) { + valIf, hasVal := self[fieldName] + if !hasVal { + return "", utils.ErrNotFound + } + result, converted := utils.ConvertIfaceToString(valIf) + if !converted { + return "", 
utils.ErrNotConvertible + } + return result, nil +} + func (self SMGenericEvent) MissingParameter(timezone string) bool { switch self.GetName() { case utils.CGR_AUTHORIZATION: diff --git a/sessionmanager/smg_event_test.go b/sessionmanager/smg_event_test.go index 4cc80781c..336996943 100644 --- a/sessionmanager/smg_event_test.go +++ b/sessionmanager/smg_event_test.go @@ -127,7 +127,7 @@ func TestSMGenericEventParseFields(t *testing.T) { if smGev.GetOriginatorIP(utils.META_DEFAULT) != "127.0.0.1" { t.Error("Unexpected: ", smGev.GetOriginatorIP(utils.META_DEFAULT)) } - if extrFlds := smGev.GetExtraFields(); !reflect.DeepEqual(extrFlds, map[string]string{"Extra1": "Value1", "Extra2": "5"}) { + if extrFlds := smGev.GetExtraFields(); !reflect.DeepEqual(extrFlds, map[string]string{"Extra1": "Value1", "Extra2": "5", "LastUsed": "21s"}) { t.Error("Unexpected: ", extrFlds) } } @@ -191,3 +191,19 @@ func TestSMGenericEventAsLcrRequest(t *testing.T) { t.Errorf("Expecting: %+v, received: %+v", eLcrReq, lcrReq) } } + +func TestSMGenericEventGetFieldAsString(t *testing.T) { + smGev := SMGenericEvent{} + smGev[utils.EVENT_NAME] = "TEST_EVENT" + smGev[utils.TOR] = utils.VOICE + smGev[utils.ACCID] = "12345" + smGev[utils.DIRECTION] = utils.OUT + smGev[utils.ACCOUNT] = "account1" + smGev[utils.SUBJECT] = "subject1" + eFldVal := utils.VOICE + if strVal, err := smGev.GetFieldAsString(utils.TOR); err != nil { + t.Error(err) + } else if strVal != eFldVal { + t.Errorf("Expecting: %s, received: %s", eFldVal, strVal) + } +} diff --git a/sessionmanager/smg_it_test.go b/sessionmanager/smg_it_test.go index cd246596e..b14e4adf0 100644 --- a/sessionmanager/smg_it_test.go +++ b/sessionmanager/smg_it_test.go @@ -40,7 +40,7 @@ var daCfg *config.CGRConfig var smgRPC *rpc.Client var err error -func TestSMGInitCfg(t *testing.T) { +func TestSMGVoiceInitCfg(t *testing.T) { if !*testIntegration { return } @@ -56,7 +56,7 @@ func TestSMGInitCfg(t *testing.T) { } // Remove data in both rating and accounting 
db -func TestSMGResetDataDb(t *testing.T) { +func TestSMGVoiceResetDataDb(t *testing.T) { if !*testIntegration { return } @@ -66,7 +66,7 @@ func TestSMGResetDataDb(t *testing.T) { } // Wipe out the cdr database -func TestSMGResetStorDb(t *testing.T) { +func TestSMGVoiceResetStorDb(t *testing.T) { if !*testIntegration { return } @@ -76,7 +76,7 @@ func TestSMGResetStorDb(t *testing.T) { } // Start CGR Engine -func TestSMGStartEngine(t *testing.T) { +func TestSMGVoiceStartEngine(t *testing.T) { if !*testIntegration { return } @@ -86,7 +86,7 @@ func TestSMGStartEngine(t *testing.T) { } // Connect rpc client to rater -func TestSMGApierRpcConn(t *testing.T) { +func TestSMGVoiceApierRpcConn(t *testing.T) { if !*testIntegration { return } @@ -98,7 +98,7 @@ func TestSMGApierRpcConn(t *testing.T) { } // Load the tariff plan, creating accounts and their balances -func TestSMGTPFromFolder(t *testing.T) { +func TestSMGVoiceTPFromFolder(t *testing.T) { if !*testIntegration { return } @@ -110,14 +110,14 @@ func TestSMGTPFromFolder(t *testing.T) { time.Sleep(time.Duration(*waitRater) * time.Millisecond) // Give time for scheduler to execute topups } -func TestSMGMonetaryRefund(t *testing.T) { +func TestSMGVoiceMonetaryRefund(t *testing.T) { if !*testIntegration { return } smgEv := SMGenericEvent{ utils.EVENT_NAME: "TEST_EVENT", utils.TOR: utils.VOICE, - utils.ACCID: "12345", + utils.ACCID: "123451", utils.DIRECTION: utils.OUT, utils.ACCOUNT: "1001", utils.SUBJECT: "1001", @@ -147,7 +147,7 @@ func TestSMGMonetaryRefund(t *testing.T) { smgEv = SMGenericEvent{ utils.EVENT_NAME: "TEST_EVENT", utils.TOR: utils.VOICE, - utils.ACCID: "12345", + utils.ACCID: "123451", utils.DIRECTION: utils.OUT, utils.ACCOUNT: "1001", utils.SUBJECT: "1001", @@ -171,14 +171,14 @@ func TestSMGMonetaryRefund(t *testing.T) { } } -func TestSMGVoiceRefund(t *testing.T) { +func TestSMGVoiceVoiceRefund(t *testing.T) { if !*testIntegration { return } smgEv := SMGenericEvent{ utils.EVENT_NAME: "TEST_EVENT", 
utils.TOR: utils.VOICE, - utils.ACCID: "12345", + utils.ACCID: "123452", utils.DIRECTION: utils.OUT, utils.ACCOUNT: "1001", utils.SUBJECT: "1001", @@ -195,7 +195,7 @@ func TestSMGVoiceRefund(t *testing.T) { t.Error(err) } if maxUsage != 90 { - t.Error("Bad max usage: ", maxUsage) + t.Error("Received: ", maxUsage) } var acnt *engine.Account attrs := &utils.AttrGetAccount{Tenant: "cgrates.org", Account: "1001"} @@ -208,7 +208,7 @@ func TestSMGVoiceRefund(t *testing.T) { smgEv = SMGenericEvent{ utils.EVENT_NAME: "TEST_EVENT", utils.TOR: utils.VOICE, - utils.ACCID: "12345", + utils.ACCID: "123452", utils.DIRECTION: utils.OUT, utils.ACCOUNT: "1001", utils.SUBJECT: "1001", @@ -232,7 +232,7 @@ func TestSMGVoiceRefund(t *testing.T) { } } -func TestSMGMixedRefund(t *testing.T) { +func TestSMGVoiceMixedRefund(t *testing.T) { if !*testIntegration { return } @@ -246,7 +246,7 @@ func TestSMGMixedRefund(t *testing.T) { smgEv := SMGenericEvent{ utils.EVENT_NAME: "TEST_EVENT", utils.TOR: utils.VOICE, - utils.ACCID: "12345", + utils.ACCID: "123453", utils.DIRECTION: utils.OUT, utils.ACCOUNT: "1001", utils.SUBJECT: "1001", @@ -278,7 +278,7 @@ func TestSMGMixedRefund(t *testing.T) { smgEv = SMGenericEvent{ utils.EVENT_NAME: "TEST_EVENT", utils.TOR: utils.VOICE, - utils.ACCID: "12345", + utils.ACCID: "123453", utils.DIRECTION: utils.OUT, utils.ACCOUNT: "1001", utils.SUBJECT: "1001", @@ -306,7 +306,7 @@ func TestSMGMixedRefund(t *testing.T) { t.Logf("After voice: %f", acnt.BalanceMap[utils.VOICE].GetTotalValue()) } -func TestSMGLastUsed(t *testing.T) { +func TestSMGVoiceLastUsed(t *testing.T) { if !*testIntegration { return } @@ -321,7 +321,7 @@ func TestSMGLastUsed(t *testing.T) { smgEv := SMGenericEvent{ utils.EVENT_NAME: "TEST_EVENT", utils.TOR: utils.VOICE, - utils.ACCID: "12349", + utils.ACCID: "12350", utils.DIRECTION: utils.OUT, utils.ACCOUNT: "1001", utils.SUBJECT: "1001", @@ -349,7 +349,7 @@ func TestSMGLastUsed(t *testing.T) { smgEv = SMGenericEvent{ utils.EVENT_NAME: 
"TEST_EVENT", utils.TOR: utils.VOICE, - utils.ACCID: "12349", + utils.ACCID: "12350", utils.DIRECTION: utils.OUT, utils.ACCOUNT: "1001", utils.SUBJECT: "1001", @@ -375,7 +375,7 @@ func TestSMGLastUsed(t *testing.T) { smgEv = SMGenericEvent{ utils.EVENT_NAME: "TEST_EVENT", utils.TOR: utils.VOICE, - utils.ACCID: "12349", + utils.ACCID: "12350", utils.DIRECTION: utils.OUT, utils.ACCOUNT: "1001", utils.SUBJECT: "1001", @@ -392,7 +392,7 @@ func TestSMGLastUsed(t *testing.T) { if maxUsage != 120 { t.Error("Bad max usage: ", maxUsage) } - eAcntVal = 6.5901 + eAcntVal = 6.590100 if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { t.Error(err) } else if acnt.BalanceMap[utils.MONETARY].GetTotalValue() != eAcntVal { @@ -401,7 +401,7 @@ func TestSMGLastUsed(t *testing.T) { smgEv = SMGenericEvent{ utils.EVENT_NAME: "TEST_EVENT", utils.TOR: utils.VOICE, - utils.ACCID: "12349", + utils.ACCID: "12350", utils.DIRECTION: utils.OUT, utils.ACCOUNT: "1001", utils.SUBJECT: "1001", @@ -422,3 +422,289 @@ func TestSMGLastUsed(t *testing.T) { t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.MONETARY].GetTotalValue()) } } + +func TestSMGVoiceLastUsedEnd(t *testing.T) { + if !*testIntegration { + return + } + var acnt *engine.Account + attrs := &utils.AttrGetAccount{Tenant: "cgrates.org", Account: "1001"} + eAcntVal := 7.59000 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.MONETARY].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.MONETARY].GetTotalValue()) + } + smgEv := SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.VOICE, + utils.ACCID: "1234911", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1001", + utils.SUBJECT: "1001", + utils.DESTINATION: "1006", + utils.CATEGORY: "call", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.SETUP_TIME: "2016-01-05 18:30:49", + 
utils.ANSWER_TIME: "2016-01-05 18:31:05", + utils.USAGE: "2m", + } + var maxUsage float64 + if err := smgRPC.Call("SMGenericV1.SessionStart", smgEv, &maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 120 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 6.190020 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.MONETARY].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.MONETARY].GetTotalValue()) + } + smgEv = SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.VOICE, + utils.ACCID: "1234911", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1001", + utils.SUBJECT: "1001", + utils.DESTINATION: "1006", + utils.CATEGORY: "call", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.USAGE: "2m", + utils.LastUsed: "30s", + } + if err := smgRPC.Call("SMGenericV1.SessionUpdate", smgEv, &maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 120 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 6.090030 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.MONETARY].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.MONETARY].GetTotalValue()) + } + smgEv = SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.VOICE, + utils.ACCID: "1234911", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1001", + utils.SUBJECT: "1001", + utils.DESTINATION: "1006", + utils.CATEGORY: "call", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.LastUsed: "0s", + } + var rpl string + if err = smgRPC.Call("SMGenericV1.SessionEnd", smgEv, &rpl); err != nil || rpl != utils.OK { + t.Error(err) + } + eAcntVal = 6.590000 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if 
acnt.BalanceMap[utils.MONETARY].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.MONETARY].GetTotalValue()) + } +} + +func TestSMGVoiceLastUsedNotFixed(t *testing.T) { + if !*testIntegration { + return + } + var acnt *engine.Account + attrs := &utils.AttrGetAccount{Tenant: "cgrates.org", Account: "1001"} + eAcntVal := 6.59000 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.MONETARY].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.MONETARY].GetTotalValue()) + } + smgEv := SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.VOICE, + utils.ACCID: "1234922", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1001", + utils.SUBJECT: "1001", + utils.DESTINATION: "1006", + utils.CATEGORY: "call", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.SETUP_TIME: "2016-01-05 18:30:49", + utils.ANSWER_TIME: "2016-01-05 18:31:05", + utils.USAGE: "2m", + } + var maxUsage float64 + if err := smgRPC.Call("SMGenericV1.SessionStart", smgEv, &maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 120 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 5.190020 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.MONETARY].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.MONETARY].GetTotalValue()) + } + smgEv = SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.VOICE, + utils.ACCID: "1234922", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1001", + utils.SUBJECT: "1001", + utils.DESTINATION: "1006", + utils.CATEGORY: "call", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.USAGE: "2m", + utils.LastUsed: "13s", + } + if err := smgRPC.Call("SMGenericV1.SessionUpdate", smgEv, &maxUsage); err 
!= nil { + t.Error(err) + } + if maxUsage != 120 { + t.Error("Bad max usage: ", maxUsage) + } + eAcntVal = 5.123360 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.MONETARY].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.MONETARY].GetTotalValue()) + } + smgEv = SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT", + utils.TOR: utils.VOICE, + utils.ACCID: "1234922", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1001", + utils.SUBJECT: "1001", + utils.DESTINATION: "1006", + utils.CATEGORY: "call", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.LastUsed: "0s", + } + var rpl string + if err = smgRPC.Call("SMGenericV1.SessionEnd", smgEv, &rpl); err != nil || rpl != utils.OK { + t.Error(err) + } + eAcntVal = 5.590000 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.MONETARY].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.MONETARY].GetTotalValue()) + } +} + +func TestSMGVoiceSessionTTL(t *testing.T) { + if !*testIntegration { + return + } + var acnt *engine.Account + attrs := &utils.AttrGetAccount{Tenant: "cgrates.org", Account: "1001"} + eAcntVal := 5.590000 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.MONETARY].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.MONETARY].GetTotalValue()) + } + smgEv := SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT_SESSION_TTL", + utils.TOR: utils.VOICE, + utils.ACCID: "12360", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1001", + utils.SUBJECT: "1001", + utils.DESTINATION: "1008", + utils.CATEGORY: "call", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.SETUP_TIME: "2016-01-05 18:30:49", + 
utils.ANSWER_TIME: "2016-01-05 18:31:05", + utils.USAGE: "2m", + } + var maxUsage float64 + if err := smgRPC.Call("SMGenericV1.SessionStart", smgEv, &maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 120 { + t.Error("Bad max usage: ", maxUsage) + } + var aSessions []*ActiveSession + if err := smgRPC.Call("SMGenericV1.ActiveSessions", utils.AttrSMGGetActiveSessions{RunID: utils.StringPointer(utils.META_DEFAULT), OriginID: utils.StringPointer("12360")}, &aSessions); err != nil { + t.Error(err) + } else if len(aSessions) != 1 { + t.Errorf("Unexpected number of sessions received: %+v", aSessions) + } else if aSessions[0].Usage != time.Duration(120)*time.Second { + t.Errorf("Expecting 2m, received usage: %v", aSessions[0].Usage) + } + eAcntVal = 4.190020 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.MONETARY].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.MONETARY].GetTotalValue()) + } + smgEv = SMGenericEvent{ + utils.EVENT_NAME: "TEST_EVENT_SESSION_TTL", + utils.TOR: utils.VOICE, + utils.ACCID: "12360", + utils.DIRECTION: utils.OUT, + utils.ACCOUNT: "1001", + utils.SUBJECT: "1001", + utils.DESTINATION: "1008", + utils.CATEGORY: "call", + utils.TENANT: "cgrates.org", + utils.REQTYPE: utils.META_PREPAID, + utils.USAGE: "2m", + utils.LastUsed: "30s", + } + if err := smgRPC.Call("SMGenericV1.SessionUpdate", smgEv, &maxUsage); err != nil { + t.Error(err) + } + if maxUsage != 120 { + t.Error("Bad max usage: ", maxUsage) + } + if err := smgRPC.Call("SMGenericV1.ActiveSessions", utils.AttrSMGGetActiveSessions{RunID: utils.StringPointer(utils.META_DEFAULT), OriginID: utils.StringPointer("12360")}, &aSessions); err != nil { + t.Error(err) + } else if len(aSessions) != 1 { + t.Errorf("Unexpected number of sessions received: %+v", aSessions) + } else if aSessions[0].Usage != time.Duration(150)*time.Second { + t.Errorf("Expecting 2m30s, 
received usage: %v", aSessions[0].Usage) + } + eAcntVal = 4.090030 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.MONETARY].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.MONETARY].GetTotalValue()) + } + time.Sleep(100 * time.Millisecond) + eAcntVal = 4.0565 + if err := smgRPC.Call("ApierV2.GetAccount", attrs, &acnt); err != nil { + t.Error(err) + } else if acnt.BalanceMap[utils.MONETARY].GetTotalValue() != eAcntVal { + t.Errorf("Expected: %f, received: %f", eAcntVal, acnt.BalanceMap[utils.MONETARY].GetTotalValue()) + } + var cdrs []*engine.ExternalCDR + req := utils.RPCCDRsFilter{RunIDs: []string{utils.META_DEFAULT}, DestinationPrefixes: []string{"1008"}} + if err := smgRPC.Call("ApierV2.GetCdrs", req, &cdrs); err != nil { + t.Error("Unexpected error: ", err.Error()) + } else if len(cdrs) != 1 { + t.Error("Unexpected number of CDRs returned: ", len(cdrs)) + } else { + if cdrs[0].Usage != "150.05" { + t.Errorf("Unexpected CDR Usage received, cdr: %v %+v ", cdrs[0].Usage, cdrs[0]) + } + if cdrs[0].Cost != 1.5333 { + t.Errorf("Unexpected CDR Cost received, cdr: %v %+v ", cdrs[0].Cost, cdrs[0]) + } + } +} diff --git a/sessionmanager/smg_session.go b/sessionmanager/smg_session.go index 6f94b1a0e..bb7e73f01 100644 --- a/sessionmanager/smg_session.go +++ b/sessionmanager/smg_session.go @@ -25,6 +25,7 @@ import ( "github.com/cgrates/cgrates/engine" "github.com/cgrates/cgrates/utils" + "github.com/cgrates/rpcclient" ) // One session handled by SM @@ -34,15 +35,16 @@ type SMGSession struct { connId string // Reference towards connection id on the session manager side. 
runId string // Keep a reference for the derived run timezone string - rater engine.Connector // Connector to Rater service - cdrsrv engine.Connector // Connector to CDRS service + rater rpcclient.RpcClientConnection // Connector to Rater service + cdrsrv rpcclient.RpcClientConnection // Connector to CDRS service extconns *SMGExternalConnections cd *engine.CallDescriptor sessionCds []*engine.CallDescriptor callCosts []*engine.CallCost extraDuration time.Duration // keeps the current duration debited on top of what heas been asked - lastUsage time.Duration // Keep record of the last debit for LastUsed functionality - totalUsage time.Duration + lastUsage time.Duration // last requested Duration + lastDebit time.Duration // last real debited duration + totalUsage time.Duration // sum of lastUsage } // Called in case of automatic debits @@ -54,7 +56,7 @@ func (self *SMGSession) debitLoop(debitInterval time.Duration) { return default: } - if maxDebit, err := self.debit(debitInterval, nilDuration); err != nil { + if maxDebit, err := self.debit(debitInterval, nil); err != nil { utils.Logger.Err(fmt.Sprintf(" Could not complete debit opperation on session: %s, error: %s", self.eventStart.GetUUID(), err.Error())) disconnectReason := SYSTEM_ERROR if err.Error() == utils.ErrUnauthorizedDestination.Error() { @@ -77,20 +79,32 @@ func (self *SMGSession) debitLoop(debitInterval time.Duration) { } // Attempts to debit a duration, returns maximum duration which can be debitted or error -func (self *SMGSession) debit(dur time.Duration, lastUsed time.Duration) (time.Duration, error) { - lastUsedCorrection := time.Duration(0) // Used if lastUsed influences the debit - if self.cd.DurationIndex != 0 && lastUsed != 0 { - if self.lastUsage > lastUsed { // We have debitted more than we have used, refund in the duration debitted - lastUsedCorrection = -(self.lastUsage - lastUsed) - } else { // We have debitted less than we have consumed, add the difference to duration debitted - 
lastUsedCorrection = lastUsed - self.lastUsage +func (self *SMGSession) debit(dur time.Duration, lastUsed *time.Duration) (time.Duration, error) { + requestedDuration := dur + //utils.Logger.Debug(fmt.Sprintf("InitDur: %f, lastUsed: %f", requestedDuration.Seconds(), lastUsed.Seconds())) + //utils.Logger.Debug(fmt.Sprintf("TotalUsage: %f, extraDuration: %f", self.totalUsage.Seconds(), self.extraDuration.Seconds())) + if lastUsed != nil { + self.extraDuration = self.lastDebit - *lastUsed + //utils.Logger.Debug(fmt.Sprintf("ExtraDuration LastUsed: %f", self.extraDuration.Seconds())) + if *lastUsed != self.lastUsage { + // total usage correction + self.totalUsage -= self.lastUsage + self.totalUsage += *lastUsed + //utils.Logger.Debug(fmt.Sprintf("TotalUsage Correction: %f", self.totalUsage.Seconds())) } } - // apply the lastUsed correction - dur += lastUsedCorrection - self.totalUsage += dur // Should reflect the total usage so far // apply correction from previous run - dur -= self.extraDuration + if self.extraDuration < dur { + dur -= self.extraDuration + } else { + self.lastUsage = requestedDuration + self.totalUsage += self.lastUsage + ccDuration := self.extraDuration // fake ccDuration + self.extraDuration -= dur + return ccDuration, nil + } + //utils.Logger.Debug(fmt.Sprintf("dur: %f", dur.Seconds())) + initialExtraDuration := self.extraDuration self.extraDuration = 0 if self.cd.LoopIndex > 0 { self.cd.TimeStart = self.cd.TimeEnd @@ -98,29 +112,40 @@ func (self *SMGSession) debit(dur time.Duration, lastUsed time.Duration) (time.D self.cd.TimeEnd = self.cd.TimeStart.Add(dur) self.cd.DurationIndex += dur cc := &engine.CallCost{} - if err := self.rater.MaxDebit(self.cd, cc); err != nil { + if err := self.rater.Call("Responder.MaxDebit", self.cd, cc); err != nil { self.lastUsage = 0 + self.lastDebit = 0 return 0, err } // cd corrections self.cd.TimeEnd = cc.GetEndTime() // set debited timeEnd // update call duration with real debited duration ccDuration := 
cc.GetDuration() + //utils.Logger.Debug(fmt.Sprintf("CCDur: %f", ccDuration.Seconds())) if ccDuration != dur { self.extraDuration = ccDuration - dur } + if ccDuration >= dur { + self.lastUsage = requestedDuration + } else { + self.lastUsage = ccDuration + } self.cd.DurationIndex -= dur self.cd.DurationIndex += ccDuration self.cd.MaxCostSoFar += cc.Cost self.cd.LoopIndex += 1 self.sessionCds = append(self.sessionCds, self.cd.Clone()) self.callCosts = append(self.callCosts, cc) - ccDuration -= lastUsedCorrection - if ccDuration < 0 { // if correction has pushed ccDuration bellow 0 - ccDuration = 0 + self.lastDebit = initialExtraDuration + ccDuration + self.totalUsage += self.lastUsage + //utils.Logger.Debug(fmt.Sprintf("TotalUsage: %f", self.totalUsage.Seconds())) + + if ccDuration >= dur { // we got what we asked to be debited + //utils.Logger.Debug(fmt.Sprintf("returning normal: %f", requestedDuration.Seconds())) + return requestedDuration, nil } - self.lastUsage = ccDuration // Reset the lastUsage for later reference - return ccDuration, nil + //utils.Logger.Debug(fmt.Sprintf("returning initialExtra: %f + ccDuration: %f", initialExtraDuration.Seconds(), ccDuration.Seconds())) + return initialExtraDuration + ccDuration, nil } // Attempts to refund a duration, error on failure @@ -167,10 +192,12 @@ func (self *SMGSession) refund(refundDuration time.Duration) error { if len(refundIncrements) > 0 { cd := firstCC.CreateCallDescriptor() cd.Increments = refundIncrements + cd.CgrID = self.cd.CgrID + cd.RunID = self.cd.RunID cd.Increments.Compress() - utils.Logger.Info(fmt.Sprintf("Refunding duration %v with cd: %s", initialRefundDuration, utils.ToJSON(cd))) + utils.Logger.Info(fmt.Sprintf("Refunding %s duration %v with incerements: %s", cd.CgrID, initialRefundDuration, utils.ToJSON(cd.Increments))) var response float64 - err := self.rater.RefundIncrements(cd, &response) + err := self.rater.Call("Responder.RefundIncrements", cd, &response) if err != nil { return err } @@ 
-189,7 +216,7 @@ func (self *SMGSession) close(endTime time.Time) error { for _, cc := range self.callCosts[1:] { firstCC.Merge(cc) } - //utils.Logger.Debug(fmt.Sprintf("MergedCC: %+v", firstCC)) + //utils.Logger.Debug("MergedCC: " + utils.ToJSON(firstCC)) end := firstCC.GetEndTime() refundDuration := end.Sub(endTime) self.refund(refundDuration) @@ -217,34 +244,36 @@ func (self *SMGSession) disconnectSession(reason string) error { } // Merge the sum of costs and sends it to CDRS for storage -func (self *SMGSession) saveOperations() error { +// originID could have been changed from original event, hence passing as argument here +func (self *SMGSession) saveOperations(originID string) error { if len(self.callCosts) == 0 { return nil // There are no costs to save, ignore the operation } firstCC := self.callCosts[0] // was merged in close method firstCC.Round() + //utils.Logger.Debug("Saved CC: " + utils.ToJSON(firstCC)) roundIncrements := firstCC.GetRoundIncrements() if len(roundIncrements) != 0 { cd := firstCC.CreateCallDescriptor() + cd.CgrID = self.cd.CgrID + cd.RunID = self.cd.RunID cd.Increments = roundIncrements var response float64 - if err := self.rater.RefundRounding(cd, &response); err != nil { + if err := self.rater.Call("Responder.RefundRounding", cd, &response); err != nil { return err } } - + smCost := &engine.SMCost{ + CGRID: self.eventStart.GetCgrId(self.timezone), + CostSource: utils.SESSION_MANAGER_SOURCE, + RunID: self.runId, + OriginHost: self.eventStart.GetOriginatorIP(utils.META_DEFAULT), + OriginID: originID, + Usage: self.TotalUsage().Seconds(), + CostDetails: firstCC, + } var reply string - err := self.cdrsrv.LogCallCost(&engine.CallCostLog{ - CgrId: self.eventStart.GetCgrId(self.timezone), - Source: utils.SESSION_MANAGER_SOURCE, - RunId: self.runId, - CallCost: firstCC, - CheckDuplicate: true, - }, &reply) - // this is a protection against the case when the close event is missed for some reason - // when the cdr arrives to cdrserver because 
our callcost is not there it will be rated - // as postpaid. When the close event finally arives we have to refund everything - if err != nil { + if err := self.cdrsrv.Call("CdrsV1.StoreSMCost", engine.AttrCDRSStoreSMCost{Cost: smCost, CheckDuplicate: true}, &reply); err != nil { if err == utils.ErrExists { self.refund(self.cd.GetDuration()) // Refund entire duration } else { @@ -261,13 +290,12 @@ func (self *SMGSession) TotalUsage() time.Duration { func (self *SMGSession) AsActiveSession(timezone string) *ActiveSession { sTime, _ := self.eventStart.GetSetupTime(utils.META_DEFAULT, timezone) aTime, _ := self.eventStart.GetAnswerTime(utils.META_DEFAULT, timezone) - usage, _ := self.eventStart.GetUsage(utils.META_DEFAULT) pdd, _ := self.eventStart.GetPdd(utils.META_DEFAULT) aSession := &ActiveSession{ CgrId: self.eventStart.GetCgrId(timezone), TOR: utils.VOICE, RunId: self.runId, - AccId: self.eventStart.GetUUID(), + OriginID: self.eventStart.GetUUID(), CdrHost: self.eventStart.GetOriginatorIP(utils.META_DEFAULT), CdrSource: self.eventStart.GetCdrSource(), ReqType: self.eventStart.GetReqType(utils.META_DEFAULT), @@ -279,7 +307,7 @@ func (self *SMGSession) AsActiveSession(timezone string) *ActiveSession { Destination: self.eventStart.GetDestination(utils.META_DEFAULT), SetupTime: sTime, AnswerTime: aTime, - Usage: usage, + Usage: self.TotalUsage(), Pdd: pdd, ExtraFields: self.eventStart.GetExtraFields(), Supplier: self.eventStart.GetSupplier(utils.META_DEFAULT), diff --git a/sessionmanager/smgeneric.go b/sessionmanager/smgeneric.go index e80417211..b81c79d95 100644 --- a/sessionmanager/smgeneric.go +++ b/sessionmanager/smgeneric.go @@ -21,6 +21,7 @@ package sessionmanager import ( "errors" "fmt" + "strings" "sync" "time" @@ -28,30 +29,101 @@ import ( "github.com/cgrates/cgrates/config" "github.com/cgrates/cgrates/engine" "github.com/cgrates/cgrates/utils" + "github.com/cgrates/rpcclient" ) var ErrPartiallyExecuted = errors.New("Partially executed") -func 
NewSMGeneric(cgrCfg *config.CGRConfig, rater engine.Connector, cdrsrv engine.Connector, timezone string, extconns *SMGExternalConnections) *SMGeneric { +func NewSMGeneric(cgrCfg *config.CGRConfig, rater rpcclient.RpcClientConnection, cdrsrv rpcclient.RpcClientConnection, timezone string, extconns *SMGExternalConnections) *SMGeneric { + gsm := &SMGeneric{cgrCfg: cgrCfg, rater: rater, cdrsrv: cdrsrv, extconns: extconns, timezone: timezone, - sessions: make(map[string][]*SMGSession), sessionsMux: new(sync.Mutex), guard: engine.NewGuardianLock()} + sessions: make(map[string][]*SMGSession), sessionTerminators: make(map[string]*smgSessionTerminator), sessionsMux: new(sync.RWMutex), guard: engine.Guardian} return gsm } type SMGeneric struct { - cgrCfg *config.CGRConfig // Separate from smCfg since there can be multiple - rater engine.Connector - cdrsrv engine.Connector - timezone string - sessions map[string][]*SMGSession //Group sessions per sessionId, multiple runs based on derived charging - extconns *SMGExternalConnections // Reference towards external connections manager - sessionsMux *sync.Mutex // Locks sessions map - guard *engine.GuardianLock // Used to lock on uuid + cgrCfg *config.CGRConfig // Separate from smCfg since there can be multiple + rater rpcclient.RpcClientConnection + cdrsrv rpcclient.RpcClientConnection + timezone string + sessions map[string][]*SMGSession //Group sessions per sessionId, multiple runs based on derived charging + sessionTerminators map[string]*smgSessionTerminator // terminate and cleanup the session if timer expires + extconns *SMGExternalConnections // Reference towards external connections manager + sessionsMux *sync.RWMutex // Locks sessions map + guard *engine.GuardianLock // Used to lock on uuid +} +type smgSessionTerminator struct { + timer *time.Timer + endChan chan bool + ttl time.Duration + ttlLastUsed *time.Duration + ttlUsage *time.Duration +} + +// Updates the timer for the session to a new ttl and terminate info +func 
(self *SMGeneric) resetTerminatorTimer(uuid string, ttl time.Duration, ttlLastUsed, ttlUsage *time.Duration) { + self.sessionsMux.RLock() + defer self.sessionsMux.RUnlock() + if st, found := self.sessionTerminators[uuid]; found { + if ttl != 0 { + st.ttl = ttl + } + if ttlLastUsed != nil { + st.ttlLastUsed = ttlLastUsed + } + if ttlUsage != nil { + st.ttlUsage = ttlUsage + } + st.timer.Reset(st.ttl) + } +} + +// Called when a session timeouts +func (self *SMGeneric) ttlTerminate(s *SMGSession, tmtr *smgSessionTerminator) { + debitUsage := tmtr.ttl + if tmtr.ttlUsage != nil { + debitUsage = *tmtr.ttlUsage + } + for _, s := range self.getSession(s.eventStart.GetUUID()) { + s.debit(debitUsage, tmtr.ttlLastUsed) + } + self.sessionEnd(s.eventStart.GetUUID(), s.TotalUsage()) + cdr := s.eventStart.AsStoredCdr(self.cgrCfg, self.timezone) + cdr.Usage = s.TotalUsage() + var reply string + self.cdrsrv.Call("CdrsV1.ProcessCdr", cdr, &reply) } func (self *SMGeneric) indexSession(uuid string, s *SMGSession) { self.sessionsMux.Lock() self.sessions[uuid] = append(self.sessions[uuid], s) + if self.cgrCfg.SmGenericConfig.SessionTTL != 0 { + if _, found := self.sessionTerminators[uuid]; !found { + ttl := self.cgrCfg.SmGenericConfig.SessionTTL + if ttlEv := s.eventStart.GetSessionTTL(); ttlEv != 0 { + ttl = ttlEv + } + timer := time.NewTimer(ttl) + endChan := make(chan bool, 1) + terminator := &smgSessionTerminator{ + timer: timer, + endChan: endChan, + ttl: ttl, + ttlLastUsed: s.eventStart.GetSessionTTLLastUsed(), + ttlUsage: s.eventStart.GetSessionTTLUsage(), + } + self.sessionTerminators[uuid] = terminator + go func() { + select { + case <-timer.C: + self.ttlTerminate(s, terminator) + case <-endChan: + timer.Stop() + } + }() + + } + } self.sessionsMux.Unlock() } @@ -59,10 +131,14 @@ func (self *SMGeneric) indexSession(uuid string, s *SMGSession) { func (self *SMGeneric) unindexSession(uuid string) bool { self.sessionsMux.Lock() defer self.sessionsMux.Unlock() - if _, hasIt := 
self.sessions[uuid]; !hasIt { + if _, found := self.sessions[uuid]; !found { return false } delete(self.sessions, uuid) + if st, found := self.sessionTerminators[uuid]; found { + st.endChan <- true + delete(self.sessionTerminators, uuid) + } return true } @@ -73,10 +149,22 @@ func (self *SMGeneric) getSessions() map[string][]*SMGSession { return self.sessions } -// Returns sessions/derived for a specific uuid -func (self *SMGeneric) getSession(uuid string) []*SMGSession { +func (self *SMGeneric) getSessionIDsForPrefix(prefix string) []string { self.sessionsMux.Lock() defer self.sessionsMux.Unlock() + sessionIDs := make([]string, 0) + for sessionID := range self.sessions { + if strings.HasPrefix(sessionID, prefix) { + sessionIDs = append(sessionIDs, sessionID) + } + } + return sessionIDs +} + +// Returns sessions/derived for a specific uuid +func (self *SMGeneric) getSession(uuid string) []*SMGSession { + self.sessionsMux.RLock() + defer self.sessionsMux.RUnlock() return self.sessions[uuid] } @@ -85,7 +173,7 @@ func (self *SMGeneric) sessionStart(evStart SMGenericEvent, connId string) error sessionId := evStart.GetUUID() _, err := self.guard.Guard(func() (interface{}, error) { // Lock it on UUID level var sessionRuns []*engine.SessionRun - if err := self.rater.GetSessionRuns(evStart.AsStoredCdr(self.cgrCfg, self.timezone), &sessionRuns); err != nil { + if err := self.rater.Call("Responder.GetSessionRuns", evStart.AsStoredCdr(self.cgrCfg, self.timezone), &sessionRuns); err != nil { return nil, err } else if len(sessionRuns) == 0 { return nil, nil @@ -102,7 +190,7 @@ func (self *SMGeneric) sessionStart(evStart SMGenericEvent, connId string) error } } return nil, nil - }, time.Duration(3)*time.Second, sessionId) + }, time.Duration(2)*time.Second, sessionId) return err } @@ -117,6 +205,7 @@ func (self *SMGeneric) sessionEnd(sessionId string, usage time.Duration) error { return nil, nil // Did not find the session so no need to close it anymore } for idx, s := range ss { 
+ s.totalUsage = usage // save final usage as totalUsage //utils.Logger.Info(fmt.Sprintf(" Ending session: %s, runId: %s", sessionId, s.runId)) if idx == 0 && s.stopDebit != nil { close(s.stopDebit) // Stop automatic debits @@ -129,7 +218,7 @@ func (self *SMGeneric) sessionEnd(sessionId string, usage time.Duration) error { if err := s.close(aTime.Add(usage)); err != nil { utils.Logger.Err(fmt.Sprintf(" Could not close session: %s, runId: %s, error: %s", sessionId, s.runId, err.Error())) } - if err := s.saveOperations(); err != nil { + if err := s.saveOperations(sessionId); err != nil { utils.Logger.Err(fmt.Sprintf(" Could not save session: %s, runId: %s, error: %s", sessionId, s.runId, err.Error())) } } @@ -138,13 +227,38 @@ func (self *SMGeneric) sessionEnd(sessionId string, usage time.Duration) error { return err } +// Used when an update will relocate an initial session (eg multiple data streams) +func (self *SMGeneric) sessionRelocate(sessionID, initialID string) error { + _, err := self.guard.Guard(func() (interface{}, error) { // Lock it on initialID level + if utils.IsSliceMember([]string{sessionID, initialID}, "") { // Not allowed empty params here + return nil, utils.ErrMandatoryIeMissing + } + ssNew := self.getSession(sessionID) // Already relocated + if len(ssNew) != 0 { + return nil, nil + } + ss := self.getSession(initialID) + if len(ss) == 0 { // No need of relocation + return nil, utils.ErrNotFound + } + for i, s := range ss { + self.indexSession(sessionID, s) + if i == 0 { + self.unindexSession(initialID) + } + } + return nil, nil + }, time.Duration(2)*time.Second, initialID) + return err +} + // Methods to apply on sessions, mostly exported through RPC/Bi-RPC //Calculates maximum usage allowed for gevent func (self *SMGeneric) GetMaxUsage(gev SMGenericEvent, clnt *rpc2.Client) (time.Duration, error) { gev[utils.EVENT_NAME] = utils.CGR_AUTHORIZATION storedCdr := gev.AsStoredCdr(config.CgrConfig(), self.timezone) var maxDur float64 - if err := 
self.rater.GetDerivedMaxSessionTime(storedCdr, &maxDur); err != nil { + if err := self.rater.Call("Responder.GetDerivedMaxSessionTime", storedCdr, &maxDur); err != nil { return time.Duration(0), err } return time.Duration(maxDur), nil @@ -153,11 +267,12 @@ func (self *SMGeneric) GetMaxUsage(gev SMGenericEvent, clnt *rpc2.Client) (time. func (self *SMGeneric) GetLcrSuppliers(gev SMGenericEvent, clnt *rpc2.Client) ([]string, error) { gev[utils.EVENT_NAME] = utils.CGR_LCR_REQUEST cd, err := gev.AsLcrRequest().AsCallDescriptor(self.timezone) + cd.CgrID = gev.GetCgrId(self.timezone) if err != nil { return nil, err } var lcr engine.LCRCost - if err = self.rater.GetLCR(&engine.AttrGetLcr{CallDescriptor: cd}, &lcr); err != nil { + if err = self.rater.Call("Responder.GetLCR", &engine.AttrGetLcr{CallDescriptor: cd}, &lcr); err != nil { return nil, err } if lcr.HasErrors() { @@ -167,12 +282,39 @@ func (self *SMGeneric) GetLcrSuppliers(gev SMGenericEvent, clnt *rpc2.Client) ([ return lcr.SuppliersSlice() } +// Called on session start +func (self *SMGeneric) SessionStart(gev SMGenericEvent, clnt *rpc2.Client) (time.Duration, error) { + if err := self.sessionStart(gev, getClientConnId(clnt)); err != nil { + self.sessionEnd(gev.GetUUID(), 0) + return nilDuration, err + } + d, err := self.SessionUpdate(gev, clnt) + if err != nil || d == 0 { + self.sessionEnd(gev.GetUUID(), 0) + } + return d, err +} + // Execute debits for usage/maxUsage func (self *SMGeneric) SessionUpdate(gev SMGenericEvent, clnt *rpc2.Client) (time.Duration, error) { + self.resetTerminatorTimer(gev.GetUUID(), gev.GetSessionTTL(), gev.GetSessionTTLLastUsed(), gev.GetSessionTTLUsage()) + if initialID, err := gev.GetFieldAsString(utils.InitialOriginID); err == nil { + err := self.sessionRelocate(gev.GetUUID(), initialID) + if err == utils.ErrNotFound { // Session was already relocated, create a new session with this update + err = self.sessionStart(gev, getClientConnId(clnt)) + } + if err != nil { + return 
nilDuration, err + } + } + var lastUsed *time.Duration evLastUsed, err := gev.GetLastUsed(utils.META_DEFAULT) if err != nil && err != utils.ErrNotFound { return nilDuration, err } + if err == nil { + lastUsed = &evLastUsed + } evMaxUsage, err := gev.GetMaxUsage(utils.META_DEFAULT, self.cgrCfg.MaxCallDuration) if err != nil { if err == utils.ErrNotFound { @@ -180,9 +322,8 @@ func (self *SMGeneric) SessionUpdate(gev SMGenericEvent, clnt *rpc2.Client) (tim } return nilDuration, err } - evUuid := gev.GetUUID() - for _, s := range self.getSession(evUuid) { - if maxDur, err := s.debit(evMaxUsage, evLastUsed); err != nil { + for _, s := range self.getSession(gev.GetUUID()) { + if maxDur, err := s.debit(evMaxUsage, lastUsed); err != nil { return nilDuration, err } else if maxDur < evMaxUsage { evMaxUsage = maxDur @@ -191,48 +332,59 @@ func (self *SMGeneric) SessionUpdate(gev SMGenericEvent, clnt *rpc2.Client) (tim return evMaxUsage, nil } -// Called on session start -func (self *SMGeneric) SessionStart(gev SMGenericEvent, clnt *rpc2.Client) (time.Duration, error) { - if err := self.sessionStart(gev, getClientConnId(clnt)); err != nil { - return nilDuration, err - } - return self.SessionUpdate(gev, clnt) -} - // Called on session end, should stop debit loop func (self *SMGeneric) SessionEnd(gev SMGenericEvent, clnt *rpc2.Client) error { - usage, err := gev.GetUsage(utils.META_DEFAULT) - if err != nil { - if err != utils.ErrNotFound { - return err - + if initialID, err := gev.GetFieldAsString(utils.InitialOriginID); err == nil { + err := self.sessionRelocate(gev.GetUUID(), initialID) + if err == utils.ErrNotFound { // Session was already relocated, create a new session with this update + err = self.sessionStart(gev, getClientConnId(clnt)) } - lastUsed, err := gev.GetLastUsed(utils.META_DEFAULT) + if err != nil && err != utils.ErrMandatoryIeMissing { + return err + } + } + sessionIDs := []string{gev.GetUUID()} + if sessionIDPrefix, err := 
gev.GetFieldAsString(utils.OriginIDPrefix); err == nil { // OriginIDPrefix is present, OriginID will not be anymore considered + sessionIDs = self.getSessionIDsForPrefix(sessionIDPrefix) + } + usage, errUsage := gev.GetUsage(utils.META_DEFAULT) + var lastUsed time.Duration + if errUsage != nil { + if errUsage != utils.ErrNotFound { + return errUsage + } + var err error + lastUsed, err = gev.GetLastUsed(utils.META_DEFAULT) if err != nil { if err == utils.ErrNotFound { err = utils.ErrMandatoryIeMissing } return err } - var s *SMGSession - for _, s = range self.getSession(gev.GetUUID()) { - break - } - if s == nil { - return nil - } - usage = s.TotalUsage() + lastUsed } - if err := self.sessionEnd(gev.GetUUID(), usage); err != nil { - return err + var interimError error + for _, sessionID := range sessionIDs { + if errUsage != nil { + var s *SMGSession + for _, s = range self.getSession(sessionID) { + break + } + if s == nil { + continue // No session active, will not be able to close it anyway + } + usage = s.TotalUsage() - s.lastUsage + lastUsed + } + if err := self.sessionEnd(sessionID, usage); err != nil { + interimError = err // Last error will be the one returned as API result + } } - return nil + return interimError } // Processes one time events (eg: SMS) func (self *SMGeneric) ChargeEvent(gev SMGenericEvent, clnt *rpc2.Client) (maxDur time.Duration, err error) { var sessionRuns []*engine.SessionRun - if err := self.rater.GetSessionRuns(gev.AsStoredCdr(self.cgrCfg, self.timezone), &sessionRuns); err != nil { + if err := self.rater.Call("Responder.GetSessionRuns", gev.AsStoredCdr(self.cgrCfg, self.timezone), &sessionRuns); err != nil { return nilDuration, err } else if len(sessionRuns) == 0 { return nilDuration, nil @@ -240,7 +392,7 @@ func (self *SMGeneric) ChargeEvent(gev SMGenericEvent, clnt *rpc2.Client) (maxDu var maxDurInit bool // Avoid differences between default 0 and received 0 for _, sR := range sessionRuns { cc := new(engine.CallCost) - if err = 
self.rater.MaxDebit(sR.CallDescriptor, cc); err != nil { + if err = self.rater.Call("Responder.MaxDebit", sR.CallDescriptor, cc); err != nil { utils.Logger.Err(fmt.Sprintf(" Could not Debit CD: %+v, RunID: %s, error: %s", sR.CallDescriptor, sR.DerivedCharger.RunID, err.Error())) break } @@ -271,20 +423,14 @@ func (self *SMGeneric) ChargeEvent(gev SMGenericEvent, clnt *rpc2.Client) (maxDu } // refund cc if len(refundIncrements) > 0 { - cd := &engine.CallDescriptor{ - Direction: cc.Direction, - Tenant: cc.Tenant, - Category: cc.Category, - Subject: cc.Subject, - Account: cc.Account, - Destination: cc.Destination, - TOR: cc.TOR, - Increments: refundIncrements, - } + cd := cc.CreateCallDescriptor() + cd.Increments = refundIncrements + cd.CgrID = sR.CallDescriptor.CgrID + cd.RunID = sR.CallDescriptor.RunID cd.Increments.Compress() utils.Logger.Info(fmt.Sprintf("Refunding session run callcost: %s", utils.ToJSON(cd))) var response float64 - err := self.rater.RefundIncrements(cd, &response) + err := self.rater.Call("Responder.RefundIncrements", cd, &response) if err != nil { return nilDuration, err } @@ -309,19 +455,20 @@ func (self *SMGeneric) ChargeEvent(gev SMGenericEvent, clnt *rpc2.Client) (maxDu cd := cc.CreateCallDescriptor() cd.Increments = roundIncrements var response float64 - if err := self.rater.RefundRounding(cd, &response); err != nil { + if err := self.rater.Call("Responder.RefundRounding", cd, &response); err != nil { utils.Logger.Err(fmt.Sprintf(" ERROR failed to refund rounding: %v", err)) } } - var reply string - if err := self.cdrsrv.LogCallCost(&engine.CallCostLog{ - CgrId: gev.GetCgrId(self.timezone), - Source: utils.SESSION_MANAGER_SOURCE, - RunId: sR.DerivedCharger.RunID, - CallCost: cc, - CheckDuplicate: true, - }, &reply); err != nil && err != utils.ErrExists { + smCost := &engine.SMCost{ + CGRID: gev.GetCgrId(self.timezone), + CostSource: utils.SESSION_MANAGER_SOURCE, + RunID: sR.DerivedCharger.RunID, + OriginHost: 
gev.GetOriginatorIP(utils.META_DEFAULT), + OriginID: gev.GetUUID(), + CostDetails: cc, + } + if err := self.cdrsrv.Call("CdrsV1.StoreSMCost", engine.AttrCDRSStoreSMCost{Cost: smCost, CheckDuplicate: true}, &reply); err != nil && err != utils.ErrExists { withErrors = true utils.Logger.Err(fmt.Sprintf(" Could not save CC: %+v, RunID: %s error: %s", cc, sR.DerivedCharger.RunID, err.Error())) } @@ -334,7 +481,7 @@ func (self *SMGeneric) ChargeEvent(gev SMGenericEvent, clnt *rpc2.Client) (maxDu func (self *SMGeneric) ProcessCdr(gev SMGenericEvent) error { var reply string - if err := self.cdrsrv.ProcessCdr(gev.AsStoredCdr(self.cgrCfg, self.timezone), &reply); err != nil { + if err := self.cdrsrv.Call("CdrsV1.ProcessCdr", gev.AsStoredCdr(self.cgrCfg, self.timezone), &reply); err != nil { return err } return nil diff --git a/test.sh b/test.sh index 3a4a57e95..a54a4907e 100755 --- a/test.sh +++ b/test.sh @@ -1,50 +1,4 @@ #! /usr/bin/env sh ./build.sh - -go test -i github.com/cgrates/cgrates/apier/v1 -go test -i github.com/cgrates/cgrates/apier/v2 -go test -i github.com/cgrates/cgrates/engine -go test -i github.com/cgrates/cgrates/sessionmanager -go test -i github.com/cgrates/cgrates/config -go test -i github.com/cgrates/cgrates/cmd/cgr-engine -go test -i github.com/cgrates/cgrates/cache2go -go test -i github.com/cgrates/cgrates/cdrc -go test -i github.com/cgrates/cgrates/utils -go test -i github.com/cgrates/cgrates/history -go test -i github.com/cgrates/cgrates/cdre -go test -i github.com/cgrates/cgrates/agents -go test -i github.com/cgrates/cgrates/structmatcher - -go test github.com/cgrates/cgrates/apier/v1 -v1=$? -go test github.com/cgrates/cgrates/apier/v2 -v2=$? -go test github.com/cgrates/cgrates/engine -en=$? -go test github.com/cgrates/cgrates/general_tests -gt=$? -go test github.com/cgrates/cgrates/sessionmanager -sm=$? -go test github.com/cgrates/cgrates/config -cfg=$? -go test github.com/cgrates/cgrates/cmd/cgr-engine -cr=$? 
-go test github.com/cgrates/cgrates/console -con=$? -go test github.com/cgrates/cgrates/cdrc -cdrcs=$? -go test github.com/cgrates/cgrates/utils -ut=$? -go test github.com/cgrates/cgrates/history -hs=$? -go test github.com/cgrates/cgrates/cache2go -c2g=$? -go test github.com/cgrates/cgrates/cdre -cdre=$? -go test github.com/cgrates/cgrates/agents -ag=$? -go test github.com/cgrates/cgrates/structmatcher -sc=$? - - -exit $v1 && $v2 && $en && $gt && $sm && $cfg && $bl && $cr && $con && $cdrc && $ut && $hs && $c2g && $cdre && $ag && $sc +go test $(glide novendor) +exit $? \ No newline at end of file diff --git a/utils/apitpdata.go b/utils/apitpdata.go index 56dcd006f..80b428e9a 100644 --- a/utils/apitpdata.go +++ b/utils/apitpdata.go @@ -275,6 +275,7 @@ type TPActions struct { type TPAction struct { Identifier string // Identifier mapped in the code BalanceId string // Balance identification string (account scope) + BalanceUuid string // Balance identification string (global scope) BalanceType string // Type of balance the action will operate on Directions string // Balance direction Units string // Number of units to add/deduct @@ -1160,6 +1161,7 @@ type AliasValue struct { // AttrSMGGetActiveSessions will filter returned sessions by SMGenericV1 type AttrSMGGetActiveSessions struct { ToR *string + OriginID *string RunID *string RequestType *string Tenant *string @@ -1169,3 +1171,10 @@ type AttrSMGGetActiveSessions struct { Destination *string Supplier *string } + +type AttrRateCDRs struct { + RPCCDRsFilter + StoreCDRs *bool + SendToStatS *bool // Set to true if the CDRs should be sent to stats server + ReplicateCDRs *bool // Replicate results +} diff --git a/utils/consts.go b/utils/consts.go index 72c15b145..d5f800d04 100644 --- a/utils/consts.go +++ b/utils/consts.go @@ -31,6 +31,7 @@ var ( ErrAccountDisabled = errors.New("ACCOUNT_DISABLED") ErrUserNotFound = errors.New("USER_NOT_FOUND") ErrInsufficientCredit = errors.New("INSUFFICENT_CREDIT") + ErrNotConvertible = 
errors.New("NOT_CONVERTIBLE") ) const ( @@ -113,6 +114,8 @@ const ( TOR = "ToR" ORDERID = "OrderID" ACCID = "OriginID" + InitialOriginID = "InitialOriginID" + OriginIDPrefix = "OriginIDPrefix" CDRSOURCE = "Source" CDRHOST = "OriginHost" REQTYPE = "RequestType" @@ -140,7 +143,7 @@ const ( FWV = "fwv" DRYRUN = "dry_run" META_COMBIMED = "*combimed" - INTERNAL = "internal" + MetaInternal = "*internal" ZERO_RATING_SUBJECT_PREFIX = "*zero" OK = "OK" CDRE_FIXED_WIDTH = "fwv" @@ -194,6 +197,7 @@ const ( LOG_CALL_COST_PREFIX = "cco_" LOG_ACTION_TIMMING_PREFIX = "ltm_" LOG_ACTION_TRIGGER_PREFIX = "ltr_" + VERSION_PREFIX = "ver_" LOG_ERR = "ler_" LOG_CDR = "cdr_" LOG_MEDIATED_CDR = "mcd_" @@ -238,7 +242,9 @@ const ( REFUND_INCR_CACHE_PREFIX = "REFUND_INCR_" REFUND_ROUND_CACHE_PREFIX = "REFUND_ROUND_" GET_SESS_RUNS_CACHE_PREFIX = "GET_SESS_RUNS_" + GET_DERIV_MAX_SESS_TIME = "GET_DERIV_MAX_SESS_TIME_" LOG_CALL_COST_CACHE_PREFIX = "LOG_CALL_COSTS_" + LCRCachePrefix = "LCR_" ALIAS_CONTEXT_RATING = "*rating" NOT_AVAILABLE = "N/A" CALL = "call" @@ -275,6 +281,10 @@ const ( UpdatedAt = "UpdatedAt" HandlerArgSep = "|" FlagForceDuration = "fd" + InstanceID = "InstanceID" + SessionTTL = "SessionTTL" + SessionTTLLastUsed = "SessionTTLLastUsed" + SessionTTLUsage = "SessionTTLUsage" ) var ( diff --git a/utils/coreutils.go b/utils/coreutils.go index 0f05b1044..abdbc1fa7 100644 --- a/utils/coreutils.go +++ b/utils/coreutils.go @@ -127,8 +127,9 @@ func Round(x float64, prec int, method string) float64 { } func ParseTimeDetectLayout(tmStr string, timezone string) (time.Time, error) { + tmStr = strings.TrimSpace(tmStr) var nilTime time.Time - if len(tmStr) == 0 { + if len(tmStr) == 0 || tmStr == UNLIMITED { return nilTime, nil } loc, err := time.LoadLocation(timezone) @@ -179,7 +180,7 @@ func ParseTimeDetectLayout(tmStr string, timezone string) (time.Time, error) { func ParseDate(date string) (expDate time.Time, err error) { date = strings.TrimSpace(date) switch { - case date == "*unlimited" 
|| date == "": + case date == UNLIMITED || date == "": // leave it at zero case strings.HasPrefix(date, "+"): d, err := time.ParseDuration(date[1:]) @@ -195,7 +196,7 @@ func ParseDate(date string) (expDate time.Time, err error) { expDate = time.Now().AddDate(1, 0, 0) // add one year case date == "*month_end": expDate = GetEndOfMonth(time.Now()) - case strings.HasSuffix(date, "Z"): + case strings.HasSuffix(date, "Z") || strings.Index(date, "+") != -1: // Allow both Z and +hh:mm format expDate, err = time.Parse(time.RFC3339, date) default: unix, err := strconv.ParseInt(date, 10, 64) @@ -533,3 +534,21 @@ func GetEndOfMonth(ref time.Time) time.Time { eom := time.Date(year, month, 1, 0, 0, 0, 0, ref.Location()) return eom.Add(-time.Second) } + +// formats number in K,M,G, etc. +func SizeFmt(num float64, suffix string) string { + if suffix == "" { + suffix = "B" + } + for _, unit := range []string{"", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"} { + if math.Abs(num) < 1024.0 { + return fmt.Sprintf("%3.1f%s%s", num, unit, suffix) + } + num /= 1024.0 + } + return fmt.Sprintf("%.1f%s%s", num, "Yi", suffix) +} + +func TimeIs0h(t time.Time) bool { + return t.Hour() == 0 && t.Minute() == 0 && t.Second() == 0 +} diff --git a/utils/dateseries.go b/utils/dateseries.go index 130bac5f2..818efa414 100644 --- a/utils/dateseries.go +++ b/utils/dateseries.go @@ -277,3 +277,13 @@ func (wd WeekDays) Serialize(sep string) string { } return wdStr } + +func DaysInMonth(year int, month time.Month) float64 { + return float64(time.Date(year, month, 1, 0, 0, 0, 0, time.UTC).AddDate(0, 1, -1).Day()) +} + +func DaysInYear(year int) float64 { + first := time.Date(year, 1, 1, 0, 0, 0, 0, time.UTC) + last := first.AddDate(1, 0, 0) + return float64(last.Sub(first).Hours() / 24) +} diff --git a/utils/dateseries_test.go b/utils/dateseries_test.go index 363aee178..ffd498827 100644 --- a/utils/dateseries_test.go +++ b/utils/dateseries_test.go @@ -161,3 +161,30 @@ func TestDateseriesMonthsIsCompleteYes(t 
*testing.T) { t.Error("Error months IsComplete: ", months) } } + +func TestDateseriesDaysInMonth(t *testing.T) { + if n := DaysInMonth(2016, 4); n != 30 { + t.Error("error calculating days: ", n) + } + if n := DaysInMonth(2016, 2); n != 29 { + t.Error("error calculating days: ", n) + } + if n := DaysInMonth(2016, 1); n != 31 { + t.Error("error calculating days: ", n) + } + if n := DaysInMonth(2016, 12); n != 31 { + t.Error("error calculating days: ", n) + } + if n := DaysInMonth(2015, 2); n != 28 { + t.Error("error calculating days: ", n) + } +} + +func TestDateseriesDaysInYear(t *testing.T) { + if n := DaysInYear(2016); n != 366 { + t.Error("error calculating days: ", n) + } + if n := DaysInYear(2015); n != 365 { + t.Error("error calculating days: ", n) + } +} diff --git a/utils/httpclient.go b/utils/httpclient.go index 5e048ebd0..169067914 100644 --- a/utils/httpclient.go +++ b/utils/httpclient.go @@ -22,7 +22,6 @@ import ( "bytes" "crypto/tls" "encoding/gob" - "encoding/json" "fmt" "io/ioutil" "net/http" @@ -49,16 +48,12 @@ func GetBytes(content interface{}) ([]byte, error) { } // Post without automatic failover -func HttpJsonPost(url string, skipTlsVerify bool, content interface{}) ([]byte, error) { - body, err := json.Marshal(content) - if err != nil { - return nil, err - } +func HttpJsonPost(url string, skipTlsVerify bool, content []byte) ([]byte, error) { tr := &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: skipTlsVerify}, } client := &http.Client{Transport: tr} - resp, err := client.Post(url, "application/json", bytes.NewBuffer(body)) + resp, err := client.Post(url, "application/json", bytes.NewBuffer(content)) if err != nil { return nil, err } @@ -80,7 +75,7 @@ func HttpPoster(addr string, skipTlsVerify bool, content interface{}, contentTyp var err error switch contentType { case CONTENT_JSON: - body, err = json.Marshal(content) + body = content.([]byte) case CONTENT_FORM: urlData = content.(url.Values) case CONTENT_TEXT: diff --git 
a/utils/httpclient_local_test.go b/utils/httpclient_local_test.go index 7d73b00bd..ecdf49481 100644 --- a/utils/httpclient_local_test.go +++ b/utils/httpclient_local_test.go @@ -39,15 +39,15 @@ func TestHttpJsonPoster(t *testing.T) { return } content := &TestContent{Var1: "Val1", Var2: "Val2"} + jsn, _ := json.Marshal(content) filePath := "/tmp/cgr_test_http_poster.json" - if _, err := HttpPoster("http://localhost:8080/invalid", true, content, CONTENT_JSON, 3, filePath); err != nil { + if _, err := HttpPoster("http://localhost:8080/invalid", true, jsn, CONTENT_JSON, 3, filePath); err != nil { t.Error(err) } - jsnContent, _ := json.Marshal(content) if readBytes, err := ioutil.ReadFile(filePath); err != nil { t.Error(err) - } else if !reflect.DeepEqual(jsnContent, readBytes) { - t.Errorf("Expecting: %q, received: %q", string(jsnContent), string(readBytes)) + } else if !reflect.DeepEqual(jsn, readBytes) { + t.Errorf("Expecting: %q, received: %q", string(jsn), string(readBytes)) } if err := os.Remove(filePath); err != nil { t.Error("Failed removing file: ", filePath) diff --git a/utils/map.go b/utils/map.go index 0bf197a7a..f9f63df76 100644 --- a/utils/map.go +++ b/utils/map.go @@ -67,7 +67,11 @@ func NewStringMap(s ...string) StringMap { for _, v := range s { v = strings.TrimSpace(v) if v != "" { - result[v] = true + if strings.HasPrefix(v, "!") { + result[v[1:]] = false + } else { + result[v] = true + } } } return result @@ -128,7 +132,11 @@ func StringMapFromSlice(s []string) StringMap { for _, v := range s { v = strings.TrimSpace(v) if v != "" { - result[v] = true + if strings.HasPrefix(v, "!") { + result[v[1:]] = false + } else { + result[v] = true + } } } return result diff --git a/utils/map_test.go b/utils/map_test.go new file mode 100644 index 000000000..a8d629492 --- /dev/null +++ b/utils/map_test.go @@ -0,0 +1,33 @@ +package utils + +import "testing" + +func TestStringMapParse(t *testing.T) { + sm := ParseStringMap("1;2;3;4") + if len(sm) != 4 { + 
t.Error("Error pasring map: ", sm) + } +} + +func TestStringMapParseNegative(t *testing.T) { + sm := ParseStringMap("1;2;!3;4") + if len(sm) != 4 { + t.Error("Error pasring map: ", sm) + } + if sm["3"] != false { + t.Error("Error parsing negative: ", sm) + } +} + +func TestStringMapCompare(t *testing.T) { + sm := ParseStringMap("1;2;!3;4") + if include, found := sm["2"]; include != true && found != true { + t.Error("Error detecting positive: ", sm) + } + if include, found := sm["3"]; include != false && found != true { + t.Error("Error detecting negative: ", sm) + } + if include, found := sm["5"]; include != false && found != false { + t.Error("Error detecting missing: ", sm) + } +} diff --git a/utils/rpc_params.go b/utils/rpc_params.go new file mode 100644 index 000000000..3e5f3f166 --- /dev/null +++ b/utils/rpc_params.go @@ -0,0 +1,49 @@ +package utils + +import "reflect" + +var rpcParamsMap map[string]*RpcParams + +type RpcParams struct { + Object interface{} + InParam reflect.Value + OutParam interface{} +} + +func init() { + rpcParamsMap = make(map[string]*RpcParams) +} + +func RegisterRpcParams(name string, obj interface{}) { + objType := reflect.TypeOf(obj) + if name == "" { + val := reflect.ValueOf(obj) + name = objType.Name() + if val.Kind() == reflect.Ptr { + name = objType.Elem().Name() + } + } + for i := 0; i < objType.NumMethod(); i++ { + method := objType.Method(i) + methodType := method.Type + if methodType.NumIn() == 3 { // if it has three parameters (one is self and two are rpc params) + out := methodType.In(2) + if out.Kind() == reflect.Ptr { + out = out.Elem() + } + rpcParamsMap[name+"."+method.Name] = &RpcParams{ + Object: obj, + InParam: reflect.New(methodType.In(1)), + OutParam: reflect.New(out).Interface(), + } + } + } +} + +func GetRpcParams(method string) (*RpcParams, error) { + x, found := rpcParamsMap[method] + if !found { + return nil, ErrNotFound + } + return x, nil +} diff --git a/utils/rpc_params_test.go b/utils/rpc_params_test.go new 
file mode 100644 index 000000000..aac04ecdc --- /dev/null +++ b/utils/rpc_params_test.go @@ -0,0 +1,48 @@ +package utils + +import "testing" + +type RpcStruct struct{} + +type Attr struct { + Name string + Surname string + Age float64 +} + +func (rpc *RpcStruct) Hopa(normal Attr, out *float64) error { + return nil +} + +func (rpc *RpcStruct) Tropa(pointer *Attr, out *float64) error { + return nil +} + +func (rpc *RpcStruct) Call(string, interface{}, interface{}) error { + return nil +} + +func TestRPCObjectPointer(t *testing.T) { + RegisterRpcParams("", &RpcStruct{}) + if len(rpcParamsMap) != 2 { + t.Errorf("error registering rpc object: %v", rpcParamsMap) + } + x, found := rpcParamsMap["RpcStruct.Hopa"] + if !found { + t.Errorf("error getting rpcobject: %v (%+v)", rpcParamsMap, x) + } + a := x.InParam + if v, err := FromMapStringInterfaceValue(map[string]interface{}{"Name": "a", "Surname": "b", "Age": 10.2}, a); err != nil || v.(Attr).Name != "a" || v.(Attr).Surname != "b" || v.(Attr).Age != 10.2 { + t.Errorf("error converting to struct: %+v (%v)", v, err) + } + //TODO: make pointer in arguments usable + /*x, found = rpcParamsMap["RpcStruct.Tropa"] + if !found { + t.Errorf("error getting rpcobject: %v (%+v)", rpcParamsMap, x) + } + b := x.InParam + log.Printf("T: %+v", b) + if v, err := FromMapStringInterfaceValue(map[string]interface{}{"Name": "a", "Surname": "b", "Age": 10.2}, b); err != nil || v.(Attr).Name != "a" || v.(Attr).Surname != "b" || v.(Attr).Age != 10.2 { + t.Errorf("error converting to struct: %+v (%v)", v, err) + }*/ +} diff --git a/utils/struct.go b/utils/struct.go index 0b16caeef..e6f20fff3 100644 --- a/utils/struct.go +++ b/utils/struct.go @@ -18,6 +18,7 @@ along with this program. 
If not, see package utils import ( + "errors" "reflect" "strconv" "strings" @@ -171,6 +172,49 @@ func FromMapStringString(m map[string]string, in interface{}) { return } +func FromMapStringInterface(m map[string]interface{}, in interface{}) error { + v := reflect.ValueOf(in) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + for fieldName, fieldValue := range m { + field := v.FieldByName(fieldName) + if field.IsValid() { + if !field.IsValid() || !field.CanSet() { + continue + } + structFieldType := field.Type() + val := reflect.ValueOf(fieldValue) + if structFieldType != val.Type() { + return errors.New("Provided value type didn't match obj field type") + } + field.Set(val) + } + } + return nil +} + +func FromMapStringInterfaceValue(m map[string]interface{}, v reflect.Value) (interface{}, error) { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + for fieldName, fieldValue := range m { + field := v.FieldByName(fieldName) + if field.IsValid() { + if !field.IsValid() || !field.CanSet() { + continue + } + structFieldType := field.Type() + val := reflect.ValueOf(fieldValue) + if structFieldType != val.Type() { + return nil, errors.New("Provided value type didn't match obj field type") + } + field.Set(val) + } + } + return v.Interface(), nil +} + // Update struct with map fields, returns not matching map keys, s is a struct to be updated func UpdateStructWithStrMap(s interface{}, m map[string]string) []string { notMatched := []string{} diff --git a/utils/struct_test.go b/utils/struct_test.go index 50371ea36..74450a7cb 100644 --- a/utils/struct_test.go +++ b/utils/struct_test.go @@ -84,3 +84,32 @@ func TestStructExtraFields(t *testing.T) { t.Errorf("expected: %v got: %v", ts.ExtraFields, efMap) } } + +func TestStructFromMapStringInterface(t *testing.T) { + ts := &struct { + Name string + Class *string + List []string + Elements struct { + Type string + Value float64 + } + }{} + s := "test2" + m := map[string]interface{}{ + "Name": "test1", + "Class": &s, + "List": 
[]string{"test3", "test4"}, + "Elements": struct { + Type string + Value float64 + }{ + Type: "test5", + Value: 9.8, + }, + } + if err := FromMapStringInterface(m, ts); err != nil { + t.Logf("ts: %+v", ToJSON(ts)) + t.Error("Error converting map to struct: ", err) + } +} diff --git a/utils/utils_test.go b/utils/utils_test.go index 1edd0ada0..d33674aa8 100644 --- a/utils/utils_test.go +++ b/utils/utils_test.go @@ -156,7 +156,20 @@ func TestParseTimeDetectLayout(t *testing.T) { if err == nil { t.Errorf("Expecting error") } + tmStr = "2016-04-01T02:00:00+02:00" + expectedTime = time.Date(2016, 4, 1, 0, 0, 0, 0, time.UTC) + tm, err = ParseTimeDetectLayout(tmStr, "") + if err != nil { + t.Error(err) + } else if !tm.Equal(expectedTime) { + t.Errorf("Unexpected time parsed: %v, expecting: %v", tm, expectedTime) + } + _, err = ParseTimeDetectLayout(tmStr[1:], "") + if err == nil { + t.Errorf("Expecting error") + } sqlTmStr := "2013-12-30 15:00:01" + expectedTime = time.Date(2013, 12, 30, 15, 0, 1, 0, time.UTC) sqlTm, err := ParseTimeDetectLayout(sqlTmStr, "") if err != nil { t.Error(err) @@ -291,6 +304,11 @@ func TestParseDateRFC3339(t *testing.T) { if err != nil || !date.Equal(expected) { t.Error("error parsing date: ", expected.Sub(date)) } + date, err = ParseDate("2016-04-01T02:00:00+02:00") + expected = time.Date(2016, 4, 1, 0, 0, 0, 0, time.UTC) + if err != nil || !date.Equal(expected) { + t.Errorf("Expecting: %v, received: %v", expected, date) + } } func TestMissingStructFieldsCorrect(t *testing.T) { diff --git a/utils/value_formula.go b/utils/value_formula.go new file mode 100644 index 000000000..1e6221087 --- /dev/null +++ b/utils/value_formula.go @@ -0,0 +1,89 @@ +package utils + +import ( + "encoding/json" + "errors" + "log" + "strconv" + "time" +) + +//for computing a dynamic value for Value field +type ValueFormula struct { + Method string + Params map[string]interface{} + Static float64 +} + +func ParseBalanceFilterValue(val string) (*ValueFormula, error) { + 
u, err := strconv.ParseFloat(val, 64) + if err == nil { + return &ValueFormula{Static: u}, err + } + var vf ValueFormula + if err := json.Unmarshal([]byte(val), &vf); err == nil { + return &vf, err + } + return nil, errors.New("Invalid value: " + val) +} + +type valueFormula func(map[string]interface{}) float64 + +const ( + INCREMENTAL = "*incremental" +) + +var ValueFormulas = map[string]valueFormula{ + INCREMENTAL: incrementalFormula, +} + +func (vf *ValueFormula) String() string { + return ToJSON(vf) +} + +func incrementalFormula(params map[string]interface{}) float64 { + // check parameters + unitsInterface, unitsFound := params["Units"] + intervalInterface, intervalFound := params["Interval"] + incrementInterface, incrementFound := params["Increment"] + + if !unitsFound || !intervalFound || !incrementFound { + return 0.0 + } + units, ok := unitsInterface.(float64) + if !ok { + log.Print("units") + return 0.0 + } + var interval string + switch intr := intervalInterface.(type) { + case string: + interval = intr + case []byte: + interval = string(intr) + default: + return 0.0 + } + var increment string + switch incr := incrementInterface.(type) { + case string: + increment = incr + case []byte: + increment = string(incr) + default: + return 0.0 + } + now := time.Now() + if increment == "day" { + if interval == "week" { + return units / 7 + } + if interval == "month" { + return units / DaysInMonth(now.Year(), now.Month()) + } + if interval == "year" { + return units / DaysInYear(now.Year()) + } + } + return 0.0 +} diff --git a/utils/value_formula_test.go b/utils/value_formula_test.go new file mode 100644 index 000000000..1aab18106 --- /dev/null +++ b/utils/value_formula_test.go @@ -0,0 +1,39 @@ +package utils + +import ( + "encoding/json" + "testing" + "time" +) + +func TestValueFormulaDayWeek(t *testing.T) { + params := make(map[string]interface{}) + if err := json.Unmarshal([]byte(`{"Units":10, "Interval":"week", "Increment":"day"}`), ¶ms); err != nil { + 
t.Error("error unmarshalling params: ", err) + } + if x := incrementalFormula(params); x != 10/7.0 { + t.Error("error caclulating value using formula: ", x) + } +} + +func TestValueFormulaDayMonth(t *testing.T) { + params := make(map[string]interface{}) + if err := json.Unmarshal([]byte(`{"Units":10, "Interval":"month", "Increment":"day"}`), ¶ms); err != nil { + t.Error("error unmarshalling params: ", err) + } + now := time.Now() + if x := incrementalFormula(params); x != 10/DaysInMonth(now.Year(), now.Month()) { + t.Error("error caclulating value using formula: ", x) + } +} + +func TestValueFormulaDayYear(t *testing.T) { + params := make(map[string]interface{}) + if err := json.Unmarshal([]byte(`{"Units":10, "Interval":"year", "Increment":"day"}`), ¶ms); err != nil { + t.Error("error unmarshalling params: ", err) + } + now := time.Now() + if x := incrementalFormula(params); x != 10/DaysInYear(now.Year()) { + t.Error("error caclulating value using formula: ", x) + } +}