mirror of https://github.com/cgrates/cgrates.git

Merge branch 'master' into hapool
@@ -168,8 +168,7 @@ func (self *ApierV1) SetAccount(attr utils.AttrSetAccount, reply *string) error
ub = bal
} else { // Not found in db, create it here
ub = &engine.Account{
Id: balanceId,
AllowNegative: attr.AllowNegative,
Id: balanceId,
}
}

@@ -183,6 +182,12 @@ func (self *ApierV1) SetAccount(attr utils.AttrSetAccount, reply *string) error
at.AccountIds = append(at.AccountIds, balanceId)
}
}
if attr.AllowNegative != nil {
ub.AllowNegative = *attr.AllowNegative
}
if attr.Disabled != nil {
ub.Disabled = *attr.Disabled
}
// All prepared, save account
if err := self.AccountDb.SetAccount(ub); err != nil {
return 0, err
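The second hunk only touches the stored account when the corresponding attribute was actually sent, which implies AttrSetAccount now carries AllowNegative and Disabled as pointer fields so an unset value can be told apart from an explicit false. A minimal sketch of that pattern (the struct layout beyond these two fields is an assumption, not taken from this diff):

::

    // Sketch only: optional attributes modelled as *bool so "not sent" differs from "false".
    type AttrSetAccount struct {
        Tenant        string
        Account       string
        AllowNegative *bool // nil: leave the stored value untouched
        Disabled      *bool // nil: leave the stored value untouched
    }

    func applyOptionalFlags(ub *engine.Account, attr AttrSetAccount) {
        if attr.AllowNegative != nil {
            ub.AllowNegative = *attr.AllowNegative
        }
        if attr.Disabled != nil {
            ub.Disabled = *attr.Disabled
        }
    }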
@@ -32,20 +32,23 @@ func (self *ApierV1) GetLcr(lcrReq engine.LcrRequest, lcrReply *engine.LcrReply)
return err
}
var lcrQried engine.LCRCost
if err := self.Responder.GetLCR(cd, &lcrQried); err != nil {
if err := self.Responder.GetLCR(&engine.AttrGetLcr{CallDescriptor: cd, Paginator: lcrReq.Paginator}, &lcrQried); err != nil {
return utils.NewErrServerError(err)
}
if lcrQried.Entry == nil {
return utils.ErrNotFound
}
if lcrQried.HasErrors() {
lcrQried.LogErrors()
return fmt.Errorf("%s:%s", utils.ErrServerError.Error(), "LCR_COMPUTE_ERRORS")
}
lcrReply.DestinationId = lcrQried.Entry.DestinationId
lcrReply.RPCategory = lcrQried.Entry.RPCategory
lcrReply.Strategy = lcrQried.Entry.Strategy
for _, qriedSuppl := range lcrQried.SupplierCosts {
if qriedSuppl.Error != "" {
engine.Logger.Err(fmt.Sprintf("LCR_ERROR: supplier <%s>, error <%s>", qriedSuppl.Supplier, qriedSuppl.Error))
if !lcrReq.IgnoreErrors {
return fmt.Errorf("%s:%s", utils.ErrServerError.Error(), "LCR_COMPUTE_ERRORS")
}
continue
}
if dtcs, err := utils.NewDTCSFromRPKey(qriedSuppl.Supplier); err != nil {
return utils.NewErrServerError(err)
} else {

@@ -62,12 +65,14 @@ func (self *ApierV1) GetLcrSuppliers(lcrReq engine.LcrRequest, suppliers *string
return err
}
var lcrQried engine.LCRCost
if err := self.Responder.GetLCR(cd, &lcrQried); err != nil {
if err := self.Responder.GetLCR(&engine.AttrGetLcr{CallDescriptor: cd, Paginator: lcrReq.Paginator}, &lcrQried); err != nil {
return utils.NewErrServerError(err)
}
if lcrQried.HasErrors() {
lcrQried.LogErrors()
return fmt.Errorf("%s:%s", utils.ErrServerError.Error(), "LCR_ERRORS")
if !lcrReq.IgnoreErrors {
return fmt.Errorf("%s:%s", utils.ErrServerError.Error(), "LCR_COMPUTE_ERRORS")
}
}
if suppliersStr, err := lcrQried.SuppliersString(); err != nil {
return utils.NewErrServerError(err)
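Both API methods now wrap the call descriptor in engine.AttrGetLcr together with the request's Paginator, so paging hints travel from the API request down to the responder. A rough sketch of a JSON-RPC client using this to cap the number of supplier costs returned (the rpcClient variable and the field values are illustrative assumptions, not part of this diff):

::

    limit := 2
    lcrReq := engine.LcrRequest{
        Direction:   "*out",
        Tenant:      "cgrates.org",
        Category:    "call",
        Account:     "1001",
        Subject:     "1001",
        Destination: "1002",
        Paginator:   &utils.Paginator{Limit: &limit}, // forwarded into AttrGetLcr above
    }
    var lcrReply engine.LcrReply
    if err := rpcClient.Call("ApierV1.GetLcr", lcrReq, &lcrReply); err != nil {
        log.Fatal(err)
    }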
@@ -310,6 +310,8 @@ func (cdre *CdrExporter) composeTrailer() error {
func (cdre *CdrExporter) processCdr(cdr *engine.StoredCdr) error {
if cdr == nil || len(cdr.CgrId) == 0 { // We do not export empty CDRs
return nil
} else if cdr.ExtraFields == nil { // Avoid assignment in nil map if not initialized
cdr.ExtraFields = make(map[string]string)
}
// Cost multiply
if cdre.dataUsageMultiplyFactor != 0.0 && cdr.TOR == utils.DATA {
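The guard shown here rescales usage only for *data CDRs and only when a multiply factor is configured. A standalone sketch of that conversion, assuming the Usage field is a time.Duration as elsewhere in the engine package (helper name is hypothetical):

::

    // Sketch: KBytes-to-Bytes style rescaling applied before export.
    func scaleDataUsage(cdr *engine.StoredCdr, factor float64) {
        if factor == 0.0 || cdr.TOR != utils.DATA {
            return // leave non-data records untouched
        }
        cdr.Usage = time.Duration(float64(cdr.Usage) * factor)
    }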
@@ -67,8 +67,17 @@ func (ce *CommandExecuter) clientArgs(iface interface{}) (args []string) {
for i := 0; i < typ.NumField(); i++ {
valField := val.Field(i)
typeField := typ.Field(i)
//log.Printf("%v (%v)", typeField.Name, valField.Kind())
switch valField.Kind() {
case reflect.Struct:
case reflect.Ptr, reflect.Struct:
if valField.Kind() == reflect.Ptr {
valField = reflect.New(valField.Type().Elem()).Elem()
if valField.Kind() != reflect.Struct {
//log.Printf("Here: %v (%v)", typeField.Name, valField.Kind())
args = append(args, typeField.Name)
continue
}
}
args = append(args, ce.clientArgs(valField.Interface())...)
default:
args = append(args, typeField.Name)
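The switch now also descends into pointer-to-struct fields: a nil pointer is replaced by a freshly allocated zero value via reflect.New so its nested field names can still be collected as client arguments. A self-contained sketch of the same walk (simplified; it assumes a struct value with exported fields is passed in):

::

    // Sketch: collect argument names from struct and *struct members alike.
    func fieldNames(iface interface{}) (args []string) {
        val := reflect.ValueOf(iface)
        typ := reflect.TypeOf(iface)
        for i := 0; i < typ.NumField(); i++ {
            valField := val.Field(i)
            typeField := typ.Field(i)
            switch valField.Kind() {
            case reflect.Ptr, reflect.Struct:
                if valField.Kind() == reflect.Ptr {
                    // allocate a zero value so even a nil pointer can be walked
                    valField = reflect.New(valField.Type().Elem()).Elem()
                    if valField.Kind() != reflect.Struct {
                        args = append(args, typeField.Name) // pointer to a non-struct
                        continue
                    }
                }
                args = append(args, fieldNames(valField.Interface())...)
            default:
                args = append(args, typeField.Name)
            }
        }
        return
    }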
@@ -20,13 +20,14 @@ package console

import (
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
)

func init() {
c := &CmdGetLcr{
name: "lcr",
rpcMethod: "ApierV1.GetLcr",
rpcParams: &engine.LcrRequest{},
rpcParams: &engine.LcrRequest{Paginator: &utils.Paginator{}},
}
commands[c.Name()] = c
c.CommandExecuter = &CommandExecuter{c}

@@ -50,7 +51,7 @@ func (self *CmdGetLcr) RpcMethod() string {

func (self *CmdGetLcr) RpcParams(reset bool) interface{} {
if reset || self.rpcParams == nil {
self.rpcParams = &engine.LcrRequest{}
self.rpcParams = &engine.LcrRequest{Paginator: &utils.Paginator{}}
}
return self.rpcParams
}
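Pre-allocating the embedded Paginator matters because LcrRequest embeds it as a pointer: with a nil pointer the promoted paging fields could not be filled in, and the reflection walk from the previous hunk would only see a bare field name. A tiny sketch of what the pre-allocated form allows, assuming Paginator exposes Limit and Offset as *int as used later in GetLCR (values illustrative):

::

    params := &engine.LcrRequest{Paginator: &utils.Paginator{}}
    limit := 5
    params.Limit = &limit // promoted field of the embedded *utils.Paginator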
@@ -5,5 +5,7 @@
*out,cgrates.org,call,1002,*any,*any,lcr_profile1,*qos,,2014-01-14T00:00:00Z,10
*out,cgrates.org,call,1003,*any,DST_1002,lcr_profile1,*qos_threshold,20;;;;2m;;;;;;;,2014-01-14T00:00:00Z,10
*out,cgrates.org,call,1003,*any,*any,lcr_profile1,*qos_threshold,40;;;;90s;;;;;;;,2014-01-14T00:00:00Z,10
*out,cgrates.org,call,1004,*any,DST_1002,lcr_profile1,*load_distribution,supplier1:5;supplier2:3;*default:1,2014-01-14T00:00:00Z,10
*out,cgrates.org,call,1004,*any,*any,lcr_profile1,*load_distribution,,2014-01-14T00:00:00Z,10
*out,cgrates.org,call,*any,*any,DST_1002,lcr_profile2,*lowest_cost,,2014-01-14T00:00:00Z,10
*out,cgrates.org,call,*any,*any,*any,lcr_profile1,*lowest_cost,,2014-01-14T00:00:00Z,10
Binary file not shown.
@@ -60,7 +60,7 @@ loadmodule "jsonrpc-s.so"
# ----------------- setting module-specific parameters ---------------

# ----- mi_fifo params -----
modparam("mi_fifo", "fifo_name", "/tmp/cgr_kamevapi/kamailio/run/kamailio_fifo")
modparam("mi_fifo", "fifo_name", "/tmp/kamailio_fifo")

# ----- tm params -----
modparam("tm", "failure_reply_mode", 3)
@@ -1,176 +0,0 @@
cgr-engine configuration file
=============================

Organized into configuration sections. All configuration options come with defaults and we have tried our best to choose the best ones for a minimum of efforts necessary when running.

Bellow is the default configuration file which comes hardcoded into cgr-engine, most of them being explained and exemplified there.

::

[global]
# ratingdb_type = redis # Rating subsystem database: <redis>.
# ratingdb_host = 127.0.0.1 # Rating subsystem database host address.
# ratingdb_port = 6379 # Rating subsystem port to reach the database.
# ratingdb_name = 10 # Rating subsystem database name to connect to.
# ratingdb_user = # Rating subsystem username to use when connecting to database.
# ratingdb_passwd = # Rating subsystem password to use when connecting to database.
# accountdb_type = redis # Accounting subsystem database: <redis>.
# accountdb_host = 127.0.0.1 # Accounting subsystem database host address.
# accountdb_port = 6379 # Accounting subsystem port to reach the database.
# accountdb_name = 11 # Accounting subsystem database name to connect to.
# accountdb_user = # Accounting subsystem username to use when connecting to database.
# accountdb_passwd = # Accounting subsystem password to use when connecting to database.
# stordb_type = mysql # Stor database type to use: <mysql>
# stordb_host = 127.0.0.1 # The host to connect to. Values that start with / are for UNIX domain sockets.
# stordb_port = 3306 # The port to reach the logdb.
# stordb_name = cgrates # The name of the log database to connect to.
# stordb_user = cgrates # Username to use when connecting to stordb.
# stordb_passwd = CGRateS.org # Password to use when connecting to stordb.
# dbdata_encoding = msgpack # The encoding used to store object data in strings: <msgpack|json>
# rpc_json_listen = 127.0.0.1:2012 # RPC JSON listening address
# rpc_gob_listen = 127.0.0.1:2013 # RPC GOB listening address
# http_listen = 127.0.0.1:2080 # HTTP listening address
# default_reqtype = rated # Default request type to consider when missing from requests: <""|prepaid|postpaid|pseudoprepaid|rated>.
# default_category = call # Default Type of Record to consider when missing from requests.
# default_tenant = cgrates.org # Default Tenant to consider when missing from requests.
# default_subject = cgrates # Default rating Subject to consider when missing from requests.
# rounding_decimals = 10 # System level precision for floats
# http_skip_tls_veify = false # If enabled Http Client will accept any TLS certificate
# xmlcfg_path = # Path towards additional config defined in xml file

[balancer]
# enabled = false # Start Balancer service: <true|false>.

[rater]
# enabled = false # Enable RaterCDRSExportPath service: <true|false>.
# balancer = # Register to Balancer as worker: <""|internal|127.0.0.1:2013>.

[scheduler]
# enabled = false # Starts Scheduler service: <true|false>.

[cdrs]
# enabled = false # Start the CDR Server service: <true|false>.
# extra_fields = # Extra fields to store in CDRs for non-generic CDRs
# mediator = # Address where to reach the Mediator. Empty for disabling mediation. <""|internal>
# cdrstats = # Address where to reach the cdrstats service: <internal|x.y.z.y:1234>
# store_disable = false # When true, CDRs will not longer be saved in stordb, useful for cdrstats only scenario

[cdre]
# cdr_format = csv # Exported CDRs format <csv>
# data_usage_multiply_factor = 0.0 # Multiply data usage before export (eg: convert from KBytes to Bytes)
# cost_multiply_factor = 0.0 # Multiply cost before export (0.0 to disable), eg: add VAT
# cost_rounding_decimals = -1 # Rounding decimals for Cost values. -1 to disable rounding
# cost_shift_digits = 0 # Shift digits in the cost on export (eg: convert from EUR to cents)
# mask_destination_id = # Destination id containing called addresses to be masked on export
# mask_length = 0 # Length of the destination suffix to be masked
# export_dir = /var/log/cgrates/cdre # Path where the exported CDRs will be placed
# export_template = cgrid,mediation_runid,tor,accid,reqtype,direction,tenant,category,account,subject,destination,setup_time,answer_time,usage,cost
# Exported fields template <""|fld1,fld2|*xml:instance_name>

[cdrc]
# enabled = false # Enable CDR client functionality
# cdrs = internal # Address where to reach CDR server. <internal|127.0.0.1:2080>
# run_delay = 0 # Sleep interval in seconds between consecutive runs, 0 to use automation via inotify
# cdr_type = csv # CDR file format <csv|freeswitch_csv>.
# csv_separator = , # Separator used in case of csv files. One character only supported and needs to be right after equal sign
# cdr_in_dir = /var/log/cgrates/cdrc/in # Absolute path towards the directory where the CDRs are stored.
# cdr_out_dir = /var/log/cgrates/cdrc/out # Absolute path towards the directory where processed CDRs will be moved.
# cdr_source_id = csv # Free form field, tag identifying the source of the CDRs within CGRS database.
# tor_field = 2 # TypeOfRecord field identifier. Use index number in case of .csv cdrs.
# accid_field = 3 # Accounting id field identifier. Use index number in case of .csv cdrs.
# reqtype_field = 4 # Request type field identifier. Use index number in case of .csv cdrs.
# direction_field = 5 # Direction field identifier. Use index numbers in case of .csv cdrs.
# tenant_field = 6 # Tenant field identifier. Use index numbers in case of .csv cdrs.
# category_field = 7 # Type of Record field identifier. Use index numbers in case of .csv cdrs.
# account_field = 8 # Account field identifier. Use index numbers in case of .csv cdrs.
# subject_field = 9 # Subject field identifier. Use index numbers in case of .csv CDRs.
# destination_field = 10 # Destination field identifier. Use index numbers in case of .csv cdrs.
# setup_time_field = 11 # Setup time field identifier. Use index numbers in case of .csv cdrs.
# answer_time_field = 12 # Answer time field identifier. Use index numbers in case of .csv cdrs.
# usage_field = 13 # Usage field identifier. Use index numbers in case of .csv cdrs.
# extra_fields = # Extra fields identifiers. For .csv, format: <label_extrafield_1>:<index_extrafield_1>[...,<label_extrafield_n>:<index_extrafield_n>]

[mediator]
# enabled = false # Starts Mediator service: <true|false>.
# reconnects = 3 # Number of reconnects to rater/cdrs before giving up.
# rater = internal # Address where to reach the Rater: <internal|x.y.z.y:1234>
# cdrstats = internal # Address where to reach the cdrstats service: <internal|x.y.z.y:1234>
# store_disable = false # When true, CDRs will not longer be saved in stordb, useful for cdrstats only scenario

[cdrstats]
# enabled = false # Starts the cdrstats service: <true|false>
# queue_length = 50 # Number of items in the stats buffer
# time_window = 1h # Will only keep the CDRs who's call setup time is not older than time.Now()-TimeWindow
# metrics = ASR, ACD, ACC # Stat metric ids to build
# setup_interval = # Filter on CDR SetupTime
# tors = # Filter on CDR TOR fields
# cdr_hosts= # Filter on CDR CdrHost fields
# cdr_sources = # Filter on CDR CdrSource fields
# req_types = # Filter on CDR ReqType fields
# directions = # Filter on CDR Direction fields
# tenants = # Filter on CDR Tenant fields
# categories = # Filter on CDR Category fields
# accounts = # Filter on CDR Account fields
# subjects = # Filter on CDR Subject fields
# destination_prefixes = # Filter on CDR Destination prefixes
# usage_interval = # Filter on CDR Usage
# mediation_run_ids = # Filter on CDR MediationRunId fields
# rated_accounts = # Filter on CDR RatedAccount fields
# rated_subjects = # Filter on CDR RatedSubject fields
# cost_intervals = # Filter on CDR Cost

[session_manager]
# enabled = false # Starts SessionManager service: <true|false>
# switch_type = freeswitch # Defines the type of switch behind: <freeswitch>
# rater = internal # Address where to reach the Rater <""|internal|127.0.0.1:2013>
# cdrs = # Address where to reach CDR Server, empty to disable CDR capturing <""|internal|127.0.0.1:2013>
# reconnects = 3 # Number of reconnects to rater/cdrs before giving up.
# debit_interval = 10 # Interval to perform debits on.
# min_call_duration = 0s # Only authorize calls with allowed duration bigger than this
# max_call_duration = 3h # Maximum call duration a prepaid call can last

[freeswitch]
# server = 127.0.0.1:8021 # Adress where to connect to FreeSWITCH socket.
# passwd = ClueCon # FreeSWITCH socket password.
# reconnects = 5 # Number of attempts on connect failure.
# min_dur_low_balance = 5s # Threshold which will trigger low balance warnings for prepaid calls (needs to be lower than debit_interval)
# low_balance_ann_file = # File to be played when low balance is reached for prepaid calls
# empty_balance_context = # If defined, prepaid calls will be transfered to this context on empty balance
# empty_balance_ann_file = # File to be played before disconnecting prepaid calls on empty balance (applies only if no context defined)
# cdr_extra_fields = # Extra fields to store in CDRs in case of processing them

[opensips]
# listen_udp = 127.0.0.1:2020 # Address where to listen for datagram events coming from OpenSIPS
# mi_addr = 127.0.0.1:8020 # Adress where to reach OpenSIPS mi_datagram module
# events_subscribe_interval = 60s # Automatic events subscription to OpenSIPS, 0 to disable it
# reconnects = 3 # Number of attempts on connect failure.

[derived_charging]
# run_ids = # Identifiers of additional sessions control.
# run_filters = # List of cdr field filters for each run.
# reqtype_fields = # Name of request type fields to be used during additional sessions control <""|*default|field_name>.
# direction_fields = # Name of direction fields to be used during additional sessions control <""|*default|field_name>.
# tenant_fields = # Name of tenant fields to be used during additional sessions control <""|*default|field_name>.
# category_fields = # Name of tor fields to be used during additional sessions control <""|*default|field_name>.
# account_fields = # Name of account fields to be used during additional sessions control <""|*default|field_name>.
# subject_fields = # Name of fields to be used during additional sessions control <""|*default|field_name>.
# destination_fields = # Name of destination fields to be used during additional sessions control <""|*default|field_name>.
# setup_time_fields = # Name of setup_time fields to be used during additional sessions control <""|*default|field_name>.
# answer_time_fields = # Name of answer_time fields to be used during additional sessions control <""|*default|field_name>.
# usage_fields = # Name of usage fields to be used during additional sessions control <""|*default|field_name>.
# combined_chargers = true # Combine accounts specific derived_chargers with server configured ones <true|false>.

[history_server]
# enabled = false # Starts History service: <true|false>.
# history_dir = /var/log/cgrates/history # Location on disk where to store history files.
# save_interval = 1s # Interval to save changed cache into .git archive

[history_agent]
# enabled = false # Starts History as a client: <true|false>.
# server = internal # Address where to reach the master history server: <internal|x.y.z.y:1234>

[mailer]
# server = localhost # The server to use when sending emails out
# auth_user = cgrates # Authenticate to email server using this user
# auth_passwd = CGRateS.org # Authenticate to email server with this password
# from_address = cgr-mailer@localhost.localdomain # From address used when sending emails out
docs/cgrates_json.rst (new file, 260 lines)

@@ -0,0 +1,260 @@
cgr-engine configuration file
=============================

Organized into configuration sections. All configuration options come with defaults and we have tried our best to choose the best ones for a minimum of effort necessary when running.

Below is the default configuration file which comes hardcoded into cgr-engine, most of the options being explained and exemplified there.

::

{

// Real-time Charging System for Telecom & ISP environments
// Copyright (C) ITsysCOM GmbH
//
// This file contains the default configuration hardcoded into CGRateS.
// This is what you get when you load CGRateS with an empty configuration file.

//"general": {
// "http_skip_tls_verify": false, // if enabled Http Client will accept any TLS certificate
// "rounding_decimals": 10, // system level precision for floats
// "dbdata_encoding": "msgpack", // encoding used to store object data in strings: <msgpack|json>
// "tpexport_dir": "/var/log/cgrates/tpe", // path towards export folder for offline Tariff Plans
// "default_reqtype": "*rated", // default request type to consider when missing from requests: <""|*prepaid|*postpaid|*pseudoprepaid|*rated>
// "default_category": "call", // default Type of Record to consider when missing from requests
// "default_tenant": "cgrates.org", // default Tenant to consider when missing from requests
// "default_subject": "cgrates", // default rating Subject to consider when missing from requests
// "connect_attempts": 3, // initial server connect attempts
// "reconnects": -1, // number of retries in case of connection lost
//},

//"listen": {
// "rpc_json": "127.0.0.1:2012", // RPC JSON listening address
// "rpc_gob": "127.0.0.1:2013", // RPC GOB listening address
// "http": "127.0.0.1:2080", // HTTP listening address
//},

//"tariffplan_db": { // database used to store active tariff plan configuration
// "db_type": "redis", // tariffplan_db type: <redis>
// "db_host": "127.0.0.1", // tariffplan_db host address
// "db_port": 6379, // port to reach the tariffplan_db
// "db_name": "10", // tariffplan_db name to connect to
// "db_user": "", // username to use when connecting to tariffplan_db
// "db_passwd": "", // password to use when connecting to tariffplan_db
//},

//"data_db": { // database used to store runtime data (eg: accounts, cdr stats)
// "db_type": "redis", // data_db type: <redis>
// "db_host": "127.0.0.1", // data_db host address
// "db_port": 6379, // data_db port to reach the database
// "db_name": "11", // data_db database name to connect to
// "db_user": "", // username to use when connecting to data_db
// "db_passwd": "", // password to use when connecting to data_db
//},

//"stor_db": { // database used to store offline tariff plans and CDRs
// "db_type": "mysql", // stor database type to use: <mysql|postgres>
// "db_host": "127.0.0.1", // the host to connect to
// "db_port": 3306, // the port to reach the stordb
// "db_name": "cgrates", // stor database name
// "db_user": "cgrates", // username to use when connecting to stordb
// "db_passwd": "CGRateS.org", // password to use when connecting to stordb
// "max_open_conns": 100, // maximum database connections opened
// "max_idle_conns": 10, // maximum database connections idle
//},

//"balancer": {
// "enabled": false, // start Balancer service: <true|false>
//},

//"rater": {
// "enabled": false, // enable Rater service: <true|false>
// "balancer": "", // register to balancer as worker: <""|internal|x.y.z.y:1234>
// "cdrstats": "", // address where to reach the cdrstats service, empty to disable stats functionality: <""|internal|x.y.z.y:1234>
// "historys": "", // address where to reach the history service, empty to disable history functionality: <""|internal|x.y.z.y:1234>
// "pubsubs": "", // address where to reach the pubsub service, empty to disable pubsub functionality: <""|internal|x.y.z.y:1234>
// "users": "", // address where to reach the user service, empty to disable user profile functionality: <""|internal|x.y.z.y:1234>
//},

//"scheduler": {
// "enabled": false, // start Scheduler service: <true|false>
//},

//"cdrs": {
// "enabled": false, // start the CDR Server service: <true|false>
// "extra_fields": [], // extra fields to store in CDRs for non-generic CDRs
// "store_cdrs": true, // store cdrs in storDb
// "rater": "internal", // address where to reach the Rater for cost calculation, empty to disable functionality: <""|internal|x.y.z.y:1234>
// "cdrstats": "", // address where to reach the cdrstats service, empty to disable stats functionality<""|internal|x.y.z.y:1234>
// "reconnects": 5, // number of reconnect attempts to rater or cdrs
// "cdr_replication":[], // replicate the raw CDR to a number of servers
//},

//"cdrstats": {
// "enabled": false, // starts the cdrstats service: <true|false>
// "save_interval": "1m", // interval to save changed stats into dataDb storage
//},

//"cdre": {
// "*default": {
// "cdr_format": "csv", // exported CDRs format <csv>
// "field_separator": ",",
// "data_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from KBytes to Bytes)
// "sms_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from SMS unit to call duration in some billing systems)
// "generic_usage_multiply_factor": 1, // multiply data usage before export (eg: convert from GENERIC unit to call duration in some billing systems)
// "cost_multiply_factor": 1, // multiply cost before export, eg: add VAT
// "cost_rounding_decimals": -1, // rounding decimals for Cost values. -1 to disable rounding
// "cost_shift_digits": 0, // shift digits in the cost on export (eg: convert from EUR to cents)
// "mask_destination_id": "MASKED_DESTINATIONS", // destination id containing called addresses to be masked on export
// "mask_length": 0, // length of the destination suffix to be masked
// "export_dir": "/var/log/cgrates/cdre", // path where the exported CDRs will be placed
// "header_fields": [], // template of the exported header fields
// "content_fields": [ // template of the exported content fields
// {"tag": "CgrId", "cdr_field_id": "cgrid", "type": "cdrfield", "value": "cgrid"},
// {"tag":"RunId", "cdr_field_id": "mediation_runid", "type": "cdrfield", "value": "mediation_runid"},
// {"tag":"Tor", "cdr_field_id": "tor", "type": "cdrfield", "value": "tor"},
// {"tag":"AccId", "cdr_field_id": "accid", "type": "cdrfield", "value": "accid"},
// {"tag":"ReqType", "cdr_field_id": "reqtype", "type": "cdrfield", "value": "reqtype"},
// {"tag":"Direction", "cdr_field_id": "direction", "type": "cdrfield", "value": "direction"},
// {"tag":"Tenant", "cdr_field_id": "tenant", "type": "cdrfield", "value": "tenant"},
// {"tag":"Category", "cdr_field_id": "category", "type": "cdrfield", "value": "category"},
// {"tag":"Account", "cdr_field_id": "account", "type": "cdrfield", "value": "account"},
// {"tag":"Subject", "cdr_field_id": "subject", "type": "cdrfield", "value": "subject"},
// {"tag":"Destination", "cdr_field_id": "destination", "type": "cdrfield", "value": "destination"},
// {"tag":"SetupTime", "cdr_field_id": "setup_time", "type": "cdrfield", "value": "setup_time", "layout": "2006-01-02T15:04:05Z07:00"},
// {"tag":"AnswerTime", "cdr_field_id": "answer_time", "type": "cdrfield", "value": "answer_time", "layout": "2006-01-02T15:04:05Z07:00"},
// {"tag":"Usage", "cdr_field_id": "usage", "type": "cdrfield", "value": "usage"},
// {"tag":"Cost", "cdr_field_id": "cost", "type": "cdrfield", "value": "cost"},
// ],
// "trailer_fields": [], // template of the exported trailer fields
// }
//},

//"cdrc": {
// "*default": {
// "enabled": false, // enable CDR client functionality
// "dry_run": false, // do not send the CDRs to CDRS, just parse them
// "cdrs": "internal", // address where to reach CDR server. <internal|x.y.z.y:1234>
// "cdr_format": "csv", // CDR file format <csv|freeswitch_csv|fwv|opensips_flatstore>
// "field_separator": ",", // separator used in case of csv files
// "run_delay": 0, // sleep interval in seconds between consecutive runs, 0 to use automation via inotify
// "max_open_files": 1024, // maximum simultaneous files to process, 0 for unlimited
// "data_usage_multiply_factor": 1024, // conversion factor for data usage
// "cdr_in_dir": "/var/log/cgrates/cdrc/in", // absolute path towards the directory where the CDRs are stored
// "cdr_out_dir": "/var/log/cgrates/cdrc/out", // absolute path towards the directory where processed CDRs will be moved
// "failed_calls_prefix": "missed_calls", // used in case of flatstore CDRs to avoid searching for BYE records
// "cdr_source_id": "freeswitch_csv", // free form field, tag identifying the source of the CDRs within CDRS database
// "cdr_filter": "", // filter CDR records to import
// "partial_record_cache": "10s", // duration to cache partial records when not pairing
// "header_fields": [], // template of the import header fields
// "content_fields":[ // import content_fields template, tag will match internally CDR field, in case of .csv value will be represented by index of the field value
// {"tag": "tor", "cdr_field_id": "tor", "type": "cdrfield", "value": "2", "mandatory": true},
// {"tag": "accid", "cdr_field_id": "accid", "type": "cdrfield", "value": "3", "mandatory": true},
// {"tag": "reqtype", "cdr_field_id": "reqtype", "type": "cdrfield", "value": "4", "mandatory": true},
// {"tag": "direction", "cdr_field_id": "direction", "type": "cdrfield", "value": "5", "mandatory": true},
// {"tag": "tenant", "cdr_field_id": "tenant", "type": "cdrfield", "value": "6", "mandatory": true},
// {"tag": "category", "cdr_field_id": "category", "type": "cdrfield", "value": "7", "mandatory": true},
// {"tag": "account", "cdr_field_id": "account", "type": "cdrfield", "value": "8", "mandatory": true},
// {"tag": "subject", "cdr_field_id": "subject", "type": "cdrfield", "value": "9", "mandatory": true},
// {"tag": "destination", "cdr_field_id": "destination", "type": "cdrfield", "value": "10", "mandatory": true},
// {"tag": "setup_time", "cdr_field_id": "setup_time", "type": "cdrfield", "value": "11", "mandatory": true},
// {"tag": "answer_time", "cdr_field_id": "answer_time", "type": "cdrfield", "value": "12", "mandatory": true},
// {"tag": "usage", "cdr_field_id": "usage", "type": "cdrfield", "value": "13", "mandatory": true},
// ],
// "trailer_fields": [], // template of the import trailer fields
// }
//},

//"sm_freeswitch": {
// "enabled": false, // starts SessionManager service: <true|false>
// "rater": "internal", // address where to reach the Rater <""|internal|127.0.0.1:2013>
// "cdrs": "internal", // address where to reach CDR Server, empty to disable CDR capturing <""|internal|x.y.z.y:1234>
// "reconnects": 5, // number of reconnect attempts to rater or cdrs
// "create_cdr": false, // create CDR out of events and sends them to CDRS component
// "cdr_extra_fields": [], // extra fields to store in CDRs when creating them
// "debit_interval": "10s", // interval to perform debits on.
// "min_call_duration": "0s", // only authorize calls with allowed duration higher than this
// "max_call_duration": "3h", // maximum call duration a prepaid call can last
// "min_dur_low_balance": "5s", // threshold which will trigger low balance warnings for prepaid calls (needs to be lower than debit_interval)
// "low_balance_ann_file": "", // file to be played when low balance is reached for prepaid calls
// "empty_balance_context": "", // if defined, prepaid calls will be transferred to this context on empty balance
// "empty_balance_ann_file": "", // file to be played before disconnecting prepaid calls on empty balance (applies only if no context defined)
// "subscribe_park": true, // subscribe via fsock to receive park events
// "channel_sync_interval": "5m", // sync channels with freeswitch regularly
// "connections":[ // instantiate connections to multiple FreeSWITCH servers
// {"server": "127.0.0.1:8021", "password": "ClueCon", "reconnects": 5}
// ],
//},

//"sm_kamailio": {
// "enabled": false, // starts SessionManager service: <true|false>
// "rater": "internal", // address where to reach the Rater <""|internal|127.0.0.1:2013>
// "cdrs": "internal", // address where to reach CDR Server, empty to disable CDR capturing <""|internal|x.y.z.y:1234>
// "reconnects": 5, // number of reconnect attempts to rater or cdrs
// "create_cdr": false, // create CDR out of events and sends them to CDRS component
// "debit_interval": "10s", // interval to perform debits on.
// "min_call_duration": "0s", // only authorize calls with allowed duration higher than this
// "max_call_duration": "3h", // maximum call duration a prepaid call can last
// "connections":[ // instantiate connections to multiple Kamailio servers
// {"evapi_addr": "127.0.0.1:8448", "reconnects": 5}
// ],
//},

//"sm_opensips": {
// "enabled": false, // starts SessionManager service: <true|false>
// "listen_udp": "127.0.0.1:2020", // address where to listen for datagram events coming from OpenSIPS
// "rater": "internal", // address where to reach the Rater <""|internal|127.0.0.1:2013>
// "cdrs": "internal", // address where to reach CDR Server, empty to disable CDR capturing <""|internal|x.y.z.y:1234>
// "reconnects": 5, // number of reconnects if connection is lost
// "create_cdr": false, // create CDR out of events and sends them to CDRS component
// "debit_interval": "10s", // interval to perform debits on.
// "min_call_duration": "0s", // only authorize calls with allowed duration higher than this
// "max_call_duration": "3h", // maximum call duration a prepaid call can last
// "events_subscribe_interval": "60s", // automatic events subscription to OpenSIPS, 0 to disable it
// "mi_addr": "127.0.0.1:8020", // address where to reach OpenSIPS MI to send session disconnects
//},

//"historys": {
// "enabled": false, // starts History service: <true|false>.
// "history_dir": "/var/log/cgrates/history", // location on disk where to store history files.
// "save_interval": "1s", // interval to save changed cache into .git archive
//},

//"pubsubs": {
// "enabled": false, // starts PubSub service: <true|false>.
//},

//"users": {
// "enabled": false, // starts User service: <true|false>.
// "indexes": [], // user profile field indexes
//},

//"mailer": {
// "server": "localhost", // the server to use when sending emails out
// "auth_user": "cgrates", // authenticate to email server using this user
// "auth_passwd": "CGRateS.org", // authenticate to email server with this password
// "from_address": "cgr-mailer@localhost.localdomain" // from address used when sending emails out
//},

}

:file: ../data/conf/cgrates/cgrates.json
@@ -3,14 +3,14 @@ Configuration

The behaviour of **CGRateS** can be externally influenced by following means:

- Engine configuration file, ussually located at */etc/cgrates/cgrates.cfg*
- Tariff Plans: set of files used to import customer rating and accounting data into CGRateS.
- Engine configuration files, usually located at */etc/cgrates/*. There can be one or multiple file/folder hierarchies behind configuration folder with support for automatic includes. The folders/files will be imported in alphabetical order into final configuration object.
- Tariff Plans: set of files used to import various data used in CGRateS subsystems (eg: Rating, Accounting, LCR, DerivedCharging, etc).
- RPC APIs: set of JSON/GOB encoded APIs remotely available for various operational/administrative tasks.

.. toctree::
:maxdepth: 2

cgrates_cfg
cgrates_json
tariff_plans
@@ -7,8 +7,8 @@ We recommend using source installs for advanced users familiar with Go programmi
3.1. Using packages
-------------------

3.1.2. Debian Wheezy
~~~~~~~~~~~~~~~~~~~~
3.1.2. Debian Jessie/Wheezy
~~~~~~~~~~~~~~~~~~~~~~~~~~~

This is for the moment the only packaged and the most recommended to use method to install CGRateS.

@@ -22,7 +22,7 @@ On the server you want to install CGRateS, simply execute the following commands
apt-get install cgrates

Once the installation is completed, one should perform the post-install section in order to have the CGRateS properly set and ready to run.
After post-install actions are performed, CGRateS will be configured in */etc/cgrates/cgrates.cfg* and enabled in */etc/default/cgrates*.
After post-install actions are performed, CGRateS will be configured in */etc/cgrates/cgrates.json* and enabled in */etc/default/cgrates*.

3.2. Using source
-----------------

@@ -45,18 +45,20 @@ Database setup
~~~~~~~~~~~~~~

For its operation CGRateS uses several database types, depending on their nature, with further install and configuration steps necessary.

At present we support the following databases:

As DataDB types (rating and accounting subsystems):

- Redis_

As StorDB (persistent storage for CDRs and tariff plan versions).

Used as DataDb, optimized for real-time information access.
Once installed there should be no special requirements in terms of setup since no schema is necessary.

- MySQL_

Used as StorDb, optimized for CDR archiving and offline Tariff Plan versioning.
Once database is installed, CGRateS database needs to be set-up out of provided scripts (example for the paths set-up by debian package)

::

@@ -64,8 +66,19 @@ Once database is installed, CGRateS database needs to be set-up out of provided
cd /usr/share/cgrates/storage/mysql/
./setup_cgr_db.sh root CGRateS.org localhost

- PostgreSQL_

Used as StorDb, optimized for CDR archiving and offline Tariff Plan versioning.
Once database is installed, CGRateS database needs to be set-up out of provided scripts (example for the paths set-up by debian package)

::

cd /usr/share/cgrates/storage/postgres/
./setup_cgr_db.sh

.. _Redis: http://redis.io/
.. _MySQL: http://www.mysql.org/
.. _PostgreSQL: http://www.postgresql.org/

Git
@@ -1,7 +1,7 @@
FreeSWITCH Integration Tutorials
================================

In these tutorials we exemplify few cases of integration between FreeSWITCH_ and **CGRateS**. We start with common steps, installation and postinstall processes then we dive into particular configurations, depending on the case we run.
In these tutorials we exemplify few cases of integration between FreeSWITCH_ and **CGRateS**. We start with common steps, installation and postinstall processes then we dive into particular configurations.

.. toctree::

@@ -10,7 +10,6 @@ In these tutorials we exemplify few cases of integration between FreeSWITCH_ and
tut_freeswitch_installs
tut_cgrates_installs
tut_jitsi_installs
tut_freeswitch_csv
tut_freeswitch_json
tut_cgrates_usage
@@ -4,7 +4,7 @@ FreeSWITCH_ generating *http-json* CDRs
Scenario
--------

- FreeSWITCH with *vanilla* configuration, replacing *mod_cdr_csv* with *mod_json_cdr*.
- FreeSWITCH with *vanilla* configuration adding *mod_json_cdr* for CDR generation.

- Modified following users (with configs in *etc/freeswitch/directory/default*): 1001-prepaid, 1002-postpaid, 1003-pseudoprepaid, 1004-rated, 1006-prepaid, 1007-rated.
- Have added inside default dialplan CGR own extensions just before routing towards users (*etc/freeswitch/dialplan/default.xml*).

@@ -13,8 +13,8 @@ Scenario
- **CGRateS** with following components:

- CGR-SM started as prepaid controller, with debits taking place at 5s intervals.
- CGR-Mediator component attaching costs to the raw CDRs from FreeSWITCH_ inside CGR StorDB.
- CGR-CDRE exporting mediated CDRs from CGR StorDB (export path: */tmp*).
- CGR-CDRS component receiving raw CDRs from FreeSWITCH, storing them and attaching costs inside CGR StorDB.
- CGR-CDRE exporting processed CDRs from CGR StorDB (export path: */tmp*).
- CGR-History component keeping the archive of the rates modifications (path browsable with git client at */tmp/cgr_history*).

@@ -23,7 +23,7 @@ Starting FreeSWITCH_ with custom configuration

::

/usr/share/cgrates/tutorials/fs_json/freeswitch/etc/init.d/freeswitch start
/usr/share/cgrates/tutorials/fs_evsock/freeswitch/etc/init.d/freeswitch start

To verify that FreeSWITCH_ is running we run the console command:

@@ -37,7 +37,7 @@ Starting **CGRateS** with custom configuration

::

/usr/share/cgrates/tutorials/fs_json/cgrates/etc/init.d/cgrates start
/usr/share/cgrates/tutorials/fs_evsock/cgrates/etc/init.d/cgrates start

Check that cgrates is running

@@ -49,7 +49,7 @@ Check that cgrates is running
CDR processing
--------------

At the end of each call FreeSWITCH_ will issue a http post with the CDR. This will reach inside **CGRateS** through the *CDRS* component (close to real-time). Once in-there it will be instantly mediated and it is ready to be exported:
At the end of each call FreeSWITCH_ will issue a http post with the CDR. This will reach inside **CGRateS** through the *CDRS* component (close to real-time). Once in-there it will be instantly rated and it is ready to be exported:

::
docs/tut_kamailio.rst (new file, 18 lines)

@@ -0,0 +1,18 @@
Kamailio_ Integration Tutorials
===============================

In these tutorials we exemplify few cases of integration between Kamailio_ and CGRateS_. We start with common steps, installation and postinstall processes then we dive into particular configurations, depending on the case we run.

.. toctree::
:maxdepth: 2

tut_kamailio_installs
tut_cgrates_installs
tut_jitsi_installs
tut_kamailio_evapi
tut_cgrates_usage

.. _Kamailio: http://www.kamailio.org/
.. _CGRateS: http://www.cgrates.org/
docs/tut_kamailio_evapi.rst (new file, 59 lines)

@@ -0,0 +1,59 @@
Kamailio_ interaction via *evapi* module
=========================================

Scenario
--------

- Kamailio default configuration modified for **CGRateS** interaction. For script maintainability and simplicity we have separated CGRateS specific routes in *kamailio-cgrates.cfg* file which is included in main *kamailio.cfg* via include directive.

- Considering the following users (with configs hardcoded in the *kamailio.cfg* configuration script and loaded in htable): 1001-prepaid, 1002-postpaid, 1003-pseudoprepaid, 1004-rated, 1005-rated, 1006-prepaid, 1007-prepaid.

- **CGRateS** with following components:

- CGR-SM started as translator between Kamailio_ and CGR-Rater for both authorization events as well as accounting ones.
- CGR-CDRS component processing raw CDRs from CGR-SM component and storing them inside CGR StorDB.
- CGR-CDRE exporting rated CDRs from CGR StorDB (export path: */tmp*).
- CGR-History component keeping the archive of the rates modifications (path browsable with git client at */tmp/cgr_history*).

Starting Kamailio_ with custom configuration
----------------------------------------------

::

/usr/share/cgrates/tutorials/kamevapi/kamailio/etc/init.d/kamailio start

To verify that Kamailio_ is running we run the console command:

::

kamctl moni

Starting **CGRateS** with custom configuration
----------------------------------------------

::

/usr/share/cgrates/tutorials/kamevapi/cgrates/etc/init.d/cgrates start

Make sure that cgrates is running

::

cgr-console status

CDR processing
--------------

At the end of each call Kamailio_ will generate a CDR event via *evapi* and this will be directed towards the port configured inside *cgrates.json*. This event will reach inside **CGRateS** through the *SM* component (close to real-time). Once in-there it will be instantly rated and be ready for export.

**CGRateS** Usage
-----------------

Since it is common to most of the tutorials, the example for **CGRateS** usage is provided in a separate page `here <http://cgrates.readthedocs.org/en/latest/tut_cgrates_usage.html>`_

.. _Kamailio: http://www.kamailio.org/
docs/tut_kamailio_installs.rst (new file, 20 lines)

@@ -0,0 +1,20 @@
Software installation
=====================

As operating system we have chosen Debian Wheezy, since all the software components we use provide packaging for it.

Kamailio_
---------

We got Kamailio_ installed via following commands:
::

apt-key adv --recv-keys --keyserver keyserver.ubuntu.com 0xfb40d3e6508ea4c8
cd /etc/apt/sources.list.d/
wget http://apt.itsyscom.com/conf/kamailio.apt.list .
apt-get update
apt-get install kamailio kamailio-extra-modules kamailio-json-modules

Once installed we proceed with loading the configuration out of specific tutorial cases below.

.. _Kamailio: http://www.kamailio.org/
@@ -12,8 +12,8 @@ Scenario
- **CGRateS** with following components:

- CGR-SM started as translator between OpenSIPS_ and **cgr-rater** for both authorization events (pseudoprepaid) as well as CDR ones.
- CGR-Mediator component attaching costs to the raw CDRs from OpenSIPS_ inside CGR StorDB.
- CGR-CDRE exporting mediated CDRs from CGR StorDB (export path: */tmp*).
- CGR-CDRS component processing raw CDRs from CGR-SM component and storing them inside CGR StorDB.
- CGR-CDRE exporting rated CDRs from CGR StorDB (export path: */tmp*).
- CGR-History component keeping the archive of the rates modifications (path browsable with git client at */tmp/cgr_history*).

@@ -22,7 +22,7 @@ Starting OpenSIPS_ with custom configuration

::

/usr/share/cgrates/tutorials/osips_event/opensips/etc/init.d/opensips start
/usr/share/cgrates/tutorials/osips_async/opensips/etc/init.d/opensips start

To verify that OpenSIPS_ is running we run the console command:

@@ -36,9 +36,9 @@ Starting **CGRateS** with custom configuration

::

/usr/share/cgrates/tutorials/osips_event/cgrates/etc/init.d/cgrates start
/usr/share/cgrates/tutorials/osips_async/cgrates/etc/init.d/cgrates start

Check that cgrates is running
Make sure that cgrates is running

::

@@ -48,7 +48,7 @@ Check that cgrates is running
CDR processing
--------------

At the end of each call OpenSIPS_ will generate a CDR event and due to automatic handler registration built in **CGRateS-SM** component, this will be directed towards the port configured inside *cgrates.cfg*. This event will reach inside **CGRateS** through the *SM* component (close to real-time). Once in-there it will be instantly mediated and it is ready to be exported.
At the end of each call OpenSIPS_ will generate a CDR event and due to automatic handler registration built in **CGRateS-SM** component, this will be directed towards the port configured inside *cgrates.json*. This event will reach inside **CGRateS** through the *SM* component (close to real-time). Once in-there it will be instantly rated and be ready for export.

**CGRateS** Usage

@@ -12,9 +12,9 @@ We got OpenSIPS_ installed via following commands:
wget http://apt.opensips.org/key.asc
apt-key add key.asc
cd /etc/apt/sources.list.d/
wget http://apt.itsyscom.com/conf/opensips.apt.list
wget http://apt.itsyscom.com/conf/opensips.wheezy.apt.list
apt-get update
apt-get install
apt-get install opensips opensips-json-module opensips-restclient-module

Once installed we proceed with loading the configuration out of specific tutorial cases below.
@@ -5,4 +5,5 @@
:maxdepth: 2

tut_freeswitch
tut_kamailio
tut_opensips
@@ -486,7 +486,7 @@ func mailAsync(ub *Account, sq *StatsQueueTriggered, a *Action, acs Actions) err
message = []byte(fmt.Sprintf("To: %s\r\nSubject: [CGR Notification] Threshold hit on Balance: %s\r\n\r\nTime: \r\n\t%s\r\n\r\nBalance:\r\n\t%s\r\n\r\nYours faithfully,\r\nCGR Balance Monitor\r\n", toAddrStr, ub.Id, time.Now(), balJsn))
} else if sq != nil {
message = []byte(fmt.Sprintf("To: %s\r\nSubject: [CGR Notification] Threshold hit on StatsQueueId: %s\r\n\r\nTime: \r\n\t%s\r\n\r\nStatsQueueId:\r\n\t%s\r\n\r\nMetrics:\r\n\t%+v\r\n\r\nTrigger:\r\n\t%+v\r\n\r\nYours faithfully,\r\nCGR CDR Stats Monitor\r\n",
toAddrStr, sq.Id, time.Now(), sq.Id, sq.metrics, sq.Trigger))
toAddrStr, sq.Id, time.Now(), sq.Id, sq.Metrics, sq.Trigger))
}
auth := smtp.PlainAuth("", cgrCfg.MailerAuthUser, cgrCfg.MailerAuthPass, strings.Split(cgrCfg.MailerServer, ":")[0]) // We only need host part, so ignore port
go func() {
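For context, the PlainAuth call above keeps only the host part of the configured mailer address, while the actual send wants the full host:port. A hedged sketch of the send path that plausibly follows inside the goroutine (the MailerFromAddr field name and the single-recipient slice are assumptions, not shown in this hunk):

::

    host := strings.Split(cgrCfg.MailerServer, ":")[0]
    auth := smtp.PlainAuth("", cgrCfg.MailerAuthUser, cgrCfg.MailerAuthPass, host)
    go func() {
        if err := smtp.SendMail(cgrCfg.MailerServer, auth, cgrCfg.MailerFromAddr,
            []string{toAddrStr}, message); err != nil {
            Logger.Err(fmt.Sprintf("<Actions> error sending notification email: %s", err.Error()))
        }
    }()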
@@ -349,22 +349,6 @@ func (cd *CallDescriptor) splitInTimeSpans() (timespans []*TimeSpan) {
}
}
}
// split on days
/*for i := 0; i < len(timespans); i++ {
if timespans[i].TimeStart.Day() != timespans[i].TimeEnd.Day() {
//log.Print("TS: ", timespans[i].TimeStart, timespans[i].TimeEnd)
start := timespans[i].TimeStart
newTs := timespans[i].SplitByTime(time.Date(start.Year(), start.Month(), start.Day(), 0, 0, 0, 0, start.Location()).Add(24 * time.Hour))
if newTs != nil {
//log.Print("NEW TS: ", newTs.TimeStart, newTs.TimeEnd)
// insert the new timespan
index := i + 1
timespans = append(timespans, nil)
copy(timespans[index+1:], timespans[index:])
timespans[index] = newTs
}
}
}*/
// Logger.Debug(fmt.Sprintf("After SplitByRatingPlan: %+v", timespans))
// split on rate intervals
for i := 0; i < len(timespans); i++ {
@@ -796,7 +780,7 @@ func (cd *CallDescriptor) GetLCRFromStorage() (*LCR, error) {
return nil, utils.ErrNotFound
}

func (cd *CallDescriptor) GetLCR(stats StatsInterface) (*LCRCost, error) {
func (cd *CallDescriptor) GetLCR(stats StatsInterface, p *utils.Paginator) (*LCRCost, error) {
cd.account = nil // make sure it's not cached
lcr, err := cd.GetLCRFromStorage()
if err != nil {

@@ -832,12 +816,13 @@ func (cd *CallDescriptor) GetLCR(stats StatsInterface) (*LCRCost, error) {
lcrCD.Account = supplier
lcrCD.Subject = supplier
lcrCD.Category = lcrCost.Entry.RPCategory
fullSupplier := utils.ConcatenatedKey(lcrCD.Direction, lcrCD.Tenant, lcrCD.Category, lcrCD.Subject)
var cc *CallCost
var err error
if cd.account, err = accountingStorage.GetAccount(lcrCD.GetAccountKey()); err == nil {
if cd.account.Disabled {
lcrCost.SupplierCosts = append(lcrCost.SupplierCosts, &LCRSupplierCost{
Supplier: supplier,
Supplier: fullSupplier,
Error: fmt.Sprintf("supplier %s is disabled", supplier),
})
continue

@@ -847,16 +832,15 @@ func (cd *CallDescriptor) GetLCR(stats StatsInterface) (*LCRCost, error) {
cc, err = lcrCD.GetCost()

}
supplier = utils.ConcatenatedKey(lcrCD.Direction, lcrCD.Tenant, lcrCD.Category, lcrCD.Subject)
//log.Printf("CC: %+v", cc.Timespans[0].ratingInfo.RateIntervals[0].Rating.Rates[0])
if err != nil || cc == nil {
lcrCost.SupplierCosts = append(lcrCost.SupplierCosts, &LCRSupplierCost{
Supplier: supplier,
Supplier: fullSupplier,
Error: err.Error(),
})
} else {
lcrCost.SupplierCosts = append(lcrCost.SupplierCosts, &LCRSupplierCost{
Supplier: supplier,
Supplier: fullSupplier,
Cost: cc.Cost,
Duration: cc.GetDuration(),
})
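From this hunk on, supplier costs are reported under the full rating-profile key built once per supplier (direction, tenant, category, subject) instead of the bare supplier account, which is what lets callers such as utils.NewDTCSFromRPKey in the APIer split the key back apart. A small sketch of the key shape, assuming utils.ConcatenatedKey joins its parts with ":" (values illustrative):

::

    fullSupplier := utils.ConcatenatedKey("*out", "cgrates.org", "call", "supplier1")
    // expected to yield something like "*out:cgrates.org:call:supplier1"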
@@ -879,6 +863,7 @@ func (cd *CallDescriptor) GetLCR(stats StatsInterface) (*LCRCost, error) {
lcrCD.Category = category
lcrCD.Account = supplier
lcrCD.Subject = supplier
fullSupplier := utils.ConcatenatedKey(lcrCD.Direction, lcrCD.Tenant, lcrCD.Category, lcrCD.Subject)
var qosSortParams []string
var asrValues sort.Float64Slice
var pddValues sort.Float64Slice

@@ -898,7 +883,7 @@ func (cd *CallDescriptor) GetLCR(stats StatsInterface) (*LCRCost, error) {
if utils.IsSliceMember([]string{LCR_STRATEGY_QOS, LCR_STRATEGY_QOS_THRESHOLD, LCR_STRATEGY_LOAD}, lcrCost.Entry.Strategy) {
if stats == nil {
lcrCost.SupplierCosts = append(lcrCost.SupplierCosts, &LCRSupplierCost{
Supplier: supplier,
Supplier: fullSupplier,
Error: fmt.Sprintf("Cdr stats service not configured"),
})
continue

@@ -906,7 +891,7 @@ func (cd *CallDescriptor) GetLCR(stats StatsInterface) (*LCRCost, error) {
rpfKey := utils.ConcatenatedKey(ratingProfileSearchKey, supplier)
if rpf, err := ratingStorage.GetRatingProfile(rpfKey, false); err != nil {
lcrCost.SupplierCosts = append(lcrCost.SupplierCosts, &LCRSupplierCost{
Supplier: supplier,
Supplier: fullSupplier,
Error: fmt.Sprintf("Rating plan error: %s", err.Error()),
})
continue

@@ -938,7 +923,7 @@ func (cd *CallDescriptor) GetLCR(stats StatsInterface) (*LCRCost, error) {
statValues := make(map[string]float64)
if err := stats.GetValues(qId, &statValues); err != nil {
lcrCost.SupplierCosts = append(lcrCost.SupplierCosts, &LCRSupplierCost{
Supplier: supplier,
Supplier: fullSupplier,
Error: fmt.Sprintf("Get stats values for queue id %s, error %s", qId, err.Error()),
})
statsErr = true

@@ -992,7 +977,7 @@ func (cd *CallDescriptor) GetLCR(stats StatsInterface) (*LCRCost, error) {
if lcrCost.Entry.Strategy == LCR_STRATEGY_LOAD {
if len(supplierQueues) > 0 {
lcrCost.SupplierCosts = append(lcrCost.SupplierCosts, &LCRSupplierCost{
Supplier: supplier,
Supplier: fullSupplier,
supplierQueues: supplierQueues,
})
}

@@ -1072,7 +1057,7 @@ func (cd *CallDescriptor) GetLCR(stats StatsInterface) (*LCRCost, error) {
//log.Print("ACCCOUNT")
if cd.account.Disabled {
lcrCost.SupplierCosts = append(lcrCost.SupplierCosts, &LCRSupplierCost{
Supplier: supplier,
Supplier: fullSupplier,
Error: fmt.Sprintf("supplier %s is disabled", supplier),
})
continue

@@ -1083,16 +1068,15 @@ func (cd *CallDescriptor) GetLCR(stats StatsInterface) (*LCRCost, error) {
cc, err = lcrCD.GetCost()
}
//log.Printf("CC: %+v", cc)
supplier = utils.ConcatenatedKey(lcrCD.Direction, lcrCD.Tenant, lcrCD.Category, lcrCD.Subject)
if err != nil || cc == nil {
lcrCost.SupplierCosts = append(lcrCost.SupplierCosts, &LCRSupplierCost{
Supplier: supplier,
Supplier: fullSupplier,
Error: err.Error(),
})
continue
} else {
supplCost := &LCRSupplierCost{
Supplier: supplier,
Supplier: fullSupplier,
Cost: cc.Cost,
Duration: cc.GetDuration(),
}

@@ -1128,5 +1112,13 @@ func (cd *CallDescriptor) GetLCR(stats StatsInterface) (*LCRCost, error) {
// sort according to strategy
lcrCost.Sort()
}
if p != nil {
if p.Offset != nil && *p.Offset > 0 && *p.Offset < len(lcrCost.SupplierCosts) {
lcrCost.SupplierCosts = lcrCost.SupplierCosts[*p.Offset:]
}
if p.Limit != nil && *p.Limit > 0 && *p.Limit < len(lcrCost.SupplierCosts) {
lcrCost.SupplierCosts = lcrCost.SupplierCosts[:*p.Limit]
}
}
return lcrCost, nil
}
@@ -48,14 +48,16 @@ const (
|
||||
|
||||
// A request for LCR, used in APIer and SM where we need to expose it
|
||||
type LcrRequest struct {
|
||||
Direction string
|
||||
Tenant string
|
||||
Category string
|
||||
Account string
|
||||
Subject string
|
||||
Destination string
|
||||
StartTime string
|
||||
Duration string
|
||||
Direction string
|
||||
Tenant string
|
||||
Category string
|
||||
Account string
|
||||
Subject string
|
||||
Destination string
|
||||
SetupTime string
|
||||
Duration string
|
||||
IgnoreErrors bool
|
||||
*utils.Paginator
|
||||
}
|
||||
|
||||
func (self *LcrRequest) AsCallDescriptor() (*CallDescriptor, error) {
|
||||
@@ -77,9 +79,9 @@ func (self *LcrRequest) AsCallDescriptor() (*CallDescriptor, error) {
|
||||
}
|
||||
var timeStart time.Time
|
||||
var err error
|
||||
if len(self.StartTime) == 0 {
|
||||
if len(self.SetupTime) == 0 {
|
||||
timeStart = time.Now()
|
||||
} else if timeStart, err = utils.ParseTimeDetectLayout(self.StartTime); err != nil {
|
||||
} else if timeStart, err = utils.ParseTimeDetectLayout(self.SetupTime); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var callDur time.Duration
|
||||
@@ -433,6 +435,9 @@ func (lc *LCRCost) SuppliersSlice() ([]string, error) {
|
||||
}
|
||||
supps := []string{}
|
||||
for _, supplCost := range lc.SupplierCosts {
|
||||
if supplCost.Error != "" {
|
||||
continue // Do not add the supplier with cost errors to list of suppliers available
|
||||
}
|
||||
if dtcs, err := utils.NewDTCSFromRPKey(supplCost.Supplier); err != nil {
|
||||
return nil, err
|
||||
} else if len(dtcs.Subject) != 0 {
|
||||
|
||||
@@ -204,7 +204,7 @@ func TestLcrGet(t *testing.T) {
|
||||
Account: "rif",
|
||||
Subject: "rif",
|
||||
}
|
||||
lcr, err := cd.GetLCR(nil)
|
||||
lcr, err := cd.GetLCR(nil, nil)
|
||||
//jsn, _ := json.Marshal(lcr)
|
||||
//log.Print("LCR: ", string(jsn))
|
||||
if err != nil || lcr == nil {
|
||||
@@ -215,11 +215,11 @@ func TestLcrGet(t *testing.T) {
|
||||
func TestLcrRequestAsCallDescriptor(t *testing.T) {
|
||||
sTime := time.Date(2015, 04, 06, 17, 40, 0, 0, time.UTC)
|
||||
callDur := time.Duration(1) * time.Minute
|
||||
lcrReq := &LcrRequest{Account: "2001", StartTime: sTime.String()}
|
||||
lcrReq := &LcrRequest{Account: "2001", SetupTime: sTime.String()}
|
||||
if _, err := lcrReq.AsCallDescriptor(); err == nil || err != utils.ErrMandatoryIeMissing {
|
||||
t.Error("Unexpected error received: %v", err)
|
||||
}
|
||||
lcrReq = &LcrRequest{Account: "2001", Destination: "2002", StartTime: sTime.String()}
|
||||
lcrReq = &LcrRequest{Account: "2001", Destination: "2002", SetupTime: sTime.String()}
|
||||
eCd := &CallDescriptor{
|
||||
Direction: utils.OUT,
|
||||
Tenant: config.CgrConfig().DefaultTenant,
|
||||
|
||||
@@ -41,6 +41,11 @@ type SessionRun struct {
|
||||
CallCosts []*CallCost
|
||||
}
|
||||
|
||||
type AttrGetLcr struct {
|
||||
*CallDescriptor
|
||||
*utils.Paginator
|
||||
}
|
||||
|
||||
type Responder struct {
|
||||
Bal *balancer2go.Balancer
|
||||
ExitChan chan bool
|
||||
@@ -386,20 +391,25 @@ func (rs *Responder) LogCallCost(ccl *CallCostLog, reply *string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rs *Responder) GetLCR(cd *CallDescriptor, reply *LCRCost) error {
|
||||
if cd.Subject == "" {
|
||||
cd.Subject = cd.Account
|
||||
func (rs *Responder) GetLCR(attrs *AttrGetLcr, reply *LCRCost) error {
|
||||
if attrs.CallDescriptor.Subject == "" {
|
||||
attrs.CallDescriptor.Subject = attrs.CallDescriptor.Account
|
||||
}
|
||||
if upData, err := LoadUserProfile(cd, "ExtraFields"); err != nil {
|
||||
if upData, err := LoadUserProfile(attrs.CallDescriptor, "ExtraFields"); err != nil {
|
||||
return err
|
||||
} else {
|
||||
udRcv := upData.(*CallDescriptor)
|
||||
*cd = *udRcv
|
||||
*attrs.CallDescriptor = *udRcv
|
||||
}
|
||||
lcrCost, err := cd.GetLCR(rs.Stats)
|
||||
lcrCost, err := attrs.CallDescriptor.GetLCR(rs.Stats, attrs.Paginator)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if lcrCost.Entry.Strategy == LCR_STRATEGY_LOAD {
|
||||
for _, suppl := range lcrCost.SupplierCosts {
|
||||
suppl.Cost = -1 // In case of load distribution we don't calculate costs
|
||||
}
|
||||
}
|
||||
*reply = *lcrCost
|
||||
return nil
|
||||
}
|
||||
@@ -570,7 +580,7 @@ type Connector interface {
|
||||
GetSessionRuns(*StoredCdr, *[]*SessionRun) error
|
||||
ProcessCdr(*StoredCdr, *string) error
|
||||
LogCallCost(*CallCostLog, *string) error
|
||||
GetLCR(*CallDescriptor, *LCRCost) error
|
||||
GetLCR(*AttrGetLcr, *LCRCost) error
|
||||
GetTimeout() time.Duration
|
||||
}
|
||||
|
||||
@@ -619,8 +629,8 @@ func (rcc *RPCClientConnector) LogCallCost(ccl *CallCostLog, reply *string) erro
|
||||
return rcc.Client.Call("CDRSV1.LogCallCost", ccl, reply)
|
||||
}
|
||||
|
||||
func (rcc *RPCClientConnector) GetLCR(cd *CallDescriptor, reply *LCRCost) error {
|
||||
return rcc.Client.Call("Responder.GetLCR", cd, reply)
|
||||
func (rcc *RPCClientConnector) GetLCR(attrs *AttrGetLcr, reply *LCRCost) error {
|
||||
return rcc.Client.Call("Responder.GetLCR", attrs, reply)
|
||||
}
|
||||
|
||||
func (rcc *RPCClientConnector) GetTimeout() time.Duration {
|
||||
|
||||
@@ -395,7 +395,7 @@ func TestGetLCR(t *testing.T) {
|
||||
},
|
||||
}
|
||||
var lcr LCRCost
|
||||
if err := rsponder.GetLCR(cdStatic, &lcr); err != nil {
|
||||
if err := rsponder.GetLCR(&AttrGetLcr{CallDescriptor: cdStatic}, &lcr); err != nil {
|
||||
t.Error(err)
|
||||
} else if !reflect.DeepEqual(eStLcr.Entry, lcr.Entry) {
|
||||
t.Errorf("Expecting: %+v, received: %+v", eStLcr.Entry, lcr.Entry)
|
||||
@@ -422,7 +422,7 @@ func TestGetLCR(t *testing.T) {
|
||||
},
|
||||
}
|
||||
var lcrLc LCRCost
|
||||
if err := rsponder.GetLCR(cdLowestCost, &lcrLc); err != nil {
|
||||
if err := rsponder.GetLCR(&AttrGetLcr{CallDescriptor: cdLowestCost}, &lcrLc); err != nil {
|
||||
t.Error(err)
|
||||
} else if !reflect.DeepEqual(eLcLcr.Entry, lcrLc.Entry) {
|
||||
t.Errorf("Expecting: %+v, received: %+v", eLcLcr.Entry, lcrLc.Entry)
|
||||
@@ -447,7 +447,7 @@ func TestGetLCR(t *testing.T) {
|
||||
&LCRSupplierCost{Supplier: "*out:tenant12:call:dan12", Cost: 0.6, Duration: 60 * time.Second},
|
||||
},
|
||||
}
|
||||
if err := rsponder.GetLCR(cdLowestCost, &lcrLc); err != nil {
|
||||
if err := rsponder.GetLCR(&AttrGetLcr{CallDescriptor: cdLowestCost}, &lcrLc); err != nil {
|
||||
t.Error(err)
|
||||
} else if !reflect.DeepEqual(eLcLcr.Entry, lcrLc.Entry) {
|
||||
t.Errorf("Expecting: %+v, received: %+v", eLcLcr.Entry, lcrLc.Entry)
|
||||
@@ -476,7 +476,7 @@ func TestGetLCR(t *testing.T) {
|
||||
},
|
||||
}
|
||||
var lcrQT LCRCost
|
||||
if err := rsponder.GetLCR(cdQosThreshold, &lcrQT); err != nil {
|
||||
if err := rsponder.GetLCR(&AttrGetLcr{CallDescriptor: cdQosThreshold}, &lcrQT); err != nil {
|
||||
t.Error(err)
|
||||
} else if !reflect.DeepEqual(eQTLcr.Entry, lcrQT.Entry) {
|
||||
t.Errorf("Expecting: %+v, received: %+v", eQTLcr.Entry, lcrQT.Entry)
|
||||
@@ -495,7 +495,7 @@ func TestGetLCR(t *testing.T) {
|
||||
&LCRSupplierCost{Supplier: "*out:tenant12:call:dan12", Cost: 0.6, Duration: 60 * time.Second, QOS: map[string]float64{PDD: -1, ACD: 300, TCD: 300, ASR: 100, ACC: 2, TCC: 2, DDC: 2}, qosSortParams: []string{"35", "4m"}},
|
||||
},
|
||||
}
|
||||
if err := rsponder.GetLCR(cdQosThreshold, &lcrQT); err != nil {
|
||||
if err := rsponder.GetLCR(&AttrGetLcr{CallDescriptor: cdQosThreshold}, &lcrQT); err != nil {
|
||||
t.Error(err)
|
||||
} else if !reflect.DeepEqual(eQTLcr.Entry, lcrQT.Entry) {
|
||||
t.Errorf("Expecting: %+v, received: %+v", eQTLcr.Entry, lcrQT.Entry)
|
||||
@@ -524,7 +524,7 @@ func TestGetLCR(t *testing.T) {
|
||||
},
|
||||
}
|
||||
var lcrQ LCRCost
|
||||
if err := rsponder.GetLCR(cdQos, &lcrQ); err != nil {
|
||||
if err := rsponder.GetLCR(&AttrGetLcr{CallDescriptor: cdQos}, &lcrQ); err != nil {
|
||||
t.Error(err)
|
||||
} else if !reflect.DeepEqual(eQosLcr.Entry, lcrQ.Entry) {
|
||||
t.Errorf("Expecting: %+v, received: %+v", eQosLcr.Entry, lcrQ.Entry)
|
||||
|
||||
@@ -215,12 +215,12 @@ func (sq *StatsQueue) GetId() string {
|
||||
|
||||
// Convert data into a struct which can be used in actions based on triggers hit
|
||||
func (sq *StatsQueue) Triggered(at *ActionTrigger) *StatsQueueTriggered {
|
||||
return &StatsQueueTriggered{Id: sq.conf.Id, metrics: sq.getStats(), Trigger: at}
|
||||
return &StatsQueueTriggered{Id: sq.conf.Id, Metrics: sq.getStats(), Trigger: at}
|
||||
}
|
||||
|
||||
// Struct to be passed to triggered actions
|
||||
type StatsQueueTriggered struct {
|
||||
Id string // StatsQueueId
|
||||
metrics map[string]float64
|
||||
Metrics map[string]float64
|
||||
Trigger *ActionTrigger
|
||||
}
|
||||
|
||||
@@ -416,22 +416,6 @@ func (ts *TimeSpan) SplitByRateInterval(i *RateInterval, data bool) (nts *TimeSp
|
||||
return
|
||||
}
|
||||
|
||||
/*func (ts *TimeSpan) SplitByTime(splitTime time.Time) (nts *TimeSpan) {
|
||||
if splitTime.Equal(ts.TimeEnd) {
|
||||
return
|
||||
}
|
||||
nts = &TimeSpan{
|
||||
TimeStart: splitTime,
|
||||
TimeEnd: ts.TimeEnd,
|
||||
}
|
||||
nts.copyRatingInfo(ts)
|
||||
ts.TimeEnd = splitTime
|
||||
nts.SetRateInterval(ts.RateInterval)
|
||||
nts.DurationIndex = ts.DurationIndex
|
||||
ts.SetNewDurationIndex(nts)
|
||||
return
|
||||
}*/
|
||||
|
||||
// Split the timespan at the given increment start
|
||||
func (ts *TimeSpan) SplitByIncrement(index int) *TimeSpan {
|
||||
if index <= 0 || index >= len(ts.Increments) {
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
"net/rpc/jsonrpc"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -94,13 +95,11 @@ func TestTutFsCallsStartEngine(t *testing.T) {
|
||||
}
|
||||
|
||||
// Restart FS so we make sure reconnects are working
|
||||
|
||||
func TestTutFsCallsRestartFS(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
engine.KillProcName("freeswitch", 5000)
|
||||
if err := engine.CallScript(path.Join(*dataDir, "tutorials", "fs_evsock", "freeswitch", "etc", "init.d", "freeswitch"), "start", 3000); err != nil {
|
||||
if err := engine.CallScript(path.Join(*dataDir, "tutorials", "fs_evsock", "freeswitch", "etc", "init.d", "freeswitch"), "restart", 5000); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -174,6 +173,51 @@ func TestTutFsCallsAccountsBefore(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure all stats queues are in place
|
||||
func TestTutFsCallsCdrStatsBefore(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
//eQueueIds := []string{"*default", "CDRST1", "CDRST_1001", "CDRST_1002", "CDRST_1003", "STATS_SUPPL1", "STATS_SUPPL2"}
|
||||
var statMetrics map[string]float64
|
||||
eMetrics := map[string]float64{engine.ACD: -1, engine.ASR: -1, engine.TCC: -1, engine.TCD: -1, engine.ACC: -1}
|
||||
if err := tutFsCallsRpc.Call("CDRStatsV1.GetMetrics", v1.AttrGetMetrics{StatsQueueId: "CDRST1"}, &statMetrics); err != nil {
|
||||
t.Error("Calling CDRStatsV1.GetMetrics, got error: ", err.Error())
|
||||
} else if !reflect.DeepEqual(eMetrics, statMetrics) {
|
||||
t.Errorf("Expecting: %v, received: %v", eMetrics, statMetrics)
|
||||
}
|
||||
eMetrics = map[string]float64{engine.ACC: -1, engine.ACD: -1, engine.ASR: -1, engine.TCC: -1, engine.TCD: -1}
|
||||
if err := tutFsCallsRpc.Call("CDRStatsV1.GetMetrics", v1.AttrGetMetrics{StatsQueueId: "CDRST_1001"}, &statMetrics); err != nil {
|
||||
t.Error("Calling CDRStatsV1.GetMetrics, got error: ", err.Error())
|
||||
} else if !reflect.DeepEqual(eMetrics, statMetrics) {
|
||||
t.Errorf("Expecting: %v, received: %v", eMetrics, statMetrics)
|
||||
}
|
||||
eMetrics = map[string]float64{engine.ACD: -1, engine.ASR: -1, engine.TCC: -1, engine.TCD: -1, engine.ACC: -1}
|
||||
if err := tutFsCallsRpc.Call("CDRStatsV1.GetMetrics", v1.AttrGetMetrics{StatsQueueId: "CDRST_1002"}, &statMetrics); err != nil {
|
||||
t.Error("Calling CDRStatsV1.GetMetrics, got error: ", err.Error())
|
||||
} else if !reflect.DeepEqual(eMetrics, statMetrics) {
|
||||
t.Errorf("Expecting: %v, received: %v", eMetrics, statMetrics)
|
||||
}
|
||||
eMetrics = map[string]float64{engine.ACD: -1, engine.ASR: -1, engine.TCC: -1, engine.TCD: -1, engine.ACC: -1}
|
||||
if err := tutFsCallsRpc.Call("CDRStatsV1.GetMetrics", v1.AttrGetMetrics{StatsQueueId: "CDRST_1003"}, &statMetrics); err != nil {
|
||||
t.Error("Calling CDRStatsV1.GetMetrics, got error: ", err.Error())
|
||||
} else if !reflect.DeepEqual(eMetrics, statMetrics) {
|
||||
t.Errorf("Expecting: %v, received: %v", eMetrics, statMetrics)
|
||||
}
|
||||
eMetrics = map[string]float64{engine.ACD: -1, engine.ASR: -1, engine.TCC: -1, engine.TCD: -1, engine.ACC: -1}
|
||||
if err := tutFsCallsRpc.Call("CDRStatsV1.GetMetrics", v1.AttrGetMetrics{StatsQueueId: "STATS_SUPPL1"}, &statMetrics); err != nil {
|
||||
t.Error("Calling CDRStatsV1.GetMetrics, got error: ", err.Error())
|
||||
} else if !reflect.DeepEqual(eMetrics, statMetrics) {
|
||||
t.Errorf("Expecting: %v, received: %v", eMetrics, statMetrics)
|
||||
}
|
||||
eMetrics = map[string]float64{engine.ACD: -1, engine.ASR: -1, engine.TCC: -1, engine.TCD: -1, engine.ACC: -1}
|
||||
if err := tutFsCallsRpc.Call("CDRStatsV1.GetMetrics", v1.AttrGetMetrics{StatsQueueId: "STATS_SUPPL2"}, &statMetrics); err != nil {
|
||||
t.Error("Calling CDRStatsV1.GetMetrics, got error: ", err.Error())
|
||||
} else if !reflect.DeepEqual(eMetrics, statMetrics) {
|
||||
t.Errorf("Expecting: %v, received: %v", eMetrics, statMetrics)
|
||||
}
|
||||
}
|
||||
|
||||
// Start Pjsua as listener and register it to receive calls
|
||||
func TestTutFsCallsStartPjsuaListener(t *testing.T) {
|
||||
if !*testCalls {
|
||||
@@ -181,12 +225,12 @@ func TestTutFsCallsStartPjsuaListener(t *testing.T) {
|
||||
}
|
||||
var err error
|
||||
acnts := []*engine.PjsuaAccount{
|
||||
&engine.PjsuaAccount{Id: "sip:1001@127.0.0.1", Username: "1001", Password: "1234", Realm: "*", Registrar: "sip:127.0.0.1:25060"},
|
||||
&engine.PjsuaAccount{Id: "sip:1002@127.0.0.1", Username: "1002", Password: "1234", Realm: "*", Registrar: "sip:127.0.0.1:25060"},
|
||||
&engine.PjsuaAccount{Id: "sip:1003@127.0.0.1", Username: "1003", Password: "1234", Realm: "*", Registrar: "sip:127.0.0.1:25060"},
|
||||
&engine.PjsuaAccount{Id: "sip:1004@127.0.0.1", Username: "1004", Password: "1234", Realm: "*", Registrar: "sip:127.0.0.1:25060"},
|
||||
&engine.PjsuaAccount{Id: "sip:1006@127.0.0.1", Username: "1006", Password: "1234", Realm: "*", Registrar: "sip:127.0.0.1:25060"},
|
||||
&engine.PjsuaAccount{Id: "sip:1007@127.0.0.1", Username: "1007", Password: "1234", Realm: "*", Registrar: "sip:127.0.0.1:25060"}}
|
||||
&engine.PjsuaAccount{Id: "sip:1001@127.0.0.1", Username: "1001", Password: "1234", Realm: "*", Registrar: "sip:127.0.0.1:5060"},
|
||||
&engine.PjsuaAccount{Id: "sip:1002@127.0.0.1", Username: "1002", Password: "1234", Realm: "*", Registrar: "sip:127.0.0.1:5060"},
|
||||
&engine.PjsuaAccount{Id: "sip:1003@127.0.0.1", Username: "1003", Password: "1234", Realm: "*", Registrar: "sip:127.0.0.1:5060"},
|
||||
&engine.PjsuaAccount{Id: "sip:1004@127.0.0.1", Username: "1004", Password: "1234", Realm: "*", Registrar: "sip:127.0.0.1:5060"},
|
||||
&engine.PjsuaAccount{Id: "sip:1006@127.0.0.1", Username: "1006", Password: "1234", Realm: "*", Registrar: "sip:127.0.0.1:5060"},
|
||||
&engine.PjsuaAccount{Id: "sip:1007@127.0.0.1", Username: "1007", Password: "1234", Realm: "*", Registrar: "sip:127.0.0.1:5060"}}
|
||||
if tutFsCallsPjSuaListener, err = engine.StartPjsuaListener(acnts, 5070, *waitRater); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -198,7 +242,7 @@ func TestTutFsCallsCall1001To1002(t *testing.T) {
|
||||
return
|
||||
}
|
||||
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1001@127.0.0.1", Username: "1001", Password: "1234", Realm: "*"}, "sip:1002@127.0.0.1",
|
||||
"sip:127.0.0.1:25060", time.Duration(67)*time.Second, 5071); err != nil {
|
||||
"sip:127.0.0.1:5060", time.Duration(67)*time.Second, 5071); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -209,7 +253,7 @@ func TestTutFsCallsCall1001To1003(t *testing.T) {
|
||||
return
|
||||
}
|
||||
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1001@127.0.0.1", Username: "1001", Password: "1234", Realm: "*"}, "sip:1003@127.0.0.1",
|
||||
"sip:127.0.0.1:25060", time.Duration(65)*time.Second, 5072); err != nil {
|
||||
"sip:127.0.0.1:5060", time.Duration(65)*time.Second, 5072); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -219,7 +263,7 @@ func TestTutFsCallsCall1002To1001(t *testing.T) {
|
||||
return
|
||||
}
|
||||
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1002@127.0.0.1", Username: "1002", Password: "1234", Realm: "*"}, "sip:1001@127.0.0.1",
|
||||
"sip:127.0.0.1:25060", time.Duration(61)*time.Second, 5073); err != nil {
|
||||
"sip:127.0.0.1:5060", time.Duration(61)*time.Second, 5073); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -229,7 +273,7 @@ func TestTutFsCallsCall1003To1001(t *testing.T) {
|
||||
return
|
||||
}
|
||||
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1003@127.0.0.1", Username: "1003", Password: "1234", Realm: "*"}, "sip:1001@127.0.0.1",
|
||||
"sip:127.0.0.1:25060", time.Duration(63)*time.Second, 5074); err != nil {
|
||||
"sip:127.0.0.1:5060", time.Duration(63)*time.Second, 5074); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -239,7 +283,7 @@ func TestTutFsCallsCall1004To1001(t *testing.T) {
|
||||
return
|
||||
}
|
||||
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1004@127.0.0.1", Username: "1004", Password: "1234", Realm: "*"}, "sip:1001@127.0.0.1",
|
||||
"sip:127.0.0.1:25060", time.Duration(62)*time.Second, 5075); err != nil {
|
||||
"sip:127.0.0.1:5060", time.Duration(62)*time.Second, 5075); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -249,7 +293,7 @@ func TestTutFsCallsCall1006To1002(t *testing.T) {
|
||||
return
|
||||
}
|
||||
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1006@127.0.0.1", Username: "1006", Password: "1234", Realm: "*"}, "sip:1002@127.0.0.1",
|
||||
"sip:127.0.0.1:25060", time.Duration(64)*time.Second, 5076); err != nil {
|
||||
"sip:127.0.0.1:5060", time.Duration(64)*time.Second, 5076); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -259,7 +303,7 @@ func TestTutFsCallsCall1007To1002(t *testing.T) {
|
||||
return
|
||||
}
|
||||
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1007@127.0.0.1", Username: "1007", Password: "1234", Realm: "*"}, "sip:1002@127.0.0.1",
|
||||
"sip:127.0.0.1:25060", time.Duration(66)*time.Second, 5077); err != nil {
|
||||
"sip:127.0.0.1:5060", time.Duration(66)*time.Second, 5077); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -282,7 +326,7 @@ func TestTutFsCallsAccount1001(t *testing.T) {
|
||||
}
|
||||
|
||||
// Make sure account was debited properly
|
||||
func TestTutFsCallsCdrs(t *testing.T) {
|
||||
func TestTutFsCalls1001Cdrs(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
@@ -308,6 +352,9 @@ func TestTutFsCallsCdrs(t *testing.T) {
|
||||
if reply[0].Cost == -1.0 { // Cost was not calculated
|
||||
t.Errorf("Unexpected Cost for CDR: %+v", reply[0])
|
||||
}
|
||||
//if reply[0].Supplier != "suppl2" { // Usage as seconds
|
||||
// t.Errorf("Unexpected Supplier for CDR: %+v", reply[0])
|
||||
//}
|
||||
}
|
||||
// Make sure call cost contains the matched information
|
||||
if err := tutFsCallsRpc.Call("ApierV2.GetCallCostLog", utils.AttrGetCallCost{CgrId: cgrId}, &cCost); err != nil {
|
||||
@@ -352,7 +399,16 @@ func TestTutFsCallsCdrs(t *testing.T) {
|
||||
t.Errorf("Unexpected Subject for CDR: %+v", reply[0])
|
||||
}
|
||||
}
|
||||
req = utils.RpcCdrsFilter{Accounts: []string{"1002"}, RunIds: []string{utils.META_DEFAULT}}
|
||||
|
||||
}
|
||||
|
||||
// Make sure account was debited properly
|
||||
func TestTutFsCalls1002Cdrs(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
var reply []*engine.ExternalCdr
|
||||
req := utils.RpcCdrsFilter{Accounts: []string{"1002"}, RunIds: []string{utils.META_DEFAULT}}
|
||||
if err := tutFsCallsRpc.Call("ApierV2.GetCdrs", req, &reply); err != nil {
|
||||
t.Error("Unexpected error: ", err.Error())
|
||||
} else if len(reply) != 1 {
|
||||
@@ -371,7 +427,15 @@ func TestTutFsCallsCdrs(t *testing.T) {
|
||||
t.Errorf("Unexpected Usage for CDR: %+v", reply[0])
|
||||
}
|
||||
}
|
||||
req = utils.RpcCdrsFilter{Accounts: []string{"1003"}, RunIds: []string{utils.META_DEFAULT}}
|
||||
}
|
||||
|
||||
// Make sure account was debited properly
|
||||
func TestTutFsCalls1003Cdrs(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
var reply []*engine.ExternalCdr
|
||||
req := utils.RpcCdrsFilter{Accounts: []string{"1003"}, RunIds: []string{utils.META_DEFAULT}}
|
||||
if err := tutFsCallsRpc.Call("ApierV2.GetCdrs", req, &reply); err != nil {
|
||||
t.Error("Unexpected error: ", err.Error())
|
||||
} else if len(reply) != 1 {
|
||||
@@ -390,7 +454,16 @@ func TestTutFsCallsCdrs(t *testing.T) {
|
||||
t.Errorf("Unexpected Usage for CDR: %+v", reply[0])
|
||||
}
|
||||
}
|
||||
req = utils.RpcCdrsFilter{Accounts: []string{"1004"}, RunIds: []string{utils.META_DEFAULT}}
|
||||
|
||||
}
|
||||
|
||||
// Make sure account was debited properly
|
||||
func TestTutFsCalls1004Cdrs(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
var reply []*engine.ExternalCdr
|
||||
req := utils.RpcCdrsFilter{Accounts: []string{"1004"}, RunIds: []string{utils.META_DEFAULT}}
|
||||
if err := tutFsCallsRpc.Call("ApierV2.GetCdrs", req, &reply); err != nil {
|
||||
t.Error("Unexpected error: ", err.Error())
|
||||
} else if len(reply) != 1 {
|
||||
@@ -409,7 +482,16 @@ func TestTutFsCallsCdrs(t *testing.T) {
|
||||
t.Errorf("Unexpected Usage for CDR: %+v", reply[0])
|
||||
}
|
||||
}
|
||||
req = utils.RpcCdrsFilter{Accounts: []string{"1006"}, RunIds: []string{utils.META_DEFAULT}}
|
||||
|
||||
}
|
||||
|
||||
// Make sure account was debited properly
|
||||
func TestTutFsCalls1006Cdrs(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
var reply []*engine.ExternalCdr
|
||||
req := utils.RpcCdrsFilter{Accounts: []string{"1006"}, RunIds: []string{utils.META_DEFAULT}}
|
||||
if err := tutFsCallsRpc.Call("ApierV2.GetCdrs", req, &reply); err != nil {
|
||||
t.Error("Unexpected error: ", err.Error())
|
||||
} else if len(reply) != 1 {
|
||||
@@ -431,7 +513,15 @@ func TestTutFsCallsCdrs(t *testing.T) {
|
||||
t.Errorf("Unexpected Cost for CDR: %+v", reply[0])
|
||||
}
|
||||
}
|
||||
req = utils.RpcCdrsFilter{Accounts: []string{"1007"}, RunIds: []string{utils.META_DEFAULT}}
|
||||
}
|
||||
|
||||
// Make sure account was debited properly
|
||||
func TestTutFsCalls1007Cdrs(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
var reply []*engine.ExternalCdr
|
||||
req := utils.RpcCdrsFilter{Accounts: []string{"1007"}, RunIds: []string{utils.META_DEFAULT}}
|
||||
if err := tutFsCallsRpc.Call("ApierV2.GetCdrs", req, &reply); err != nil {
|
||||
t.Error("Unexpected error: ", err.Error())
|
||||
} else if len(reply) != 1 {
|
||||
|
||||
@@ -72,13 +72,13 @@ func TestTutKamCallsResetStorDb(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// start Kam server
|
||||
func TestTutKamCallsStartKam(t *testing.T) {
|
||||
// start FS server
|
||||
func TestTutKamCallsStartKamailio(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
engine.KillProcName("kamailio", *waitRater)
|
||||
if err := engine.CallScript(path.Join(*dataDir, "tutorials", "kamevapi", "kamailio", "etc", "init.d", "kamailio"), "start", 3000); err != nil {
|
||||
engine.KillProcName("kamailio", 3000)
|
||||
if err := engine.CallScript(path.Join(*dataDir, "tutorials", "kamevapi", "kamailio", "etc", "init.d", "kamailio"), "start", 2000); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -94,12 +94,12 @@ func TestTutKamCallsStartEngine(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// Restart Kam so we make sure reconnects are working
|
||||
func TestTutKamCallsRestartKam(t *testing.T) {
|
||||
// Restart FS so we make sure reconnects are working
|
||||
func TestTutKamCallsRestartKamailio(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
if err := engine.CallScript(path.Join(*dataDir, "tutorials", "kamevapi", "kamailio", "etc", "init.d", "kamailio"), "restart", 4000); err != nil {
|
||||
if err := engine.CallScript(path.Join(*dataDir, "tutorials", "kamevapi", "kamailio", "etc", "init.d", "kamailio"), "restart", 3000); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -131,7 +131,7 @@ func TestTutKamCallsLoadTariffPlanFromFolder(t *testing.T) {
|
||||
time.Sleep(time.Duration(*waitRater) * time.Millisecond) // Give time for scheduler to execute topups
|
||||
}
|
||||
|
||||
// Make sure account was topped-up properly
|
||||
// Make sure account was debited properly
|
||||
func TestTutKamCallsAccountsBefore(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
@@ -180,13 +180,7 @@ func TestTutKamCallsCdrStatsBefore(t *testing.T) {
|
||||
}
|
||||
//eQueueIds := []string{"*default", "CDRST1", "CDRST_1001", "CDRST_1002", "CDRST_1003", "STATS_SUPPL1", "STATS_SUPPL2"}
|
||||
var statMetrics map[string]float64
|
||||
eMetrics := map[string]float64{engine.ACC: -1, engine.ACD: -1, engine.ASR: -1}
|
||||
if err := tutKamCallsRpc.Call("CDRStatsV1.GetMetrics", v1.AttrGetMetrics{StatsQueueId: utils.META_DEFAULT}, &statMetrics); err != nil {
|
||||
t.Error("Calling CDRStatsV1.GetMetrics, got error: ", err.Error())
|
||||
} else if !reflect.DeepEqual(eMetrics, statMetrics) {
|
||||
t.Errorf("Expecting: %v, received: %v", eMetrics, statMetrics)
|
||||
}
|
||||
eMetrics = map[string]float64{engine.ACD: -1, engine.ASR: -1, engine.TCC: -1, engine.TCD: -1, engine.ACC: -1}
|
||||
eMetrics := map[string]float64{engine.ACD: -1, engine.ASR: -1, engine.TCC: -1, engine.TCD: -1, engine.ACC: -1}
|
||||
if err := tutKamCallsRpc.Call("CDRStatsV1.GetMetrics", v1.AttrGetMetrics{StatsQueueId: "CDRST1"}, &statMetrics); err != nil {
|
||||
t.Error("Calling CDRStatsV1.GetMetrics, got error: ", err.Error())
|
||||
} else if !reflect.DeepEqual(eMetrics, statMetrics) {
|
||||
@@ -253,12 +247,23 @@ func TestTutKamCallsCall1001To1002(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// Call from 1001 (prepaid) to 1003
|
||||
func TestTutKamCallsCall1001To1003(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1001@127.0.0.1", Username: "1001", Password: "CGRateS.org", Realm: "*"}, "sip:1003@127.0.0.1",
|
||||
"sip:127.0.0.1:5060", time.Duration(65)*time.Second, 5072); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTutKamCallsCall1002To1001(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1002@127.0.0.1", Username: "1002", Password: "CGRateS.org", Realm: "*"}, "sip:1001@127.0.0.1",
|
||||
"sip:127.0.0.1:5060", time.Duration(61)*time.Second, 5072); err != nil {
|
||||
"sip:127.0.0.1:5060", time.Duration(61)*time.Second, 5073); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -268,7 +273,7 @@ func TestTutKamCallsCall1003To1001(t *testing.T) {
|
||||
return
|
||||
}
|
||||
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1003@127.0.0.1", Username: "1003", Password: "CGRateS.org", Realm: "*"}, "sip:1001@127.0.0.1",
|
||||
"sip:127.0.0.1:5060", time.Duration(63)*time.Second, 5073); err != nil {
|
||||
"sip:127.0.0.1:5060", time.Duration(63)*time.Second, 5074); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -278,7 +283,7 @@ func TestTutKamCallsCall1004To1001(t *testing.T) {
|
||||
return
|
||||
}
|
||||
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1004@127.0.0.1", Username: "1004", Password: "CGRateS.org", Realm: "*"}, "sip:1001@127.0.0.1",
|
||||
"sip:127.0.0.1:5060", time.Duration(62)*time.Second, 5074); err != nil {
|
||||
"sip:127.0.0.1:5060", time.Duration(62)*time.Second, 5075); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -288,7 +293,7 @@ func TestTutKamCallsCall1006To1002(t *testing.T) {
|
||||
return
|
||||
}
|
||||
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1006@127.0.0.1", Username: "1006", Password: "CGRateS.org", Realm: "*"}, "sip:1002@127.0.0.1",
|
||||
"sip:127.0.0.1:5060", time.Duration(64)*time.Second, 5075); err != nil {
|
||||
"sip:127.0.0.1:5060", time.Duration(64)*time.Second, 5076); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -298,42 +303,7 @@ func TestTutKamCallsCall1007To1002(t *testing.T) {
|
||||
return
|
||||
}
|
||||
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1007@127.0.0.1", Username: "1007", Password: "CGRateS.org", Realm: "*"}, "sip:1002@127.0.0.1",
|
||||
"sip:127.0.0.1:5060", time.Duration(66)*time.Second, 5076); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
// Should hangup at 62 seconds, disconnect from SM
|
||||
func TestTutKamCallsCall1007To1007(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1007@127.0.0.1", Username: "1007", Password: "CGRateS.org", Realm: "*"}, "sip:1007@127.0.0.1",
|
||||
"sip:127.0.0.1:5060", time.Duration(75)*time.Second, 5077); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Should hangup at 62 seconds, disconnect from Kamailio
|
||||
func TestTutKamCallsCall1003To1007(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1003@127.0.0.1", Username: "1003", Password: "CGRateS.org", Realm: "*"}, "sip:1007@127.0.0.1",
|
||||
"sip:127.0.0.1:5060", time.Duration(73)*time.Second, 5078); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
// Call from 1001 (prepaid) to 1007, should not cost more than 62 which is MaxCallCost
|
||||
func TestTutKamCallsCall1001To1007(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1001@127.0.0.1", Username: "1001", Password: "CGRateS.org", Realm: "*"}, "sip:1007@127.0.0.1",
|
||||
"sip:127.0.0.1:5060", time.Duration(70)*time.Second, 5079); err != nil {
|
||||
"sip:127.0.0.1:5060", time.Duration(66)*time.Second, 5077); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -343,7 +313,7 @@ func TestTutKamCallsAccount1001(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
time.Sleep(time.Duration(80) * time.Second) // Allow calls to finish before start querying the results
|
||||
time.Sleep(time.Duration(70) * time.Second) // Allow calls to finish before start querying the results
|
||||
var reply *engine.Account
|
||||
attrs := &utils.AttrGetAccount{Tenant: "cgrates.org", Account: "1001", Direction: "*out"}
|
||||
if err := tutKamCallsRpc.Call("ApierV1.GetAccount", attrs, &reply); err != nil {
|
||||
@@ -356,17 +326,20 @@ func TestTutKamCallsAccount1001(t *testing.T) {
|
||||
}
|
||||
|
||||
// Make sure account was debited properly
|
||||
func TestTutKamCallsCdrs(t *testing.T) {
|
||||
func TestTutKamCalls1001Cdrs(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
var reply []*engine.ExternalCdr
|
||||
req := utils.RpcCdrsFilter{Accounts: []string{"1001"}, RunIds: []string{utils.META_DEFAULT}}
|
||||
var cgrId string // Share with getCostDetails
|
||||
var cCost engine.CallCost
|
||||
req := utils.RpcCdrsFilter{RunIds: []string{utils.META_DEFAULT}, Accounts: []string{"1001"}, DestPrefixes: []string{"1002"}}
|
||||
if err := tutKamCallsRpc.Call("ApierV2.GetCdrs", req, &reply); err != nil {
|
||||
t.Error("Unexpected error: ", err.Error())
|
||||
} else if len(reply) != 2 {
|
||||
} else if len(reply) != 1 {
|
||||
t.Error("Unexpected number of CDRs returned: ", len(reply))
|
||||
} else {
|
||||
cgrId = reply[0].CgrId
|
||||
if reply[0].CdrSource != "KAMAILIO_CGR_CALL_END" {
|
||||
t.Errorf("Unexpected CdrSource for CDR: %+v", reply[0])
|
||||
}
|
||||
@@ -374,11 +347,44 @@ func TestTutKamCallsCdrs(t *testing.T) {
|
||||
t.Errorf("Unexpected ReqType for CDR: %+v", reply[0])
|
||||
}
|
||||
if reply[0].Usage != "67" { // Usage as seconds
|
||||
t.Errorf("Unexpected Usage for 428CDR: %+v", reply[0])
|
||||
t.Errorf("Unexpected Usage for CDR: %+v", reply[0])
|
||||
}
|
||||
if reply[0].Supplier != "suppl2" { // Usage as seconds
|
||||
t.Errorf("Unexpected Supplier for CDR: %+v", reply[0])
|
||||
if reply[0].Cost == -1.0 { // Cost was not calculated
|
||||
t.Errorf("Unexpected Cost for CDR: %+v", reply[0])
|
||||
}
|
||||
//if reply[0].Supplier != "suppl2" { // Usage as seconds
|
||||
// t.Errorf("Unexpected Supplier for CDR: %+v", reply[0])
|
||||
//}
|
||||
}
|
||||
// Make sure call cost contains the matched information
|
||||
if err := tutKamCallsRpc.Call("ApierV2.GetCallCostLog", utils.AttrGetCallCost{CgrId: cgrId}, &cCost); err != nil {
|
||||
t.Error("Unexpected error: ", err.Error())
|
||||
} else if utils.IsSliceMember([]string{cCost.Timespans[0].MatchedSubject, cCost.Timespans[0].MatchedPrefix, cCost.Timespans[0].MatchedDestId}, "") {
|
||||
t.Errorf("Unexpected Matched* for CallCost: %+v", cCost.Timespans[0])
|
||||
}
|
||||
|
||||
req = utils.RpcCdrsFilter{RunIds: []string{utils.META_DEFAULT}, Accounts: []string{"1001"}, DestPrefixes: []string{"1003"}}
|
||||
if err := tutKamCallsRpc.Call("ApierV2.GetCdrs", req, &reply); err != nil {
|
||||
t.Error("Unexpected error: ", err.Error())
|
||||
} else if len(reply) != 1 {
|
||||
t.Error("Unexpected number of CDRs returned: ", len(reply))
|
||||
} else {
|
||||
cgrId = reply[0].CgrId
|
||||
if reply[0].ReqType != utils.META_PREPAID {
|
||||
t.Errorf("Unexpected ReqType for CDR: %+v", reply[0])
|
||||
}
|
||||
if reply[0].Usage != "65" && reply[0].Usage != "66" { // Usage as seconds
|
||||
t.Errorf("Unexpected Usage for CDR: %+v", reply[0])
|
||||
}
|
||||
if reply[0].Cost != 0 { // Cost was not calculated
|
||||
t.Errorf("Unexpected Cost for CDR: %+v", reply[0])
|
||||
}
|
||||
}
|
||||
// Make sure call cost contains the matched information
|
||||
if err := tutKamCallsRpc.Call("ApierV2.GetCallCostLog", utils.AttrGetCallCost{CgrId: cgrId}, &cCost); err != nil {
|
||||
t.Error("Unexpected error: ", err.Error())
|
||||
} else if utils.IsSliceMember([]string{cCost.Timespans[0].MatchedSubject, cCost.Timespans[0].MatchedPrefix, cCost.Timespans[0].MatchedDestId}, "") {
|
||||
t.Errorf("Unexpected Matched* for CallCost: %+v", cCost.Timespans[0])
|
||||
}
|
||||
req = utils.RpcCdrsFilter{Accounts: []string{"1001"}, RunIds: []string{"derived_run1"}, FilterOnRated: true}
|
||||
if err := tutKamCallsRpc.Call("ApierV2.GetCdrs", req, &reply); err != nil {
|
||||
@@ -392,11 +398,17 @@ func TestTutKamCallsCdrs(t *testing.T) {
|
||||
if reply[0].Subject != "1002" {
|
||||
t.Errorf("Unexpected Subject for CDR: %+v", reply[0])
|
||||
}
|
||||
if reply[0].Supplier != "suppl2" {
|
||||
t.Errorf("Unexpected Supplier for CDR: %+v", reply[0])
|
||||
}
|
||||
}
|
||||
req = utils.RpcCdrsFilter{Accounts: []string{"1002"}, RunIds: []string{utils.META_DEFAULT}}
|
||||
|
||||
}
|
||||
|
||||
// Make sure account was debited properly
|
||||
func TestTutKamCalls1002Cdrs(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
var reply []*engine.ExternalCdr
|
||||
req := utils.RpcCdrsFilter{Accounts: []string{"1002"}, RunIds: []string{utils.META_DEFAULT}}
|
||||
if err := tutKamCallsRpc.Call("ApierV2.GetCdrs", req, &reply); err != nil {
|
||||
t.Error("Unexpected error: ", err.Error())
|
||||
} else if len(reply) != 1 {
|
||||
@@ -414,11 +426,16 @@ func TestTutKamCallsCdrs(t *testing.T) {
|
||||
if reply[0].Usage != "61" { // Usage as seconds
|
||||
t.Errorf("Unexpected Usage for CDR: %+v", reply[0])
|
||||
}
|
||||
if reply[0].Supplier != "suppl1" {
|
||||
t.Errorf("Unexpected Supplier for CDR: %+v", reply[0])
|
||||
}
|
||||
}
|
||||
req = utils.RpcCdrsFilter{Accounts: []string{"1003"}, RunIds: []string{utils.META_DEFAULT}}
|
||||
}
|
||||
|
||||
// Make sure account was debited properly
|
||||
func TestTutKamCalls1003Cdrs(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
var reply []*engine.ExternalCdr
|
||||
req := utils.RpcCdrsFilter{Accounts: []string{"1003"}, RunIds: []string{utils.META_DEFAULT}}
|
||||
if err := tutKamCallsRpc.Call("ApierV2.GetCdrs", req, &reply); err != nil {
|
||||
t.Error("Unexpected error: ", err.Error())
|
||||
} else if len(reply) != 1 {
|
||||
@@ -433,14 +450,20 @@ func TestTutKamCallsCdrs(t *testing.T) {
|
||||
if reply[0].Destination != "1001" {
|
||||
t.Errorf("Unexpected Destination for CDR: %+v", reply[0])
|
||||
}
|
||||
if reply[0].Usage != "63" { // Usage as seconds
|
||||
if reply[0].Usage != "63" && reply[0].Usage != "64" { // Usage as seconds, sometimes takes a second longer to disconnect
|
||||
t.Errorf("Unexpected Usage for CDR: %+v", reply[0])
|
||||
}
|
||||
if reply[0].Supplier != "suppl1" {
|
||||
t.Errorf("Unexpected Supplier for CDR: %+v", reply[0])
|
||||
}
|
||||
}
|
||||
req = utils.RpcCdrsFilter{Accounts: []string{"1004"}, RunIds: []string{utils.META_DEFAULT}}
|
||||
|
||||
}
|
||||
|
||||
// Make sure account was debited properly
|
||||
func TestTutKamCalls1004Cdrs(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
var reply []*engine.ExternalCdr
|
||||
req := utils.RpcCdrsFilter{Accounts: []string{"1004"}, RunIds: []string{utils.META_DEFAULT}}
|
||||
if err := tutKamCallsRpc.Call("ApierV2.GetCdrs", req, &reply); err != nil {
|
||||
t.Error("Unexpected error: ", err.Error())
|
||||
} else if len(reply) != 1 {
|
||||
@@ -455,14 +478,20 @@ func TestTutKamCallsCdrs(t *testing.T) {
|
||||
if reply[0].Destination != "1001" {
|
||||
t.Errorf("Unexpected Destination for CDR: %+v", reply[0])
|
||||
}
|
||||
if reply[0].Usage != "62" { // Usage as seconds
|
||||
if reply[0].Usage != "62" && reply[0].Usage != "63" { // Usage as seconds
|
||||
t.Errorf("Unexpected Usage for CDR: %+v", reply[0])
|
||||
}
|
||||
if reply[0].Supplier != "suppl1" {
|
||||
t.Errorf("Unexpected Supplier for CDR: %+v", reply[0])
|
||||
}
|
||||
}
|
||||
req = utils.RpcCdrsFilter{Accounts: []string{"1006"}, RunIds: []string{utils.META_DEFAULT}}
|
||||
|
||||
}
|
||||
|
||||
// Make sure account was debited properly
|
||||
func TestTutKamCalls1006Cdrs(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
var reply []*engine.ExternalCdr
|
||||
req := utils.RpcCdrsFilter{Accounts: []string{"1006"}, RunIds: []string{utils.META_DEFAULT}}
|
||||
if err := tutKamCallsRpc.Call("ApierV2.GetCdrs", req, &reply); err != nil {
|
||||
t.Error("Unexpected error: ", err.Error())
|
||||
} else if len(reply) != 1 {
|
||||
@@ -477,84 +506,42 @@ func TestTutKamCallsCdrs(t *testing.T) {
|
||||
if reply[0].Destination != "1002" {
|
||||
t.Errorf("Unexpected Destination for CDR: %+v", reply[0])
|
||||
}
|
||||
if reply[0].Usage != "64" { // Usage as seconds
|
||||
if reply[0].Usage != "64" && reply[0].Usage != "65" { // Usage as seconds
|
||||
t.Errorf("Unexpected Usage for CDR: %+v", reply[0])
|
||||
}
|
||||
if reply[0].Supplier != "suppl3" {
|
||||
t.Errorf("Unexpected Supplier for CDR: %+v", reply[0])
|
||||
}
|
||||
}
|
||||
req = utils.RpcCdrsFilter{Accounts: []string{"1007"}, RunIds: []string{utils.META_DEFAULT}}
|
||||
if err := tutKamCallsRpc.Call("ApierV2.GetCdrs", req, &reply); err != nil {
|
||||
t.Error("Unexpected error: ", err.Error())
|
||||
} else if len(reply) != 1 {
|
||||
t.Error("Unexpected number of CDRs returned: ", len(reply))
|
||||
} else {
|
||||
if reply[0].CdrSource != "KAMAILIO_CGR_CALL_END" {
|
||||
t.Errorf("Unexpected CdrSource for CDR: %+v", reply[0])
|
||||
}
|
||||
if reply[0].ReqType != utils.META_PREPAID {
|
||||
t.Errorf("Unexpected ReqType for CDR: %+v", reply[0])
|
||||
}
|
||||
if reply[0].Destination != "1002" {
|
||||
t.Errorf("Unexpected Destination for CDR: %+v", reply[0])
|
||||
}
|
||||
if reply[0].Usage != "66" { // Usage as seconds
|
||||
t.Errorf("Unexpected Usage for CDR: %+v", reply[0])
|
||||
}
|
||||
if reply[0].Supplier != "suppl3" {
|
||||
t.Errorf("Unexpected Supplier for CDR: %+v", reply[0])
|
||||
if reply[0].Cost == -1.0 { // Cost was not calculated
|
||||
t.Errorf("Unexpected Cost for CDR: %+v", reply[0])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure all stats queues were updated
|
||||
func TestTutKamCallsCdrStatsAfter(t *testing.T) {
|
||||
// Make sure account was debited properly
|
||||
func TestTutKamCalls1007Cdrs(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
var statMetrics map[string]float64
|
||||
eMetrics := map[string]float64{engine.ACC: 0.9015222222, engine.ACD: 65.5555555556, engine.ASR: 100}
|
||||
if err := tutKamCallsRpc.Call("CDRStatsV1.GetMetrics", v1.AttrGetMetrics{StatsQueueId: utils.META_DEFAULT}, &statMetrics); err != nil {
|
||||
t.Error("Calling CDRStatsV1.GetMetrics, got error: ", err.Error())
|
||||
} else if !reflect.DeepEqual(eMetrics, statMetrics) {
|
||||
t.Errorf("Expecting: %v, received: %v", eMetrics, statMetrics)
|
||||
}
|
||||
eMetrics = map[string]float64{engine.ACC: 0.8829, engine.ACD: 64.7142857143, engine.ASR: 100, engine.TCC: 6.1803, engine.TCD: 453}
|
||||
if err := tutKamCallsRpc.Call("CDRStatsV1.GetMetrics", v1.AttrGetMetrics{StatsQueueId: "CDRST1"}, &statMetrics); err != nil {
|
||||
t.Error("Calling CDRStatsV1.GetMetrics, got error: ", err.Error())
|
||||
} else if !reflect.DeepEqual(eMetrics, statMetrics) {
|
||||
t.Errorf("Expecting: %v, received: %v", eMetrics, statMetrics)
|
||||
}
|
||||
eMetrics = map[string]float64{engine.TCC: 6.1803, engine.TCD: 453, engine.ACC: 0.32, engine.ACD: 68.5, engine.ASR: 100}
|
||||
if err := tutKamCallsRpc.Call("CDRStatsV1.GetMetrics", v1.AttrGetMetrics{StatsQueueId: "CDRST_1001"}, &statMetrics); err != nil {
|
||||
t.Error("Calling CDRStatsV1.GetMetrics, got error: ", err.Error())
|
||||
} else if !reflect.DeepEqual(eMetrics, statMetrics) {
|
||||
t.Errorf("Expecting: %v, received: %v", eMetrics, statMetrics)
|
||||
}
|
||||
eMetrics = map[string]float64{engine.ACD: 61, engine.ASR: 100, engine.TCC: 6.1803, engine.TCD: 453, engine.ACC: 1.2334}
|
||||
if err := tutKamCallsRpc.Call("CDRStatsV1.GetMetrics", v1.AttrGetMetrics{StatsQueueId: "CDRST_1002"}, &statMetrics); err != nil {
|
||||
t.Error("Calling CDRStatsV1.GetMetrics, got error: ", err.Error())
|
||||
} else if !reflect.DeepEqual(eMetrics, statMetrics) {
|
||||
t.Errorf("Expecting: %v, received: %v", eMetrics, statMetrics)
|
||||
}
|
||||
eMetrics = map[string]float64{engine.TCC: 6.1803, engine.TCD: 453, engine.ACC: 1.2334, engine.ACD: -1, engine.ASR: -1}
|
||||
if err := tutKamCallsRpc.Call("CDRStatsV1.GetMetrics", v1.AttrGetMetrics{StatsQueueId: "CDRST_1003"}, &statMetrics); err != nil {
|
||||
t.Error("Calling CDRStatsV1.GetMetrics, got error: ", err.Error())
|
||||
} else if !reflect.DeepEqual(eMetrics, statMetrics) {
|
||||
t.Errorf("Expecting: %v, received: %v", eMetrics, statMetrics)
|
||||
}
|
||||
eMetrics = map[string]float64{engine.ACC: 1.00404, engine.ACD: 65.2, engine.ASR: 100, engine.TCC: 5.0202, engine.TCD: 326}
|
||||
if err := tutKamCallsRpc.Call("CDRStatsV1.GetMetrics", v1.AttrGetMetrics{StatsQueueId: "STATS_SUPPL1"}, &statMetrics); err != nil {
|
||||
t.Error("Calling CDRStatsV1.GetMetrics, got error: ", err.Error())
|
||||
} else if !reflect.DeepEqual(eMetrics, statMetrics) {
|
||||
t.Errorf("Expecting: %v, received: %v", eMetrics, statMetrics)
|
||||
}
|
||||
eMetrics = map[string]float64{engine.ACD: 67, engine.ASR: 100, engine.TCC: 1.2534, engine.TCD: 134, engine.ACC: 0.6267}
|
||||
if err := tutKamCallsRpc.Call("CDRStatsV1.GetMetrics", v1.AttrGetMetrics{StatsQueueId: "STATS_SUPPL2"}, &statMetrics); err != nil {
|
||||
t.Error("Calling CDRStatsV1.GetMetrics, got error: ", err.Error())
|
||||
} else if !reflect.DeepEqual(eMetrics, statMetrics) {
|
||||
t.Errorf("Expecting: %v, received: %v", eMetrics, statMetrics)
|
||||
var reply []*engine.ExternalCdr
|
||||
req := utils.RpcCdrsFilter{Accounts: []string{"1007"}, RunIds: []string{utils.META_DEFAULT}}
|
||||
if err := tutKamCallsRpc.Call("ApierV2.GetCdrs", req, &reply); err != nil {
|
||||
t.Error("Unexpected error: ", err.Error())
|
||||
} else if len(reply) != 1 {
|
||||
t.Error("Unexpected number of CDRs returned: ", len(reply))
|
||||
} else {
|
||||
if reply[0].CdrSource != "KAMAILIO_CGR_CALL_END" {
|
||||
t.Errorf("Unexpected CdrSource for CDR: %+v", reply[0])
|
||||
}
|
||||
if reply[0].ReqType != utils.META_PREPAID {
|
||||
t.Errorf("Unexpected ReqType for CDR: %+v", reply[0])
|
||||
}
|
||||
if reply[0].Destination != "1002" {
|
||||
t.Errorf("Unexpected Destination for CDR: %+v", reply[0])
|
||||
}
|
||||
if reply[0].Usage != "66" && reply[0].Usage != "67" { // Usage as seconds
|
||||
t.Errorf("Unexpected Usage for CDR: %+v", reply[0])
|
||||
}
|
||||
if reply[0].Cost == -1.0 { // Cost was not calculated
|
||||
t.Errorf("Unexpected Cost for CDR: %+v", reply[0])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -117,7 +117,7 @@ func TestTutLocalCacheStats(t *testing.T) {
|
||||
}
|
||||
var rcvStats *utils.CacheStats
|
||||
expectedStats := &utils.CacheStats{Destinations: 4, RatingPlans: 3, RatingProfiles: 8, Actions: 7, SharedGroups: 1, RatingAliases: 1, AccountAliases: 1,
|
||||
DerivedChargers: 1, LcrProfiles: 4, CdrStats: 6, Users: 2}
|
||||
DerivedChargers: 1, LcrProfiles: 5, CdrStats: 6, Users: 2}
|
||||
var args utils.AttrCacheStats
|
||||
if err := tutLocalRpc.Call("ApierV1.GetCacheStats", args, &rcvStats); err != nil {
|
||||
t.Error("Got error on ApierV1.GetCacheStats: ", err.Error())
|
||||
@@ -898,8 +898,8 @@ func TestTutLocalLeastCost(t *testing.T) {
|
||||
Direction: "*out",
|
||||
Category: "call",
|
||||
Tenant: "cgrates.org",
|
||||
Subject: "1004",
|
||||
Account: "1004",
|
||||
Subject: "1005",
|
||||
Account: "1005",
|
||||
Destination: "1002",
|
||||
TimeStart: tStart,
|
||||
TimeEnd: tEnd,
|
||||
@@ -924,8 +924,8 @@ func TestTutLocalLeastCost(t *testing.T) {
|
||||
Direction: "*out",
|
||||
Category: "call",
|
||||
Tenant: "cgrates.org",
|
||||
Subject: "1004",
|
||||
Account: "1004",
|
||||
Subject: "1005",
|
||||
Account: "1005",
|
||||
Destination: "1003",
|
||||
TimeStart: tStart,
|
||||
TimeEnd: tEnd,
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
"net/rpc/jsonrpc"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -71,13 +72,13 @@ func TestTutOsipsCallsResetStorDb(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// start Kam server
|
||||
// start FS server
|
||||
func TestTutOsipsCallsStartOsips(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
engine.KillProcName("opensips", *waitRater)
|
||||
if err := engine.CallScript(path.Join(*dataDir, "tutorials", "osips_async", "opensips", "etc", "init.d", "opensips"), "start", 100); err != nil {
|
||||
engine.KillProcName("opensips", 3000)
|
||||
if err := engine.CallScript(path.Join(*dataDir, "tutorials", "osips_async", "opensips", "etc", "init.d", "opensips"), "start", 3000); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -93,12 +94,12 @@ func TestTutOsipsCallsStartEngine(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// Restart Kam so we make sure reconnects are working
|
||||
func TestTutOsipsCallsRestartKam(t *testing.T) {
|
||||
// Restart FS so we make sure reconnects are working
|
||||
func TestTutOsipsCallsRestartOsips(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
if err := engine.CallScript(path.Join(*dataDir, "tutorials", "osips_async", "opensips", "etc", "init.d", "opensips"), "restart", 200); err != nil {
|
||||
if err := engine.CallScript(path.Join(*dataDir, "tutorials", "osips_async", "opensips", "etc", "init.d", "opensips"), "restart", 3000); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -172,6 +173,51 @@ func TestTutOsipsCallsAccountsBefore(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure all stats queues are in place
|
||||
func TestTutOsipsCallsCdrStatsBefore(t *testing.T) {
|
||||
if !*testCalls {
|
||||
return
|
||||
}
|
||||
//eQueueIds := []string{"*default", "CDRST1", "CDRST_1001", "CDRST_1002", "CDRST_1003", "STATS_SUPPL1", "STATS_SUPPL2"}
|
||||
var statMetrics map[string]float64
|
||||
eMetrics := map[string]float64{engine.ACD: -1, engine.ASR: -1, engine.TCC: -1, engine.TCD: -1, engine.ACC: -1}
|
||||
if err := tutOsipsCallsRpc.Call("CDRStatsV1.GetMetrics", v1.AttrGetMetrics{StatsQueueId: "CDRST1"}, &statMetrics); err != nil {
|
||||
t.Error("Calling CDRStatsV1.GetMetrics, got error: ", err.Error())
|
||||
} else if !reflect.DeepEqual(eMetrics, statMetrics) {
|
||||
t.Errorf("Expecting: %v, received: %v", eMetrics, statMetrics)
|
||||
}
|
||||
eMetrics = map[string]float64{engine.ACC: -1, engine.ACD: -1, engine.ASR: -1, engine.TCC: -1, engine.TCD: -1}
|
||||
if err := tutOsipsCallsRpc.Call("CDRStatsV1.GetMetrics", v1.AttrGetMetrics{StatsQueueId: "CDRST_1001"}, &statMetrics); err != nil {
|
||||
t.Error("Calling CDRStatsV1.GetMetrics, got error: ", err.Error())
|
||||
} else if !reflect.DeepEqual(eMetrics, statMetrics) {
|
||||
t.Errorf("Expecting: %v, received: %v", eMetrics, statMetrics)
|
||||
}
|
||||
eMetrics = map[string]float64{engine.ACD: -1, engine.ASR: -1, engine.TCC: -1, engine.TCD: -1, engine.ACC: -1}
|
||||
if err := tutOsipsCallsRpc.Call("CDRStatsV1.GetMetrics", v1.AttrGetMetrics{StatsQueueId: "CDRST_1002"}, &statMetrics); err != nil {
|
||||
t.Error("Calling CDRStatsV1.GetMetrics, got error: ", err.Error())
|
||||
} else if !reflect.DeepEqual(eMetrics, statMetrics) {
|
||||
t.Errorf("Expecting: %v, received: %v", eMetrics, statMetrics)
|
||||
}
|
||||
eMetrics = map[string]float64{engine.ACD: -1, engine.ASR: -1, engine.TCC: -1, engine.TCD: -1, engine.ACC: -1}
|
||||
if err := tutOsipsCallsRpc.Call("CDRStatsV1.GetMetrics", v1.AttrGetMetrics{StatsQueueId: "CDRST_1003"}, &statMetrics); err != nil {
|
||||
t.Error("Calling CDRStatsV1.GetMetrics, got error: ", err.Error())
|
||||
} else if !reflect.DeepEqual(eMetrics, statMetrics) {
|
||||
t.Errorf("Expecting: %v, received: %v", eMetrics, statMetrics)
|
||||
}
|
||||
eMetrics = map[string]float64{engine.ACD: -1, engine.ASR: -1, engine.TCC: -1, engine.TCD: -1, engine.ACC: -1}
|
||||
if err := tutOsipsCallsRpc.Call("CDRStatsV1.GetMetrics", v1.AttrGetMetrics{StatsQueueId: "STATS_SUPPL1"}, &statMetrics); err != nil {
|
||||
t.Error("Calling CDRStatsV1.GetMetrics, got error: ", err.Error())
|
||||
} else if !reflect.DeepEqual(eMetrics, statMetrics) {
|
||||
t.Errorf("Expecting: %v, received: %v", eMetrics, statMetrics)
|
||||
}
|
||||
eMetrics = map[string]float64{engine.ACD: -1, engine.ASR: -1, engine.TCC: -1, engine.TCD: -1, engine.ACC: -1}
|
||||
if err := tutOsipsCallsRpc.Call("CDRStatsV1.GetMetrics", v1.AttrGetMetrics{StatsQueueId: "STATS_SUPPL2"}, &statMetrics); err != nil {
|
||||
t.Error("Calling CDRStatsV1.GetMetrics, got error: ", err.Error())
|
||||
} else if !reflect.DeepEqual(eMetrics, statMetrics) {
|
||||
t.Errorf("Expecting: %v, received: %v", eMetrics, statMetrics)
|
||||
}
|
||||
}
|
||||
|
||||
// Start Pjsua as listener and register it to receive calls
|
||||
func TestTutOsipsCallsStartPjsuaListener(t *testing.T) {
|
||||
if !*testCalls {
|
||||
@@ -179,12 +225,12 @@ func TestTutOsipsCallsStartPjsuaListener(t *testing.T) {
|
||||
}
|
||||
var err error
|
||||
acnts := []*engine.PjsuaAccount{
|
||||
&engine.PjsuaAccount{Id: "sip:1001@127.0.0.1", Username: "1001", Password: "CGRateS.org", Realm: "*", Registrar: "sip:127.0.0.1:5060"},
|
||||
&engine.PjsuaAccount{Id: "sip:1002@127.0.0.1", Username: "1002", Password: "CGRateS.org", Realm: "*", Registrar: "sip:127.0.0.1:5060"},
|
||||
&engine.PjsuaAccount{Id: "sip:1003@127.0.0.1", Username: "1003", Password: "CGRateS.org", Realm: "*", Registrar: "sip:127.0.0.1:5060"},
|
||||
&engine.PjsuaAccount{Id: "sip:1004@127.0.0.1", Username: "1004", Password: "CGRateS.org", Realm: "*", Registrar: "sip:127.0.0.1:5060"},
|
||||
&engine.PjsuaAccount{Id: "sip:1006@127.0.0.1", Username: "1006", Password: "CGRateS.org", Realm: "*", Registrar: "sip:127.0.0.1:5060"},
&engine.PjsuaAccount{Id: "sip:1007@127.0.0.1", Username: "1007", Password: "CGRateS.org", Realm: "*", Registrar: "sip:127.0.0.1:5060"}}
&engine.PjsuaAccount{Id: "sip:1001@127.0.0.1", Username: "1001", Password: "1234", Realm: "*", Registrar: "sip:127.0.0.1:5060"},
&engine.PjsuaAccount{Id: "sip:1002@127.0.0.1", Username: "1002", Password: "1234", Realm: "*", Registrar: "sip:127.0.0.1:5060"},
&engine.PjsuaAccount{Id: "sip:1003@127.0.0.1", Username: "1003", Password: "1234", Realm: "*", Registrar: "sip:127.0.0.1:5060"},
&engine.PjsuaAccount{Id: "sip:1004@127.0.0.1", Username: "1004", Password: "1234", Realm: "*", Registrar: "sip:127.0.0.1:5060"},
&engine.PjsuaAccount{Id: "sip:1006@127.0.0.1", Username: "1006", Password: "1234", Realm: "*", Registrar: "sip:127.0.0.1:5060"},
&engine.PjsuaAccount{Id: "sip:1007@127.0.0.1", Username: "1007", Password: "1234", Realm: "*", Registrar: "sip:127.0.0.1:5060"}}
if tutOsipsCallsPjSuaListener, err = engine.StartPjsuaListener(acnts, 5070, *waitRater); err != nil {
t.Fatal(err)
}
@@ -195,18 +241,29 @@ func TestTutOsipsCallsCall1001To1002(t *testing.T) {
if !*testCalls {
return
}
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1001@127.0.0.1", Username: "1001", Password: "CGRateS.org", Realm: "*"}, "sip:1002@127.0.0.1",
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1001@127.0.0.1", Username: "1001", Password: "1234", Realm: "*"}, "sip:1002@127.0.0.1",
"sip:127.0.0.1:5060", time.Duration(67)*time.Second, 5071); err != nil {
t.Fatal(err)
}
}

// Call from 1001 (prepaid) to 1003
func TestTutOsipsCallsCall1001To1003(t *testing.T) {
if !*testCalls {
return
}
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1001@127.0.0.1", Username: "1001", Password: "1234", Realm: "*"}, "sip:1003@127.0.0.1",
"sip:127.0.0.1:5060", time.Duration(65)*time.Second, 5072); err != nil {
t.Fatal(err)
}
}

func TestTutOsipsCallsCall1002To1001(t *testing.T) {
if !*testCalls {
return
}
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1002@127.0.0.1", Username: "1002", Password: "CGRateS.org", Realm: "*"}, "sip:1001@127.0.0.1",
"sip:127.0.0.1:5060", time.Duration(61)*time.Second, 5072); err != nil {
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1002@127.0.0.1", Username: "1002", Password: "1234", Realm: "*"}, "sip:1001@127.0.0.1",
"sip:127.0.0.1:5060", time.Duration(61)*time.Second, 5073); err != nil {
t.Fatal(err)
}
}
@@ -215,8 +272,8 @@ func TestTutOsipsCallsCall1003To1001(t *testing.T) {
if !*testCalls {
return
}
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1003@127.0.0.1", Username: "1003", Password: "CGRateS.org", Realm: "*"}, "sip:1001@127.0.0.1",
"sip:127.0.0.1:5060", time.Duration(63)*time.Second, 5073); err != nil {
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1003@127.0.0.1", Username: "1003", Password: "1234", Realm: "*"}, "sip:1001@127.0.0.1",
"sip:127.0.0.1:5060", time.Duration(63)*time.Second, 5074); err != nil {
t.Fatal(err)
}
}
@@ -225,8 +282,8 @@ func TestTutOsipsCallsCall1004To1001(t *testing.T) {
if !*testCalls {
return
}
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1004@127.0.0.1", Username: "1004", Password: "CGRateS.org", Realm: "*"}, "sip:1001@127.0.0.1",
"sip:127.0.0.1:5060", time.Duration(62)*time.Second, 5074); err != nil {
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1004@127.0.0.1", Username: "1004", Password: "1234", Realm: "*"}, "sip:1001@127.0.0.1",
"sip:127.0.0.1:5060", time.Duration(62)*time.Second, 5075); err != nil {
t.Fatal(err)
}
}
@@ -235,8 +292,8 @@ func TestTutOsipsCallsCall1006To1002(t *testing.T) {
if !*testCalls {
return
}
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1006@127.0.0.1", Username: "1006", Password: "CGRateS.org", Realm: "*"}, "sip:1002@127.0.0.1",
"sip:127.0.0.1:5060", time.Duration(64)*time.Second, 5075); err != nil {
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1006@127.0.0.1", Username: "1006", Password: "1234", Realm: "*"}, "sip:1002@127.0.0.1",
"sip:127.0.0.1:5060", time.Duration(64)*time.Second, 5076); err != nil {
t.Fatal(err)
}
}
@@ -245,8 +302,8 @@ func TestTutOsipsCallsCall1007To1002(t *testing.T) {
if !*testCalls {
return
}
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1007@127.0.0.1", Username: "1007", Password: "CGRateS.org", Realm: "*"}, "sip:1002@127.0.0.1",
"sip:127.0.0.1:5060", time.Duration(66)*time.Second, 5076); err != nil {
if err := engine.PjsuaCallUri(&engine.PjsuaAccount{Id: "sip:1007@127.0.0.1", Username: "1007", Password: "1234", Realm: "*"}, "sip:1002@127.0.0.1",
"sip:127.0.0.1:5060", time.Duration(66)*time.Second, 5077); err != nil {
t.Fatal(err)
}
}
@@ -269,17 +326,20 @@ func TestTutOsipsCallsAccount1001(t *testing.T) {
}

// Make sure account was debited properly
func TestTutOsipsCallsCdrs(t *testing.T) {
func TestTutOsipsCalls1001Cdrs(t *testing.T) {
if !*testCalls {
return
}
var reply []*engine.ExternalCdr
req := utils.RpcCdrsFilter{Accounts: []string{"1001"}, RunIds: []string{utils.META_DEFAULT}}
var cgrId string // Share with getCostDetails
var cCost engine.CallCost
req := utils.RpcCdrsFilter{RunIds: []string{utils.META_DEFAULT}, Accounts: []string{"1001"}, DestPrefixes: []string{"1002"}}
if err := tutOsipsCallsRpc.Call("ApierV2.GetCdrs", req, &reply); err != nil {
t.Error("Unexpected error: ", err.Error())
} else if len(reply) != 1 {
t.Error("Unexpected number of CDRs returned: ", len(reply))
} else {
cgrId = reply[0].CgrId
if reply[0].CdrSource != "OSIPS_E_ACC_EVENT" {
t.Errorf("Unexpected CdrSource for CDR: %+v", reply[0])
}
@@ -289,11 +349,47 @@ func TestTutOsipsCallsCdrs(t *testing.T) {
if reply[0].Usage != "67" { // Usage as seconds
t.Errorf("Unexpected Usage for CDR: %+v", reply[0])
}
if reply[0].Cost == -1.0 { // Cost was not calculated
t.Errorf("Unexpected Cost for CDR: %+v", reply[0])
}
//if reply[0].Supplier != "suppl2" { // Usage as seconds
// t.Errorf("Unexpected Supplier for CDR: %+v", reply[0])
//}
}
// Make sure call cost contains the matched information
if err := tutOsipsCallsRpc.Call("ApierV2.GetCallCostLog", utils.AttrGetCallCost{CgrId: cgrId}, &cCost); err != nil {
t.Error("Unexpected error: ", err.Error())
} else if utils.IsSliceMember([]string{cCost.Timespans[0].MatchedSubject, cCost.Timespans[0].MatchedPrefix, cCost.Timespans[0].MatchedDestId}, "") {
t.Errorf("Unexpected Matched* for CallCost: %+v", cCost.Timespans[0])
}

req = utils.RpcCdrsFilter{RunIds: []string{utils.META_DEFAULT}, Accounts: []string{"1001"}, DestPrefixes: []string{"1003"}}
if err := tutOsipsCallsRpc.Call("ApierV2.GetCdrs", req, &reply); err != nil {
t.Error("Unexpected error: ", err.Error())
} else if len(reply) != 1 {
t.Error("Unexpected number of CDRs returned: ", len(reply))
} else {
cgrId = reply[0].CgrId
if reply[0].ReqType != utils.META_PREPAID {
t.Errorf("Unexpected ReqType for CDR: %+v", reply[0])
}
if reply[0].Usage != "65" && reply[0].Usage != "66" { // Usage as seconds
t.Errorf("Unexpected Usage for CDR: %+v", reply[0])
}
if reply[0].Cost != 0 { // Cost was not calculated
t.Errorf("Unexpected Cost for CDR: %+v", reply[0])
}
}
// Make sure call cost contains the matched information
if err := tutOsipsCallsRpc.Call("ApierV2.GetCallCostLog", utils.AttrGetCallCost{CgrId: cgrId}, &cCost); err != nil {
t.Error("Unexpected error: ", err.Error())
} else if utils.IsSliceMember([]string{cCost.Timespans[0].MatchedSubject, cCost.Timespans[0].MatchedPrefix, cCost.Timespans[0].MatchedDestId}, "") {
t.Errorf("Unexpected Matched* for CallCost: %+v", cCost.Timespans[0])
}
req = utils.RpcCdrsFilter{Accounts: []string{"1001"}, RunIds: []string{"derived_run1"}, FilterOnRated: true}
if err := tutOsipsCallsRpc.Call("ApierV2.GetCdrs", req, &reply); err != nil {
t.Error("Unexpected error: ", err.Error())
} else if len(reply) != 1 {
} else if len(reply) != 2 {
t.Error("Unexpected number of CDRs returned: ", len(reply))
} else {
if reply[0].ReqType != utils.META_RATED {
@@ -303,7 +399,16 @@ func TestTutOsipsCallsCdrs(t *testing.T) {
t.Errorf("Unexpected Subject for CDR: %+v", reply[0])
}
}
req = utils.RpcCdrsFilter{Accounts: []string{"1002"}, RunIds: []string{utils.META_DEFAULT}}

}

// Make sure account was debited properly
func TestTutOsipsCalls1002Cdrs(t *testing.T) {
if !*testCalls {
return
}
var reply []*engine.ExternalCdr
req := utils.RpcCdrsFilter{Accounts: []string{"1002"}, RunIds: []string{utils.META_DEFAULT}}
if err := tutOsipsCallsRpc.Call("ApierV2.GetCdrs", req, &reply); err != nil {
t.Error("Unexpected error: ", err.Error())
} else if len(reply) != 1 {
@@ -322,7 +427,15 @@ func TestTutOsipsCallsCdrs(t *testing.T) {
t.Errorf("Unexpected Usage for CDR: %+v", reply[0])
}
}
req = utils.RpcCdrsFilter{Accounts: []string{"1003"}, RunIds: []string{utils.META_DEFAULT}}
}

// Make sure account was debited properly
func TestTutOsipsCalls1003Cdrs(t *testing.T) {
if !*testCalls {
return
}
var reply []*engine.ExternalCdr
req := utils.RpcCdrsFilter{Accounts: []string{"1003"}, RunIds: []string{utils.META_DEFAULT}}
if err := tutOsipsCallsRpc.Call("ApierV2.GetCdrs", req, &reply); err != nil {
t.Error("Unexpected error: ", err.Error())
} else if len(reply) != 1 {
@@ -337,11 +450,20 @@ func TestTutOsipsCallsCdrs(t *testing.T) {
if reply[0].Destination != "1001" {
t.Errorf("Unexpected Destination for CDR: %+v", reply[0])
}
if reply[0].Usage != "63" { // Usage as seconds
if reply[0].Usage != "63" && reply[0].Usage != "64" { // Usage as seconds, sometimes takes a second longer to disconnect
t.Errorf("Unexpected Usage for CDR: %+v", reply[0])
}
}
req = utils.RpcCdrsFilter{Accounts: []string{"1004"}, RunIds: []string{utils.META_DEFAULT}}

}

// Make sure account was debited properly
func TestTutOsipsCalls1004Cdrs(t *testing.T) {
if !*testCalls {
return
}
var reply []*engine.ExternalCdr
req := utils.RpcCdrsFilter{Accounts: []string{"1004"}, RunIds: []string{utils.META_DEFAULT}}
if err := tutOsipsCallsRpc.Call("ApierV2.GetCdrs", req, &reply); err != nil {
t.Error("Unexpected error: ", err.Error())
} else if len(reply) != 1 {
@@ -360,7 +482,16 @@ func TestTutOsipsCallsCdrs(t *testing.T) {
t.Errorf("Unexpected Usage for CDR: %+v", reply[0])
}
}
req = utils.RpcCdrsFilter{Accounts: []string{"1006"}, RunIds: []string{utils.META_DEFAULT}}

}

// Make sure account was debited properly
func TestTutOsipsCalls1006Cdrs(t *testing.T) {
if !*testCalls {
return
}
var reply []*engine.ExternalCdr
req := utils.RpcCdrsFilter{Accounts: []string{"1006"}, RunIds: []string{utils.META_DEFAULT}}
if err := tutOsipsCallsRpc.Call("ApierV2.GetCdrs", req, &reply); err != nil {
t.Error("Unexpected error: ", err.Error())
} else if len(reply) != 1 {
@@ -375,11 +506,22 @@ func TestTutOsipsCallsCdrs(t *testing.T) {
if reply[0].Destination != "1002" {
t.Errorf("Unexpected Destination for CDR: %+v", reply[0])
}
if reply[0].Usage != "64" { // Usage as seconds
if reply[0].Usage != "64" && reply[0].Usage != "65" { // Usage as seconds
t.Errorf("Unexpected Usage for CDR: %+v", reply[0])
}
if reply[0].Cost == -1.0 { // Cost was not calculated
t.Errorf("Unexpected Cost for CDR: %+v", reply[0])
}
}
req = utils.RpcCdrsFilter{Accounts: []string{"1007"}, RunIds: []string{utils.META_DEFAULT}}
}

// Make sure account was debited properly
func TestTutOsipsCalls1007Cdrs(t *testing.T) {
if !*testCalls {
return
}
var reply []*engine.ExternalCdr
req := utils.RpcCdrsFilter{Accounts: []string{"1007"}, RunIds: []string{utils.META_DEFAULT}}
if err := tutOsipsCallsRpc.Call("ApierV2.GetCdrs", req, &reply); err != nil {
t.Error("Unexpected error: ", err.Error())
} else if len(reply) != 1 {
@@ -394,9 +536,12 @@ func TestTutOsipsCallsCdrs(t *testing.T) {
if reply[0].Destination != "1002" {
t.Errorf("Unexpected Destination for CDR: %+v", reply[0])
}
if reply[0].Usage != "66" { // Usage as seconds
if reply[0].Usage != "66" && reply[0].Usage != "67" { // Usage as seconds
t.Errorf("Unexpected Usage for CDR: %+v", reply[0])
}
if reply[0].Cost == -1.0 { // Cost was not calculated
t.Errorf("Unexpected Cost for CDR: %+v", reply[0])
}
}
}


@@ -1,3 +1,9 @@
cgrates (0.9.1~rc7) UNRELEASED; urgency=low

* RC7.

-- DanB <danb@cgrates.org> Wednesday, 3 August 2015 14:04:00 -0600

cgrates (0.9.1~rc6) UNRELEASED; urgency=low

* RC6.

@@ -9,6 +9,6 @@ Homepage: http://cgrates.org
Package: cgrates
Architecture: amd64
Suggests: git, redis-server, mysql-server
Version: 0.9.1-rc6
Version: 0.9.1-rc7
Description: Carrier Grade Real-time Charging System
CGRateS is a very fast and easy scalable real-time charging system for Telecom environments.

@@ -386,7 +386,7 @@ func (fsev FSEvent) AsCallDescriptor() (*engine.CallDescriptor, error) {
Account: fsev.GetAccount(utils.META_DEFAULT),
Subject: fsev.GetSubject(utils.META_DEFAULT),
Destination: fsev.GetDestination(utils.META_DEFAULT),
StartTime: utils.FirstNonEmpty(fsev[SETUP_TIME], fsev[ANSWER_TIME]),
SetupTime: utils.FirstNonEmpty(fsev[SETUP_TIME], fsev[ANSWER_TIME]),
Duration: fsev[DURATION],
}
return lcrReq.AsCallDescriptor()

@@ -193,7 +193,7 @@ func (sm *FSSessionManager) setCgrLcr(ev engine.Event, connId string) error {
TimeStart: startTime,
TimeEnd: startTime.Add(config.CgrConfig().MaxCallDuration),
}
if err := sm.rater.GetLCR(cd, &lcrCost); err != nil {
if err := sm.rater.GetLCR(&engine.AttrGetLcr{CallDescriptor: cd}, &lcrCost); err != nil {
return err
}
supps := []string{}
@@ -238,7 +238,7 @@ func (sm *FSSessionManager) onChannelPark(ev engine.Event, connId string) {
return
}
var lcr engine.LCRCost
if err = sm.Rater().GetLCR(cd, &lcr); err != nil {
if err = sm.Rater().GetLCR(&engine.AttrGetLcr{CallDescriptor: cd}, &lcr); err != nil {
engine.Logger.Info(fmt.Sprintf("<SM-FreeSWITCH> LCR_API_ERROR: %s", err.Error()))
sm.unparkCall(ev.GetUUID(), connId, ev.GetCallDestNr(utils.META_DEFAULT), SYSTEM_ERROR)
}

@@ -106,7 +106,7 @@ func (self *KamailioSessionManager) getSuppliers(kev KamEvent) (string, error) {
return "", errors.New("LCR_PREPROCESS_ERROR")
}
var lcr engine.LCRCost
if err = self.Rater().GetLCR(cd, &lcr); err != nil {
if err = self.Rater().GetLCR(&engine.AttrGetLcr{CallDescriptor: cd}, &lcr); err != nil {
engine.Logger.Info(fmt.Sprintf("<SM-Kamailio> LCR_API_ERROR error: %s", err.Error()))
return "", errors.New("LCR_API_ERROR")
}

@@ -381,7 +381,7 @@ func (kev KamEvent) AsCallDescriptor() (*engine.CallDescriptor, error) {
Account: kev.GetAccount(utils.META_DEFAULT),
Subject: kev.GetSubject(utils.META_DEFAULT),
Destination: kev.GetDestination(utils.META_DEFAULT),
StartTime: utils.FirstNonEmpty(kev[CGR_SETUPTIME], kev[CGR_ANSWERTIME]),
SetupTime: utils.FirstNonEmpty(kev[CGR_SETUPTIME], kev[CGR_ANSWERTIME]),
Duration: kev[CGR_DURATION],
}
return lcrReq.AsCallDescriptor()

@@ -99,7 +99,7 @@ func (mc *MockConnector) GetDerivedMaxSessionTime(*engine.StoredCdr, *float64) e
func (mc *MockConnector) GetSessionRuns(*engine.StoredCdr, *[]*engine.SessionRun) error { return nil }
func (mc *MockConnector) ProcessCdr(*engine.StoredCdr, *string) error { return nil }
func (mc *MockConnector) LogCallCost(*engine.CallCostLog, *string) error { return nil }
func (mc *MockConnector) GetLCR(*engine.CallDescriptor, *engine.LCRCost) error { return nil }
func (mc *MockConnector) GetLCR(*engine.AttrGetLcr, *engine.LCRCost) error { return nil }
func (mc *MockConnector) GetTimeout() time.Duration { return 0 }
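The GetLCR connector method now takes an *engine.AttrGetLcr request wrapper instead of a bare *engine.CallDescriptor, as the hunks above show. A minimal caller sketch, assuming only the CallDescriptor field visible in this diff (the rater variable and the error handling are illustrative, not part of the change):

    // Sketch only: cd is assumed to be an already prepared *engine.CallDescriptor.
    var lcrCost engine.LCRCost
    attrs := &engine.AttrGetLcr{CallDescriptor: cd} // wrap the descriptor in the new request type
    if err := rater.GetLCR(attrs, &lcrCost); err != nil {
        return err // propagate the LCR query error, as the session managers above do
    }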

func TestSessionRefund(t *testing.T) {

@@ -1080,7 +1080,8 @@ type AttrSetAccount struct {
Direction string
Account string
ActionPlanId string
AllowNegative bool
AllowNegative *bool
Disabled *bool
}
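Switching AllowNegative to *bool and adding Disabled *bool lets a caller distinguish "leave the flag as it is" (nil) from an explicit true or false. A minimal sketch of populating the attributes, using only the fields visible in this hunk; the concrete values are assumptions for illustration:

    // Sketch only: nil pointer fields mean "do not change this flag on the account".
    allowNeg := true
    attrs := utils.AttrSetAccount{
        Direction:     "*out", // example values, not taken from this commit
        Account:       "1001",
        AllowNegative: &allowNeg, // explicitly enable negative balances
        Disabled:      nil,       // keep the account's current Disabled state
    }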

type AttrRemoveAccount struct {

@@ -27,7 +27,7 @@ var (
)

const (
VERSION = "0.9.1~rc6"
VERSION = "0.9.1~rc7"
POSTGRES = "postgres"
MYSQL = "mysql"
MONGO = "mongo"