Mirror of https://github.com/cgrates/cgrates.git (synced 2026-02-19 22:28:45 +05:00)
New HttpPoster implementation, moved Logger to utils package
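Note: this commit routes all logging through a single package-level logger; every `engine.Logger` call (and, inside the engine package itself, every bare `Logger` call) becomes `utils.Logger`. The new home of the logger is not shown in the hunks below, so the following is only a sketch, reconstructed from the initialization block the commit deletes from engine (the `LoggerInterface` and `StdLogger` names appear in this diff; everything else is an assumption):

```go
// Sketch only: mirrors the init block this commit removes from the
// engine package. The actual utils implementation is not in this diff.
package utils

import (
	"fmt"
	"log/syslog"
)

// Logger is the package-level logger the rest of the codebase now
// references as utils.Logger.
var Logger LoggerInterface

func init() {
	var err error
	// *syslog.Writer is assumed to satisfy LoggerInterface, as in the
	// deleted engine code.
	Logger, err = syslog.New(syslog.LOG_INFO, "CGRateS")
	if err != nil {
		Logger = new(StdLogger) // fall back to plain stdout/stderr logging
		Logger.Err(fmt.Sprintf("Could not connect to syslog: %v", err))
	}
}
```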
@@ -464,7 +464,7 @@ func (self *ApierV1) LoadTariffPlanFromStorDb(attrs AttrLoadTpFromStorDb, reply
for idx, dc := range dcs {
dcsKeys[idx] = utils.DERIVEDCHARGERS_PREFIX + dc
}
-engine.Logger.Info("ApierV1.LoadTariffPlanFromStorDb, reloading cache.")
+utils.Logger.Info("ApierV1.LoadTariffPlanFromStorDb, reloading cache.")
if err := self.RatingDb.CacheRatingPrefixValues(map[string][]string{
utils.DESTINATION_PREFIX: dstKeys,
utils.RATING_PLAN_PREFIX: rpKeys,
@@ -483,7 +483,7 @@ func (self *ApierV1) LoadTariffPlanFromStorDb(attrs AttrLoadTpFromStorDb, reply
}
aps, _ := dbReader.GetLoadedIds(utils.ACTION_TIMING_PREFIX)
if len(aps) != 0 && self.Sched != nil {
-engine.Logger.Info("ApierV1.LoadTariffPlanFromStorDb, reloading scheduler.")
+utils.Logger.Info("ApierV1.LoadTariffPlanFromStorDb, reloading scheduler.")
self.Sched.LoadActionPlans(self.RatingDb)
self.Sched.Restart()
}
@@ -1010,7 +1010,7 @@ func (self *ApierV1) GetCacheStats(attrs utils.AttrCacheStats, reply *utils.Cach
}
if loadHistInsts, err := self.AccountDb.GetLoadHistory(1, false); err != nil || len(loadHistInsts) == 0 {
if err != nil { // Not really an error here since we only count in cache
-engine.Logger.Err(fmt.Sprintf("ApierV1.GetCacheStats, error on GetLoadHistory: %s"))
+utils.Logger.Err(fmt.Sprintf("ApierV1.GetCacheStats, error on GetLoadHistory: %s"))
}
cs.LastLoadId = utils.NOT_AVAILABLE
cs.LastLoadTime = utils.NOT_AVAILABLE
@@ -1148,7 +1148,7 @@ func (self *ApierV1) LoadTariffPlanFromFolder(attrs utils.AttrLoadTpFromFolder,
dcsKeys[idx] = utils.DERIVEDCHARGERS_PREFIX + dc
}
aps, _ := loader.GetLoadedIds(utils.ACTION_TIMING_PREFIX)
-engine.Logger.Info("ApierV1.LoadTariffPlanFromFolder, reloading cache.")
+utils.Logger.Info("ApierV1.LoadTariffPlanFromFolder, reloading cache.")

if err := self.RatingDb.CacheRatingPrefixValues(map[string][]string{
utils.DESTINATION_PREFIX: dstKeys,
@@ -1167,7 +1167,7 @@ func (self *ApierV1) LoadTariffPlanFromFolder(attrs utils.AttrLoadTpFromFolder,
return err
}
if len(aps) != 0 && self.Sched != nil {
-engine.Logger.Info("ApierV1.LoadTariffPlanFromFolder, reloading scheduler.")
+utils.Logger.Info("ApierV1.LoadTariffPlanFromFolder, reloading scheduler.")
self.Sched.LoadActionPlans(self.RatingDb)
self.Sched.Restart()
}

@@ -35,7 +35,6 @@ import (

"github.com/cgrates/cgrates/cdre"
"github.com/cgrates/cgrates/config"
-"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
)

@@ -224,7 +223,7 @@ func (apier *ApierV1) ReloadCdreConfig(attrs AttrReloadConfig, reply *string) er
cdreReloadStruct := <-apier.Config.ConfigReloads[utils.CDRE] // Get the CDRE reload channel // Read the content of the channel, locking it
apier.Config.CdreProfiles = newCfg.CdreProfiles
apier.Config.ConfigReloads[utils.CDRE] <- cdreReloadStruct // Unlock reloads
-engine.Logger.Info("<CDRE> Configuration reloaded")
+utils.Logger.Info("<CDRE> Configuration reloaded")
*reply = OK
return nil
}

@@ -43,7 +43,7 @@ func (self *ApierV1) GetLcr(lcrReq engine.LcrRequest, lcrReply *engine.LcrReply)
lcrReply.Strategy = lcrQried.Entry.Strategy
for _, qriedSuppl := range lcrQried.SupplierCosts {
if qriedSuppl.Error != "" {
-engine.Logger.Err(fmt.Sprintf("LCR_ERROR: supplier <%s>, error <%s>", qriedSuppl.Supplier, qriedSuppl.Error))
+utils.Logger.Err(fmt.Sprintf("LCR_ERROR: supplier <%s>, error <%s>", qriedSuppl.Supplier, qriedSuppl.Error))
if !lcrReq.IgnoreErrors {
return fmt.Errorf("%s:%s", utils.ErrServerError.Error(), "LCR_COMPUTE_ERRORS")
}

@@ -216,7 +216,7 @@ func (self *ApierV2) LoadTariffPlanFromFolder(attrs utils.AttrLoadTpFromFolder,
dcsKeys[idx] = utils.DERIVEDCHARGERS_PREFIX + dc
}
aps, _ := loader.GetLoadedIds(utils.ACTION_TIMING_PREFIX)
-engine.Logger.Info("ApierV1.LoadTariffPlanFromFolder, reloading cache.")
+utils.Logger.Info("ApierV1.LoadTariffPlanFromFolder, reloading cache.")

if err := self.RatingDb.CacheRatingPrefixValues(map[string][]string{
utils.DESTINATION_PREFIX: dstKeys,
@@ -235,7 +235,7 @@ func (self *ApierV2) LoadTariffPlanFromFolder(attrs utils.AttrLoadTpFromFolder,
return err
}
if len(aps) != 0 && self.Sched != nil {
-engine.Logger.Info("ApierV1.LoadTariffPlanFromFolder, reloading scheduler.")
+utils.Logger.Info("ApierV1.LoadTariffPlanFromFolder, reloading scheduler.")
self.Sched.LoadActionPlans(self.RatingDb)
self.Sched.Restart()
}
cdrc/cdrc.go (30 changed lines)
@@ -179,7 +179,7 @@ func (self *Cdrc) Run() error {
for {
select {
case <-self.closeChan: // Exit, reinject closeChan for other CDRCs
-engine.Logger.Info(fmt.Sprintf("<Cdrc> Shutting down CDRC on path %s.", self.cdrInDir))
+utils.Logger.Info(fmt.Sprintf("<Cdrc> Shutting down CDRC on path %s.", self.cdrInDir))
return nil
default:
}
@@ -199,35 +199,35 @@ func (self *Cdrc) trackCDRFiles() (err error) {
if err != nil {
return
}
-engine.Logger.Info(fmt.Sprintf("<Cdrc> Monitoring %s for file moves.", self.cdrInDir))
+utils.Logger.Info(fmt.Sprintf("<Cdrc> Monitoring %s for file moves.", self.cdrInDir))
for {
select {
case <-self.closeChan: // Exit, reinject closeChan for other CDRCs
-engine.Logger.Info(fmt.Sprintf("<Cdrc> Shutting down CDRC on path %s.", self.cdrInDir))
+utils.Logger.Info(fmt.Sprintf("<Cdrc> Shutting down CDRC on path %s.", self.cdrInDir))
return nil
case ev := <-watcher.Events:
if ev.Op&fsnotify.Create == fsnotify.Create && (self.cdrFormat != FS_CSV || path.Ext(ev.Name) != ".csv") {
go func() { //Enable async processing here
if err = self.processFile(ev.Name); err != nil {
-engine.Logger.Err(fmt.Sprintf("Processing file %s, error: %s", ev.Name, err.Error()))
+utils.Logger.Err(fmt.Sprintf("Processing file %s, error: %s", ev.Name, err.Error()))
}
}()
}
case err := <-watcher.Errors:
-engine.Logger.Err(fmt.Sprintf("Inotify error: %s", err.Error()))
+utils.Logger.Err(fmt.Sprintf("Inotify error: %s", err.Error()))
}
}
}

// One run over the CDR folder
func (self *Cdrc) processCdrDir() error {
-engine.Logger.Info(fmt.Sprintf("<Cdrc> Parsing folder %s for CDR files.", self.cdrInDir))
+utils.Logger.Info(fmt.Sprintf("<Cdrc> Parsing folder %s for CDR files.", self.cdrInDir))
filesInDir, _ := ioutil.ReadDir(self.cdrInDir)
for _, file := range filesInDir {
if self.cdrFormat != FS_CSV || path.Ext(file.Name()) != ".csv" {
go func() { //Enable async processing here
if err := self.processFile(path.Join(self.cdrInDir, file.Name())); err != nil {
-engine.Logger.Err(fmt.Sprintf("Processing file %s, error: %s", file, err.Error()))
+utils.Logger.Err(fmt.Sprintf("Processing file %s, error: %s", file, err.Error()))
}
}()
}
@@ -242,11 +242,11 @@ func (self *Cdrc) processFile(filePath string) error {
defer func() { self.maxOpenFiles <- processFile }()
}
_, fn := path.Split(filePath)
-engine.Logger.Info(fmt.Sprintf("<Cdrc> Parsing: %s", filePath))
+utils.Logger.Info(fmt.Sprintf("<Cdrc> Parsing: %s", filePath))
file, err := os.Open(filePath)
defer file.Close()
if err != nil {
-engine.Logger.Crit(err.Error())
+utils.Logger.Crit(err.Error())
return err
}
var recordsProcessor RecordsProcessor
@@ -270,19 +270,19 @@ func (self *Cdrc) processFile(filePath string) error {
break
}
if err != nil {
-engine.Logger.Err(fmt.Sprintf("<Cdrc> Row %d, error: %s", rowNr, err.Error()))
+utils.Logger.Err(fmt.Sprintf("<Cdrc> Row %d, error: %s", rowNr, err.Error()))
continue
}
for _, storedCdr := range cdrs { // Send CDRs to CDRS
var reply string
if self.dfltCdrcCfg.DryRun {
-engine.Logger.Info(fmt.Sprintf("<Cdrc> DryRun CDR: %+v", storedCdr))
+utils.Logger.Info(fmt.Sprintf("<Cdrc> DryRun CDR: %+v", storedCdr))
continue
}
if err := self.cdrs.ProcessCdr(storedCdr, &reply); err != nil {
-engine.Logger.Err(fmt.Sprintf("<Cdrc> Failed sending CDR, %+v, error: %s", storedCdr, err.Error()))
+utils.Logger.Err(fmt.Sprintf("<Cdrc> Failed sending CDR, %+v, error: %s", storedCdr, err.Error()))
} else if reply != "OK" {
-engine.Logger.Err(fmt.Sprintf("<Cdrc> Received unexpected reply for CDR, %+v, reply: %s", storedCdr, reply))
+utils.Logger.Err(fmt.Sprintf("<Cdrc> Received unexpected reply for CDR, %+v, reply: %s", storedCdr, reply))
}
cdrsPosted += 1
}
@@ -290,10 +290,10 @@ func (self *Cdrc) processFile(filePath string) error {
// Finished with file, move it to processed folder
newPath := path.Join(self.cdrOutDir, fn)
if err := os.Rename(filePath, newPath); err != nil {
-engine.Logger.Err(err.Error())
+utils.Logger.Err(err.Error())
return err
}
-engine.Logger.Info(fmt.Sprintf("Finished processing %s, moved to %s. Total records processed: %d, CDRs posted: %d, run duration: %s",
+utils.Logger.Info(fmt.Sprintf("Finished processing %s, moved to %s. Total records processed: %d, CDRs posted: %d, run duration: %s",
fn, newPath, recordsProcessor.ProcessedRecordsNr(), cdrsPosted, time.Now().Sub(timeStart)))
return nil
}
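The trackCDRFiles hunks above pair an fsnotify watch loop with one goroutine per created file. A standalone sketch of that pattern follows; the import path and the `process` callback are assumptions, and only `watcher.Events`, `watcher.Errors`, and the `fsnotify.Create` check come from the diff:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify" // assumption: modern path for the fsnotify package used by cdrc
)

// watchDir mirrors the loop in trackCDRFiles: react to Create events,
// hand each new file to a goroutine, and surface watcher errors.
func watchDir(dir string, process func(path string) error) error {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	defer watcher.Close()
	if err := watcher.Add(dir); err != nil {
		return err
	}
	for {
		select {
		case ev := <-watcher.Events:
			if ev.Op&fsnotify.Create == fsnotify.Create {
				go func(name string) { // async processing, as in the diff
					if err := process(name); err != nil {
						log.Printf("Processing file %s, error: %s", name, err)
					}
				}(ev.Name)
			}
		case err := <-watcher.Errors:
			log.Printf("Inotify error: %s", err)
		}
	}
}
```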
@@ -111,14 +111,14 @@ func (self *PartialRecordsCache) dumpUnpairedRecords(fileName string) error {
unpairedFilePath := path.Join(self.cdrOutDir, fileName+UNPAIRED_SUFFIX)
fileOut, err := os.Create(unpairedFilePath)
if err != nil {
-engine.Logger.Err(fmt.Sprintf("<Cdrc> Failed creating %s, error: %s", unpairedFilePath, err.Error()))
+utils.Logger.Err(fmt.Sprintf("<Cdrc> Failed creating %s, error: %s", unpairedFilePath, err.Error()))
return nil, err
}
csvWriter := csv.NewWriter(fileOut)
csvWriter.Comma = self.csvSep
for _, pr := range self.partialRecords[fileName] {
if err := csvWriter.Write(pr.Values); err != nil {
-engine.Logger.Err(fmt.Sprintf("<Cdrc> Failed writing unpaired record %v to file: %s, error: %s", pr, unpairedFilePath, err.Error()))
+utils.Logger.Err(fmt.Sprintf("<Cdrc> Failed writing unpaired record %v to file: %s, error: %s", pr, unpairedFilePath, err.Error()))
return nil, err
}
}
cdrc/fwv.go (12 changed lines)
@@ -87,12 +87,12 @@ func (self *FwvRecordsProcessor) ProcessNextRecord() ([]*engine.StoredCdr, error
defer func() { self.offset += self.lineLen }() // Schedule increasing the offset once we are out from processing the record
if self.offset == 0 { // First time, set the necessary offsets
if err := self.setLineLen(); err != nil {
-engine.Logger.Err(fmt.Sprintf("<Cdrc> Row 0, error: cannot set lineLen: %s", err.Error()))
+utils.Logger.Err(fmt.Sprintf("<Cdrc> Row 0, error: cannot set lineLen: %s", err.Error()))
return nil, io.EOF
}
if len(self.dfltCfg.TrailerFields) != 0 {
if fi, err := self.file.Stat(); err != nil {
-engine.Logger.Err(fmt.Sprintf("<Cdrc> Row 0, error: cannot get file stats: %s", err.Error()))
+utils.Logger.Err(fmt.Sprintf("<Cdrc> Row 0, error: cannot get file stats: %s", err.Error()))
return nil, err
} else {
self.trailerOffset = fi.Size() - self.lineLen
@@ -100,7 +100,7 @@ func (self *FwvRecordsProcessor) ProcessNextRecord() ([]*engine.StoredCdr, error
}
if len(self.dfltCfg.HeaderFields) != 0 { // ToDo: Process here the header fields
if err := self.processHeader(); err != nil {
-engine.Logger.Err(fmt.Sprintf("<Cdrc> Row 0, error reading header: %s", err.Error()))
+utils.Logger.Err(fmt.Sprintf("<Cdrc> Row 0, error reading header: %s", err.Error()))
return nil, io.EOF
}
return nil, nil
@@ -109,7 +109,7 @@ func (self *FwvRecordsProcessor) ProcessNextRecord() ([]*engine.StoredCdr, error
recordCdrs := make([]*engine.StoredCdr, 0) // More CDRs based on the number of filters and field templates
if self.trailerOffset != 0 && self.offset >= self.trailerOffset {
if err := self.processTrailer(); err != nil && err != io.EOF {
-engine.Logger.Err(fmt.Sprintf("<Cdrc> Read trailer error: %s ", err.Error()))
+utils.Logger.Err(fmt.Sprintf("<Cdrc> Read trailer error: %s ", err.Error()))
}
return nil, io.EOF
}
@@ -118,7 +118,7 @@ func (self *FwvRecordsProcessor) ProcessNextRecord() ([]*engine.StoredCdr, error
if err != nil {
return nil, err
} else if nRead != len(buf) {
-engine.Logger.Err(fmt.Sprintf("<Cdrc> Could not read complete line, have instead: %s", string(buf)))
+utils.Logger.Err(fmt.Sprintf("<Cdrc> Could not read complete line, have instead: %s", string(buf)))
return nil, io.EOF
}
self.processedRecordsNr += 1
@@ -247,6 +247,6 @@ func (self *FwvRecordsProcessor) processTrailer() error {
} else if nRead != len(buf) {
return fmt.Errorf("In trailer, line len: %d, have read: %d", self.lineLen, nRead)
}
-//engine.Logger.Debug(fmt.Sprintf("Have read trailer: <%q>", string(buf)))
+//utils.Logger.Debug(fmt.Sprintf("Have read trailer: <%q>", string(buf)))
return nil
}
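The fixed-width (FWV) processor above drives everything off byte offsets: each record occupies exactly `lineLen` bytes, the offset advances by `lineLen` per record (the `defer` at the top of ProcessNextRecord), and when trailer fields are configured the trailer is the file's final `lineLen` bytes. A small sketch of that arithmetic, with field names taken from the diff and types assumed:

```go
package main

// fwvOffsets captures the assumed offset bookkeeping for a fixed-width
// CDR file, as implied by the ProcessNextRecord hunks above.
type fwvOffsets struct {
	offset        int64 // byte position of the record about to be read
	lineLen       int64 // fixed record length, measured once from the first line
	trailerOffset int64 // fileSize - lineLen, or 0 when no trailer is configured
}

// next advances past one record, mirroring
// `defer func() { self.offset += self.lineLen }()`.
func (o *fwvOffsets) next() { o.offset += o.lineLen }

// atTrailer reports whether the reader has reached the trailer record,
// mirroring `self.trailerOffset != 0 && self.offset >= self.trailerOffset`.
func (o *fwvOffsets) atTrailer() bool {
	return o.trailerOffset != 0 && o.offset >= o.trailerOffset
}
```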
@@ -267,12 +267,12 @@ func (cdre *CdrExporter) composeHeader() error {
return fmt.Errorf("Unsupported field type: %s", cfgFld.Type)
}
if err != nil {
-engine.Logger.Err(fmt.Sprintf("<CdreFw> Cannot export CDR header, field %s, error: %s", cfgFld.Tag, err.Error()))
+utils.Logger.Err(fmt.Sprintf("<CdreFw> Cannot export CDR header, field %s, error: %s", cfgFld.Tag, err.Error()))
return err
}
fmtOut := outVal
if fmtOut, err = FmtFieldWidth(outVal, cfgFld.Width, cfgFld.Strip, cfgFld.Padding, cfgFld.Mandatory); err != nil {
-engine.Logger.Err(fmt.Sprintf("<CdreFw> Cannot export CDR header, field %s, error: %s", cfgFld.Tag, err.Error()))
+utils.Logger.Err(fmt.Sprintf("<CdreFw> Cannot export CDR header, field %s, error: %s", cfgFld.Tag, err.Error()))
return err
}
cdre.header = append(cdre.header, fmtOut)
@@ -296,12 +296,12 @@ func (cdre *CdrExporter) composeTrailer() error {
return fmt.Errorf("Unsupported field type: %s", cfgFld.Type)
}
if err != nil {
-engine.Logger.Err(fmt.Sprintf("<CdreFw> Cannot export CDR trailer, field: %s, error: %s", cfgFld.Tag, err.Error()))
+utils.Logger.Err(fmt.Sprintf("<CdreFw> Cannot export CDR trailer, field: %s, error: %s", cfgFld.Tag, err.Error()))
return err
}
fmtOut := outVal
if fmtOut, err = FmtFieldWidth(outVal, cfgFld.Width, cfgFld.Strip, cfgFld.Padding, cfgFld.Mandatory); err != nil {
-engine.Logger.Err(fmt.Sprintf("<CdreFw> Cannot export CDR trailer, field: %s, error: %s", cfgFld.Tag, err.Error()))
+utils.Logger.Err(fmt.Sprintf("<CdreFw> Cannot export CDR trailer, field: %s, error: %s", cfgFld.Tag, err.Error()))
return err
}
cdre.trailer = append(cdre.trailer, fmtOut)
@@ -362,12 +362,12 @@ func (cdre *CdrExporter) processCdr(cdr *engine.StoredCdr) error {
}
}
if err != nil {
-engine.Logger.Err(fmt.Sprintf("<CdreFw> Cannot export CDR with cgrid: %s and runid: %s, error: %s", cdr.CgrId, cdr.MediationRunId, err.Error()))
+utils.Logger.Err(fmt.Sprintf("<CdreFw> Cannot export CDR with cgrid: %s and runid: %s, error: %s", cdr.CgrId, cdr.MediationRunId, err.Error()))
return err
}
fmtOut := outVal
if fmtOut, err = FmtFieldWidth(outVal, cfgFld.Width, cfgFld.Strip, cfgFld.Padding, cfgFld.Mandatory); err != nil {
-engine.Logger.Err(fmt.Sprintf("<CdreFw> Cannot export CDR with cgrid: %s, runid: %s, fieldName: %s, fieldValue: %s, error: %s", cdr.CgrId, cdr.MediationRunId, cfgFld.Tag, outVal, err.Error()))
+utils.Logger.Err(fmt.Sprintf("<CdreFw> Cannot export CDR with cgrid: %s, runid: %s, fieldName: %s, fieldValue: %s, error: %s", cdr.CgrId, cdr.MediationRunId, cfgFld.Tag, outVal, err.Error()))
return err
}
cdrRow[idx] += fmtOut
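Both composer hunks funnel every value through `FmtFieldWidth` before appending it to the header or trailer row. A hedged usage sketch, placed in the cdre package: the call shape is taken from the diff, but the parameter types (int width, string strip/padding, bool mandatory) are assumptions:

```go
// formatField is a hypothetical helper illustrating the FmtFieldWidth
// call pattern above: pad or trim one field value to its configured
// fixed width before it joins the export row.
func formatField(outVal string, width int, strip, padding string, mandatory bool) (string, error) {
	fmtOut, err := FmtFieldWidth(outVal, width, strip, padding, mandatory)
	if err != nil {
		utils.Logger.Err(fmt.Sprintf("<CdreFw> cannot format field %q, error: %s", outVal, err.Error()))
		return "", err
	}
	return fmtOut, nil
}
```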
@@ -80,7 +80,7 @@ func startCdrcs(internalCdrSChan chan *engine.CdrServer, internalRaterChan chan
break
case <-cfg.ConfigReloads[utils.CDRC]: // Consume the load request and wait for a new one
if cdrcInitialized {
-engine.Logger.Info("<CDRC> Configuration reload")
+utils.Logger.Info("<CDRC> Configuration reload")
close(cdrcChildrenChan) // Stop all the children of the previous run
}
cdrcChildrenChan = make(chan struct{})
@@ -118,7 +118,7 @@ func startCdrc(internalCdrSChan chan *engine.CdrServer, internalRaterChan chan *
} else {
conn, err := rpcclient.NewRpcClient("tcp", cdrcCfg.Cdrs, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB)
if err != nil {
-engine.Logger.Crit(fmt.Sprintf("<CDRC> Could not connect to CDRS via RPC: %v", err))
+utils.Logger.Crit(fmt.Sprintf("<CDRC> Could not connect to CDRS via RPC: %v", err))
exitChan <- true
return
}
@@ -126,18 +126,18 @@ func startCdrc(internalCdrSChan chan *engine.CdrServer, internalRaterChan chan *
}
cdrc, err := cdrc.NewCdrc(cdrcCfgs, httpSkipTlsCheck, cdrsConn, closeChan, cfg.DefaultTimezone)
if err != nil {
-engine.Logger.Crit(fmt.Sprintf("Cdrc config parsing error: %s", err.Error()))
+utils.Logger.Crit(fmt.Sprintf("Cdrc config parsing error: %s", err.Error()))
exitChan <- true
return
}
if err := cdrc.Run(); err != nil {
-engine.Logger.Crit(fmt.Sprintf("Cdrc run error: %s", err.Error()))
+utils.Logger.Crit(fmt.Sprintf("Cdrc run error: %s", err.Error()))
exitChan <- true // If run stopped, something is bad, stop the application
}
}

func startSmFreeSWITCH(internalRaterChan chan *engine.Responder, cdrDb engine.CdrStorage, exitChan chan bool) {
-engine.Logger.Info("Starting CGRateS SM-FreeSWITCH service.")
+utils.Logger.Info("Starting CGRateS SM-FreeSWITCH service.")
var raterConn, cdrsConn engine.Connector
var client *rpcclient.RpcClient
var err error
@@ -150,7 +150,7 @@ func startSmFreeSWITCH(internalRaterChan chan *engine.Responder, cdrDb engine.Cd
} else {
client, err = rpcclient.NewRpcClient("tcp", raterCfg.Server, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB)
if err != nil { //Connected so no need to reiterate
-engine.Logger.Crit(fmt.Sprintf("<SM-FreeSWITCH> Could not connect to rater via RPC: %v", err))
+utils.Logger.Crit(fmt.Sprintf("<SM-FreeSWITCH> Could not connect to rater via RPC: %v", err))
exitChan <- true
return
}
@@ -169,7 +169,7 @@ func startSmFreeSWITCH(internalRaterChan chan *engine.Responder, cdrDb engine.Cd
} else {
client, err = rpcclient.NewRpcClient("tcp", cdrsCfg.Server, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB)
if err != nil {
-engine.Logger.Crit(fmt.Sprintf("<SM-FreeSWITCH> Could not connect to CDRS via RPC: %v", err))
+utils.Logger.Crit(fmt.Sprintf("<SM-FreeSWITCH> Could not connect to CDRS via RPC: %v", err))
exitChan <- true
return
}
@@ -181,13 +181,13 @@ func startSmFreeSWITCH(internalRaterChan chan *engine.Responder, cdrDb engine.Cd
sms = append(sms, sm)
smRpc.SMs = append(smRpc.SMs, sm)
if err = sm.Connect(); err != nil {
-engine.Logger.Err(fmt.Sprintf("<SessionManager> error: %s!", err))
+utils.Logger.Err(fmt.Sprintf("<SessionManager> error: %s!", err))
}
exitChan <- true
}

func startSmKamailio(internalRaterChan chan *engine.Responder, cdrDb engine.CdrStorage, exitChan chan bool) {
-engine.Logger.Info("Starting CGRateS SM-Kamailio service.")
+utils.Logger.Info("Starting CGRateS SM-Kamailio service.")
var raterConn, cdrsConn engine.Connector
var client *rpcclient.RpcClient
var err error
@@ -200,7 +200,7 @@ func startSmKamailio(internalRaterChan chan *engine.Responder, cdrDb engine.CdrS
} else {
client, err = rpcclient.NewRpcClient("tcp", raterCfg.Server, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB)
if err != nil { //Connected so no need to reiterate
-engine.Logger.Crit(fmt.Sprintf("<SM-FreeSWITCH> Could not connect to rater via RPC: %v", err))
+utils.Logger.Crit(fmt.Sprintf("<SM-FreeSWITCH> Could not connect to rater via RPC: %v", err))
exitChan <- true
return
}
@@ -219,7 +219,7 @@ func startSmKamailio(internalRaterChan chan *engine.Responder, cdrDb engine.CdrS
} else {
client, err = rpcclient.NewRpcClient("tcp", cdrsCfg.Server, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB)
if err != nil {
-engine.Logger.Crit(fmt.Sprintf("<SM-FreeSWITCH> Could not connect to CDRS via RPC: %v", err))
+utils.Logger.Crit(fmt.Sprintf("<SM-FreeSWITCH> Could not connect to CDRS via RPC: %v", err))
exitChan <- true
return
}
@@ -231,13 +231,13 @@ func startSmKamailio(internalRaterChan chan *engine.Responder, cdrDb engine.CdrS
sms = append(sms, sm)
smRpc.SMs = append(smRpc.SMs, sm)
if err = sm.Connect(); err != nil {
-engine.Logger.Err(fmt.Sprintf("<SessionManager> error: %s!", err))
+utils.Logger.Err(fmt.Sprintf("<SessionManager> error: %s!", err))
}
exitChan <- true
}

func startSmOpenSIPS(internalRaterChan chan *engine.Responder, cdrDb engine.CdrStorage, exitChan chan bool) {
-engine.Logger.Info("Starting CGRateS SM-OpenSIPS service.")
+utils.Logger.Info("Starting CGRateS SM-OpenSIPS service.")
var raterConn, cdrsConn engine.Connector
var client *rpcclient.RpcClient
var err error
@@ -250,7 +250,7 @@ func startSmOpenSIPS(internalRaterChan chan *engine.Responder, cdrDb engine.CdrS
} else {
client, err = rpcclient.NewRpcClient("tcp", raterCfg.Server, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB)
if err != nil { //Connected so no need to reiterate
-engine.Logger.Crit(fmt.Sprintf("<SM-FreeSWITCH> Could not connect to rater via RPC: %v", err))
+utils.Logger.Crit(fmt.Sprintf("<SM-FreeSWITCH> Could not connect to rater via RPC: %v", err))
exitChan <- true
return
}
@@ -269,7 +269,7 @@ func startSmOpenSIPS(internalRaterChan chan *engine.Responder, cdrDb engine.CdrS
} else {
client, err = rpcclient.NewRpcClient("tcp", cdrsCfg.Server, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB)
if err != nil {
-engine.Logger.Crit(fmt.Sprintf("<SM-FreeSWITCH> Could not connect to CDRS via RPC: %v", err))
+utils.Logger.Crit(fmt.Sprintf("<SM-FreeSWITCH> Could not connect to CDRS via RPC: %v", err))
exitChan <- true
return
}
@@ -281,7 +281,7 @@ func startSmOpenSIPS(internalRaterChan chan *engine.Responder, cdrDb engine.CdrS
sms = append(sms, sm)
smRpc.SMs = append(smRpc.SMs, sm)
if err := sm.Connect(); err != nil {
-engine.Logger.Err(fmt.Sprintf("<SM-OpenSIPS> error: %s!", err))
+utils.Logger.Err(fmt.Sprintf("<SM-OpenSIPS> error: %s!", err))
}
exitChan <- true
}
@@ -290,7 +290,7 @@ func startCDRS(internalCdrSChan chan *engine.CdrServer, logDb engine.LogStorage,
internalRaterChan chan *engine.Responder, internalPubSubSChan chan engine.PublisherSubscriber,
internalUserSChan chan engine.UserService, internalAliaseSChan chan engine.AliasService,
internalCdrStatSChan chan engine.StatsInterface, server *engine.Server, exitChan chan bool) {
-engine.Logger.Info("Starting CGRateS CDRS service.")
+utils.Logger.Info("Starting CGRateS CDRS service.")
var err error
var client *rpcclient.RpcClient
// Rater connection init
@@ -302,7 +302,7 @@ func startCDRS(internalCdrSChan chan *engine.CdrServer, logDb engine.LogStorage,
} else if len(cfg.CDRSRater) != 0 {
client, err = rpcclient.NewRpcClient("tcp", cfg.CDRSRater, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB)
if err != nil {
-engine.Logger.Crit(fmt.Sprintf("<CDRS> Could not connect to rater: %s", err.Error()))
+utils.Logger.Crit(fmt.Sprintf("<CDRS> Could not connect to rater: %s", err.Error()))
exitChan <- true
return
}
@@ -320,7 +320,7 @@ func startCDRS(internalCdrSChan chan *engine.CdrServer, logDb engine.LogStorage,
} else {
client, err = rpcclient.NewRpcClient("tcp", cfg.CDRSPubSub, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB)
if err != nil {
-engine.Logger.Crit(fmt.Sprintf("<CDRS> Could not connect to pubsub server: %s", err.Error()))
+utils.Logger.Crit(fmt.Sprintf("<CDRS> Could not connect to pubsub server: %s", err.Error()))
exitChan <- true
return
}
@@ -339,7 +339,7 @@ func startCDRS(internalCdrSChan chan *engine.CdrServer, logDb engine.LogStorage,
} else {
client, err = rpcclient.NewRpcClient("tcp", cfg.CDRSUsers, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB)
if err != nil {
-engine.Logger.Crit(fmt.Sprintf("<CDRS> Could not connect to users server: %s", err.Error()))
+utils.Logger.Crit(fmt.Sprintf("<CDRS> Could not connect to users server: %s", err.Error()))
exitChan <- true
return
}
@@ -358,7 +358,7 @@ func startCDRS(internalCdrSChan chan *engine.CdrServer, logDb engine.LogStorage,
} else {
client, err = rpcclient.NewRpcClient("tcp", cfg.CDRSAliases, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB)
if err != nil {
-engine.Logger.Crit(fmt.Sprintf("<CDRS> Could not connect to aliases server: %s", err.Error()))
+utils.Logger.Crit(fmt.Sprintf("<CDRS> Could not connect to aliases server: %s", err.Error()))
exitChan <- true
return
}
@@ -377,7 +377,7 @@ func startCDRS(internalCdrSChan chan *engine.CdrServer, logDb engine.LogStorage,
} else {
client, err = rpcclient.NewRpcClient("tcp", cfg.CDRSStats, cfg.ConnectAttempts, cfg.Reconnects, utils.GOB)
if err != nil {
-engine.Logger.Crit(fmt.Sprintf("<CDRS> Could not connect to stats server: %s", err.Error()))
+utils.Logger.Crit(fmt.Sprintf("<CDRS> Could not connect to stats server: %s", err.Error()))
exitChan <- true
return
}
@@ -386,9 +386,9 @@ func startCDRS(internalCdrSChan chan *engine.CdrServer, logDb engine.LogStorage,
}

cdrServer, _ := engine.NewCdrServer(cfg, cdrDb, raterConn, pubSubConn, usersConn, aliasesConn, statsConn)
-engine.Logger.Info("Registering CDRS HTTP Handlers.")
+utils.Logger.Info("Registering CDRS HTTP Handlers.")
cdrServer.RegisterHanlersToServer(server)
-engine.Logger.Info("Registering CDRS RPC service.")
+utils.Logger.Info("Registering CDRS RPC service.")
cdrSrv := v1.CdrsV1{CdrSrv: cdrServer}
server.RpcRegister(&cdrSrv)
server.RpcRegister(&v2.CdrsV2{CdrsV1: cdrSrv})
@@ -400,7 +400,7 @@ func startCDRS(internalCdrSChan chan *engine.CdrServer, logDb engine.LogStorage,
}

func startScheduler(internalSchedulerChan chan *scheduler.Scheduler, ratingDb engine.RatingStorage, exitChan chan bool) {
-engine.Logger.Info("Starting CGRateS Scheduler.")
+utils.Logger.Info("Starting CGRateS Scheduler.")
sched := scheduler.NewScheduler()
go reloadSchedulerSingnalHandler(sched, ratingDb)
time.Sleep(1)
@@ -420,7 +420,7 @@ func startCdrStats(internalCdrStatSChan chan engine.StatsInterface, ratingDb eng
func startHistoryServer(internalHistorySChan chan history.Scribe, server *engine.Server, exitChan chan bool) {
scribeServer, err := history.NewFileScribe(cfg.HistoryDir, cfg.HistorySaveInterval)
if err != nil {
-engine.Logger.Crit(fmt.Sprintf("<HistoryServer> Could not start, error: %s", err.Error()))
+utils.Logger.Crit(fmt.Sprintf("<HistoryServer> Could not start, error: %s", err.Error()))
exitChan <- true
}
server.RpcRegisterName("ScribeV1", scribeServer)
@@ -438,7 +438,7 @@ func startAliasesServer(internalAliaseSChan chan engine.AliasService, accountDb
aliasesServer := engine.NewAliasHandler(accountDb)
server.RpcRegisterName("AliasesV1", aliasesServer)
if err := accountDb.CacheAccountingPrefixes(utils.ALIASES_PREFIX); err != nil {
-engine.Logger.Crit(fmt.Sprintf("<Aliases> Could not start, error: %s", err.Error()))
+utils.Logger.Crit(fmt.Sprintf("<Aliases> Could not start, error: %s", err.Error()))
exitChan <- true
return
}
@@ -448,7 +448,7 @@ func startAliasesServer(internalAliaseSChan chan engine.AliasService, accountDb
func startUsersServer(internalUserSChan chan engine.UserService, accountDb engine.AccountingStorage, server *engine.Server, exitChan chan bool) {
userServer, err := engine.NewUserMap(accountDb, cfg.UserServerIndexes)
if err != nil {
-engine.Logger.Crit(fmt.Sprintf("<UsersService> Could not start, error: %s", err.Error()))
+utils.Logger.Crit(fmt.Sprintf("<UsersService> Could not start, error: %s", err.Error()))
exitChan <- true
return
}
@@ -485,7 +485,7 @@ func startRpc(server *engine.Server, internalRaterChan chan *engine.Responder,
}

func writePid() {
-engine.Logger.Info(*pidFile)
+utils.Logger.Info(*pidFile)
f, err := os.Create(*pidFile)
if err != nil {
log.Fatal("Could not write pid file: ", err)
@@ -518,7 +518,7 @@ func main() {
}
cfg, err = config.NewCGRConfigFromFolder(*cfgDir)
if err != nil {
-engine.Logger.Crit(fmt.Sprintf("Could not parse config: %s exiting!", err))
+utils.Logger.Crit(fmt.Sprintf("Could not parse config: %s exiting!", err))
return
}
config.SetCgrConfig(cfg) // Share the config object
@@ -540,7 +540,7 @@ func main() {
ratingDb, err = engine.ConfigureRatingStorage(cfg.TpDbType, cfg.TpDbHost, cfg.TpDbPort,
cfg.TpDbName, cfg.TpDbUser, cfg.TpDbPass, cfg.DBDataEncoding)
if err != nil { // Cannot configure getter database, show stopper
-engine.Logger.Crit(fmt.Sprintf("Could not configure dataDb: %s exiting!", err))
+utils.Logger.Crit(fmt.Sprintf("Could not configure dataDb: %s exiting!", err))
return
}
defer ratingDb.Close()
@@ -548,7 +548,7 @@ func main() {
accountDb, err = engine.ConfigureAccountingStorage(cfg.DataDbType, cfg.DataDbHost, cfg.DataDbPort,
cfg.DataDbName, cfg.DataDbUser, cfg.DataDbPass, cfg.DBDataEncoding)
if err != nil { // Cannot configure getter database, show stopper
-engine.Logger.Crit(fmt.Sprintf("Could not configure dataDb: %s exiting!", err))
+utils.Logger.Crit(fmt.Sprintf("Could not configure dataDb: %s exiting!", err))
return
}
defer accountDb.Close()
@@ -558,7 +558,7 @@ func main() {
logDb, err = engine.ConfigureLogStorage(cfg.StorDBType, cfg.StorDBHost, cfg.StorDBPort,
cfg.StorDBName, cfg.StorDBUser, cfg.StorDBPass, cfg.DBDataEncoding, cfg.StorDBMaxOpenConns, cfg.StorDBMaxIdleConns)
if err != nil { // Cannot configure logger database, show stopper
-engine.Logger.Crit(fmt.Sprintf("Could not configure logger database: %s exiting!", err))
+utils.Logger.Crit(fmt.Sprintf("Could not configure logger database: %s exiting!", err))
return
}
defer logDb.Close()
@@ -668,8 +668,8 @@ func main() {

if *pidFile != "" {
if err := os.Remove(*pidFile); err != nil {
-engine.Logger.Warning("Could not remove pid file: " + err.Error())
+utils.Logger.Warning("Could not remove pid file: " + err.Error())
}
}
-engine.Logger.Info("Stopped all components. CGRateS shutdown!")
+utils.Logger.Info("Stopped all components. CGRateS shutdown!")
}
@@ -53,12 +53,12 @@ func startRater(internalRaterChan chan *engine.Responder, internalBalancerChan c
go func() {
defer close(cacheTaskChan)
if err := ratingDb.CacheRatingAll(); err != nil {
-engine.Logger.Crit(fmt.Sprintf("Cache rating error: %s", err.Error()))
+utils.Logger.Crit(fmt.Sprintf("Cache rating error: %s", err.Error()))
exitChan <- true
return
}
if err := accountDb.CacheAccountingPrefixes(); err != nil { // Used to cache load history
-engine.Logger.Crit(fmt.Sprintf("Cache accounting error: %s", err.Error()))
+utils.Logger.Crit(fmt.Sprintf("Cache accounting error: %s", err.Error()))
exitChan <- true
return
}
@@ -76,7 +76,7 @@ func startRater(internalRaterChan chan *engine.Responder, internalBalancerChan c
case sched = <-internalSchedulerChan:
internalSchedulerChan <- sched
case <-time.After(cfg.InternalTtl):
-engine.Logger.Crit("<Rater>: Internal scheduler connection timeout.")
+utils.Logger.Crit("<Rater>: Internal scheduler connection timeout.")
exitChan <- true
return
}
@@ -96,7 +96,7 @@ func startRater(internalRaterChan chan *engine.Responder, internalBalancerChan c
case bal = <-internalBalancerChan:
internalBalancerChan <- bal // Put it back if someone else is interested about
case <-time.After(cfg.InternalTtl):
-engine.Logger.Crit("<Rater>: Internal balancer connection timeout.")
+utils.Logger.Crit("<Rater>: Internal balancer connection timeout.")
exitChan <- true
return
}
@@ -120,12 +120,12 @@ func startRater(internalRaterChan chan *engine.Responder, internalBalancerChan c
case cdrStats = <-internalCdrStatSChan:
internalCdrStatSChan <- cdrStats
case <-time.After(cfg.InternalTtl):
-engine.Logger.Crit("<Rater>: Internal cdrstats connection timeout.")
+utils.Logger.Crit("<Rater>: Internal cdrstats connection timeout.")
exitChan <- true
return
}
} else if cdrStats, err = engine.NewProxyStats(cfg.RaterCdrStats, cfg.ConnectAttempts, -1); err != nil {
-engine.Logger.Crit(fmt.Sprintf("<Rater> Could not connect to cdrstats, error: %s", err.Error()))
+utils.Logger.Crit(fmt.Sprintf("<Rater> Could not connect to cdrstats, error: %s", err.Error()))
exitChan <- true
return
}
@@ -144,12 +144,12 @@ func startRater(internalRaterChan chan *engine.Responder, internalBalancerChan c
case scribeServer = <-internalHistorySChan:
internalHistorySChan <- scribeServer
case <-time.After(cfg.InternalTtl):
-engine.Logger.Crit("<Rater>: Internal historys connection timeout.")
+utils.Logger.Crit("<Rater>: Internal historys connection timeout.")
exitChan <- true
return
}
} else if scribeServer, err = history.NewProxyScribe(cfg.RaterHistoryServer, cfg.ConnectAttempts, -1); err != nil {
-engine.Logger.Crit(fmt.Sprintf("<Rater> Could not connect historys, error: %s", err.Error()))
+utils.Logger.Crit(fmt.Sprintf("<Rater> Could not connect historys, error: %s", err.Error()))
exitChan <- true
return
}
@@ -169,12 +169,12 @@ func startRater(internalRaterChan chan *engine.Responder, internalBalancerChan c
case pubSubServer = <-internalPubSubSChan:
internalPubSubSChan <- pubSubServer
case <-time.After(cfg.InternalTtl):
-engine.Logger.Crit("<Rater>: Internal pubsub connection timeout.")
+utils.Logger.Crit("<Rater>: Internal pubsub connection timeout.")
exitChan <- true
return
}
} else if pubSubServer, err = engine.NewProxyPubSub(cfg.RaterPubSubServer, cfg.ConnectAttempts, -1); err != nil {
-engine.Logger.Crit(fmt.Sprintf("<Rater> Could not connect to pubsubs: %s", err.Error()))
+utils.Logger.Crit(fmt.Sprintf("<Rater> Could not connect to pubsubs: %s", err.Error()))
exitChan <- true
return
}
@@ -194,12 +194,12 @@ func startRater(internalRaterChan chan *engine.Responder, internalBalancerChan c
case aliasesServer = <-internalAliaseSChan:
internalAliaseSChan <- aliasesServer
case <-time.After(cfg.InternalTtl):
-engine.Logger.Crit("<Rater>: Internal aliases connection timeout.")
+utils.Logger.Crit("<Rater>: Internal aliases connection timeout.")
exitChan <- true
return
}
} else if aliasesServer, err = engine.NewProxyAliasService(cfg.RaterAliasesServer, cfg.ConnectAttempts, -1); err != nil {
-engine.Logger.Crit(fmt.Sprintf("<Rater> Could not connect to aliases, error: %s", err.Error()))
+utils.Logger.Crit(fmt.Sprintf("<Rater> Could not connect to aliases, error: %s", err.Error()))
exitChan <- true
return
}
@@ -219,12 +219,12 @@ func startRater(internalRaterChan chan *engine.Responder, internalBalancerChan c
case userServer = <-internalUserSChan:
internalUserSChan <- userServer
case <-time.After(cfg.InternalTtl):
-engine.Logger.Crit("<Rater>: Internal users connection timeout.")
+utils.Logger.Crit("<Rater>: Internal users connection timeout.")
exitChan <- true
return
}
} else if userServer, err = engine.NewProxyUserService(cfg.RaterUserServer, cfg.ConnectAttempts, -1); err != nil {
-engine.Logger.Crit(fmt.Sprintf("<Rater> Could not connect users, error: %s", err.Error()))
+utils.Logger.Crit(fmt.Sprintf("<Rater> Could not connect users, error: %s", err.Error()))
exitChan <- true
return
}
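Every service lookup in startRater above uses the same idiom: receive the shared object from an internal channel, immediately send it back so the next goroutine can obtain it too, and give up after `cfg.InternalTtl`. Extracted into a standalone sketch (generic and the names are illustrative; the diff only shows the select shape):

```go
package main

import (
	"errors"
	"time"
)

// acquire reads a shared service from ch and immediately reinjects it so
// other goroutines can obtain the same instance, mirroring the handshake
// startRater repeats for scheduler, balancer, cdrstats, history, pubsub,
// aliases and users; it gives up after ttl.
func acquire[T any](ch chan T, ttl time.Duration) (T, error) {
	var zero T
	select {
	case svc := <-ch:
		ch <- svc // put it back for the next reader
		return svc, nil
	case <-time.After(ttl):
		return zero, errors.New("internal connection timeout")
	}
}
```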
@@ -28,6 +28,7 @@ import (
"github.com/cgrates/cgrates/balancer2go"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/scheduler"
+"github.com/cgrates/cgrates/utils"
)

/*
@@ -37,7 +38,7 @@ func stopBalancerSignalHandler(bal *balancer2go.Balancer, exitChan chan bool) {
c := make(chan os.Signal)
signal.Notify(c, syscall.SIGTERM, syscall.SIGINT, syscall.SIGQUIT)
sig := <-c
-engine.Logger.Info(fmt.Sprintf("Caught signal %v, sending shutdown to engines\n", sig))
+utils.Logger.Info(fmt.Sprintf("Caught signal %v, sending shutdown to engines\n", sig))
bal.Shutdown("Responder.Shutdown")
exitChan <- true
}
@@ -47,7 +48,7 @@ func generalSignalHandler(internalCdrStatSChan chan engine.StatsInterface, exitC
signal.Notify(c, syscall.SIGTERM, syscall.SIGINT, syscall.SIGQUIT)

sig := <-c
-engine.Logger.Info(fmt.Sprintf("Caught signal %v, shuting down cgr-engine\n", sig))
+utils.Logger.Info(fmt.Sprintf("Caught signal %v, shuting down cgr-engine\n", sig))
var dummyInt int
select {
case cdrStats := <-internalCdrStatSChan:
@@ -66,7 +67,7 @@ func stopRaterSignalHandler(internalCdrStatSChan chan engine.StatsInterface, exi
signal.Notify(c, syscall.SIGTERM, syscall.SIGINT, syscall.SIGQUIT)
sig := <-c

-engine.Logger.Info(fmt.Sprintf("Caught signal %v, unregistering from balancer\n", sig))
+utils.Logger.Info(fmt.Sprintf("Caught signal %v, unregistering from balancer\n", sig))
unregisterFromBalancer(exitChan)
var dummyInt int
select {
@@ -83,15 +84,15 @@ Connects to the balancer and calls unregister RPC method.
func unregisterFromBalancer(exitChan chan bool) {
client, err := rpc.Dial("tcp", cfg.RaterBalancer)
if err != nil {
-engine.Logger.Crit("Cannot contact the balancer!")
+utils.Logger.Crit("Cannot contact the balancer!")
exitChan <- true
return
}
var reply int
-engine.Logger.Info(fmt.Sprintf("Unregistering from balancer %s", cfg.RaterBalancer))
+utils.Logger.Info(fmt.Sprintf("Unregistering from balancer %s", cfg.RaterBalancer))
client.Call("Responder.UnRegisterRater", cfg.RPCGOBListen, &reply)
if err := client.Close(); err != nil {
-engine.Logger.Crit("Could not close balancer unregistration!")
+utils.Logger.Crit("Could not close balancer unregistration!")
exitChan <- true
}
}
@@ -102,18 +103,18 @@ Connects to the balancer and rehisters the engine to the server.
func registerToBalancer(exitChan chan bool) {
client, err := rpc.Dial("tcp", cfg.RaterBalancer)
if err != nil {
-engine.Logger.Crit(fmt.Sprintf("Cannot contact the balancer: %v", err))
+utils.Logger.Crit(fmt.Sprintf("Cannot contact the balancer: %v", err))
exitChan <- true
return
}
var reply int
-engine.Logger.Info(fmt.Sprintf("Registering to balancer %s", cfg.RaterBalancer))
+utils.Logger.Info(fmt.Sprintf("Registering to balancer %s", cfg.RaterBalancer))
client.Call("Responder.RegisterRater", cfg.RPCGOBListen, &reply)
if err := client.Close(); err != nil {
-engine.Logger.Crit("Could not close balancer registration!")
+utils.Logger.Crit("Could not close balancer registration!")
exitChan <- true
}
-engine.Logger.Info("Registration finished!")
+utils.Logger.Info("Registration finished!")
}

// Listens for the HUP system signal and gracefuly reloads the timers from database.
@@ -123,7 +124,7 @@ func reloadSchedulerSingnalHandler(sched *scheduler.Scheduler, getter engine.Rat
signal.Notify(c, syscall.SIGHUP)
sig := <-c

-engine.Logger.Info(fmt.Sprintf("Caught signal %v, reloading action timings.\n", sig))
+utils.Logger.Info(fmt.Sprintf("Caught signal %v, reloading action timings.\n", sig))
sched.LoadActionPlans(getter)
// check the tip of the queue for new actions
sched.Restart()
@@ -140,7 +141,7 @@ func shutdownSessionmanagerSingnalHandler(exitChan chan bool) {

for _, sm := range sms {
if err := sm.Shutdown(); err != nil {
-engine.Logger.Warning(fmt.Sprintf("<SessionManager> %s", err))
+utils.Logger.Warning(fmt.Sprintf("<SessionManager> %s", err))
}
}
exitChan <- true
@@ -134,7 +134,7 @@ func (ub *Account) debitBalanceAction(a *Action, reset bool) error {
sg, err := ratingStorage.GetSharedGroup(a.Balance.SharedGroup, false)
if err != nil || sg == nil {
//than problem
-Logger.Warning(fmt.Sprintf("Could not get shared group: %v", a.Balance.SharedGroup))
+utils.Logger.Warning(fmt.Sprintf("Could not get shared group: %v", a.Balance.SharedGroup))
} else {
if !utils.IsSliceMember(sg.MemberIds, ub.Id) {
// add member and save
@@ -229,7 +229,7 @@ func (account *Account) getAlldBalancesForPrefix(destination, category, balanceT
if b.SharedGroup != "" {
sharedGroup, err := ratingStorage.GetSharedGroup(b.SharedGroup, false)
if err != nil {
-Logger.Warning(fmt.Sprintf("Could not get shared group: %v", b.SharedGroup))
+utils.Logger.Warning(fmt.Sprintf("Could not get shared group: %v", b.SharedGroup))
continue
}
sharedBalances := sharedGroup.GetBalances(destination, category, balanceType, account)
@@ -345,7 +345,7 @@ func (ub *Account) debitCreditBalance(cd *CallDescriptor, count bool, dryRun boo
//log.Printf("After balances CD: %+v", cd)
leftCC, err = cd.getCost()
if err != nil {
-Logger.Err(fmt.Sprintf("Error getting new cost for balance subject: %v", err))
+utils.Logger.Err(fmt.Sprintf("Error getting new cost for balance subject: %v", err))
}
if leftCC.Cost == 0 && len(leftCC.Timespans) > 0 {
cc.Timespans = append(cc.Timespans, leftCC.Timespans...)
@@ -363,7 +363,7 @@ func (ub *Account) debitCreditBalance(cd *CallDescriptor, count bool, dryRun boo
// get the default money balanance
// and go negative on it with the amount still unpaid
if len(leftCC.Timespans) > 0 && leftCC.Cost > 0 && !ub.AllowNegative && !dryRun {
-Logger.Err(fmt.Sprintf("<Rater> Going negative on account %s with AllowNegative: false", cd.GetAccountKey()))
+utils.Logger.Err(fmt.Sprintf("<Rater> Going negative on account %s with AllowNegative: false", cd.GetAccountKey()))
}
for _, ts := range leftCC.Timespans {
if ts.Increments == nil {
@@ -627,7 +627,7 @@ func (account *Account) GetUniqueSharedGroupMembers(cd *CallDescriptor) ([]strin
for _, sgID := range sharedGroupIds {
sharedGroup, err := ratingStorage.GetSharedGroup(sgID, false)
if err != nil {
-Logger.Warning(fmt.Sprintf("Could not get shared group: %v", sgID))
+utils.Logger.Warning(fmt.Sprintf("Could not get shared group: %v", sgID))
return nil, err
}
for _, memberId := range sharedGroup.MemberIds {
@@ -136,11 +136,11 @@ func getActionFunc(typ string) (actionTypeFunc, bool) {
func logAction(ub *Account, sq *StatsQueueTriggered, a *Action, acs Actions) (err error) {
if ub != nil {
body, _ := json.Marshal(ub)
-Logger.Info(fmt.Sprintf("Threshold hit, Balance: %s", body))
+utils.Logger.Info(fmt.Sprintf("Threshold hit, Balance: %s", body))
}
if sq != nil {
body, _ := json.Marshal(sq)
-Logger.Info(fmt.Sprintf("Threshold hit, StatsQueue: %s", body))
+utils.Logger.Info(fmt.Sprintf("Threshold hit, StatsQueue: %s", body))
}
return
}
@@ -246,7 +246,7 @@ func cdrLogAction(acc *Account, sq *StatsQueueTriggered, a *Action, acs Actions)
cdr.ExtraFields[key] = parsedValue
}
}
-//Logger.Debug(fmt.Sprintf("account: %+v, action: %+v, balance: %+v", acc, action, action.Balance))
+//utils.Logger.Debug(fmt.Sprintf("account: %+v, action: %+v, balance: %+v", acc, action, action.Balance))
cdrs = append(cdrs, cdr)
if cdrStorage == nil { // Only save if the cdrStorage is defined
continue
@@ -463,7 +463,7 @@ func callUrlAsync(ub *Account, sq *StatsQueueTriggered, a *Action, acs Actions)
if _, err := utils.HttpJsonPost(a.ExtraParameters, cfg.HttpSkipTlsVerify, o); err == nil {
break // Success, no need to reinterate
} else if i == 4 { // Last iteration, syslog the warning
-Logger.Warning(fmt.Sprintf("<Triggers> WARNING: Failed calling url: [%s], error: [%s], triggered: %s", a.ExtraParameters, err.Error(), o))
+utils.Logger.Warning(fmt.Sprintf("<Triggers> WARNING: Failed calling url: [%s], error: [%s], triggered: %s", a.ExtraParameters, err.Error(), o))
break
}
time.Sleep(time.Duration(i) * time.Minute)
@@ -506,9 +506,9 @@ func mailAsync(ub *Account, sq *StatsQueueTriggered, a *Action, acs Actions) err
break
} else if i == 4 {
if ub != nil {
-Logger.Warning(fmt.Sprintf("<Triggers> WARNING: Failed emailing, params: [%s], error: [%s], BalanceId: %s", a.ExtraParameters, err.Error(), ub.Id))
+utils.Logger.Warning(fmt.Sprintf("<Triggers> WARNING: Failed emailing, params: [%s], error: [%s], BalanceId: %s", a.ExtraParameters, err.Error(), ub.Id))
} else if sq != nil {
-Logger.Warning(fmt.Sprintf("<Triggers> WARNING: Failed emailing, params: [%s], error: [%s], StatsQueueTriggeredId: %s", a.ExtraParameters, err.Error(), sq.Id))
+utils.Logger.Warning(fmt.Sprintf("<Triggers> WARNING: Failed emailing, params: [%s], error: [%s], StatsQueueTriggeredId: %s", a.ExtraParameters, err.Error(), sq.Id))
}
break
}
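The callUrlAsync and mailAsync hunks above share a retry shape: up to five attempts, linear sleep between them, and a warning logged only on the final failure. The commit title also mentions a new HttpPoster implementation, but its API does not appear in these hunks, so the sketch below sticks to the `utils.HttpJsonPost` call that does (its parameter types are assumed):

```go
package main

import (
	"fmt"
	"time"

	"github.com/cgrates/cgrates/utils"
)

// postWithRetry is a hypothetical helper mirroring callUrlAsync above:
// five attempts with linear backoff, warning logged only on the last one.
func postWithRetry(url string, skipTLSVerify bool, content interface{}) {
	for i := 0; i < 5; i++ {
		if _, err := utils.HttpJsonPost(url, skipTLSVerify, content); err == nil {
			return // success, no need to reiterate
		} else if i == 4 { // last attempt: syslog the warning and give up
			utils.Logger.Warning(fmt.Sprintf("failed calling url: [%s], error: [%s]", url, err.Error()))
			return
		}
		time.Sleep(time.Duration(i) * time.Minute) // 0, 1, 2, 3 minutes between attempts, as in the diff
	}
}
```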
@@ -93,7 +93,7 @@ func (at *ActionPlan) GetNextStartTimeOld(now time.Time) (t time.Time) {
var err error
t, err = time.Parse(FORMAT, l)
if err != nil {
-Logger.Err(fmt.Sprintf("Cannot parse action plan's StartTime %v", l))
+utils.Logger.Err(fmt.Sprintf("Cannot parse action plan's StartTime %v", l))
at.stCache = t
return
}
@@ -241,7 +241,7 @@ func (at *ActionPlan) Execute() (err error) {
at.resetStartTimeCache()
aac, err := at.getActions()
if err != nil {
-Logger.Err(fmt.Sprintf("Failed to get actions for %s: %s", at.ActionsId, err))
+utils.Logger.Err(fmt.Sprintf("Failed to get actions for %s: %s", at.ActionsId, err))
return
}
for _, a := range aac {
@@ -253,12 +253,12 @@ func (at *ActionPlan) Execute() (err error) {
for _, accId := range at.AccountIds {
_, err := Guardian.Guard(func() (interface{}, error) {
if err := accountingStorage.RemoveAccount(accId); err != nil {
-Logger.Warning(fmt.Sprintf("Could not remove account Id: %s: %d", accId, err))
+utils.Logger.Warning(fmt.Sprintf("Could not remove account Id: %s: %d", accId, err))
}
return 0, nil
}, 0, accId)
if err != nil {
-Logger.Warning(fmt.Sprintf("Error executing action plan: %v", err))
+utils.Logger.Warning(fmt.Sprintf("Error executing action plan: %v", err))
}
}
continue // do not go to getActionFunc
@@ -268,26 +268,26 @@ func (at *ActionPlan) Execute() (err error) {
if !exists {
// do not allow the action plan to be rescheduled
at.Timing = nil
-Logger.Crit(fmt.Sprintf("Function type %v not available, aborting execution!", a.ActionType))
+utils.Logger.Crit(fmt.Sprintf("Function type %v not available, aborting execution!", a.ActionType))
return
}
for _, accId := range at.AccountIds {
_, err := Guardian.Guard(func() (interface{}, error) {
ub, err := accountingStorage.GetAccount(accId)
if err != nil {
-Logger.Warning(fmt.Sprintf("Could not get user balances for this id: %s. Skipping!", 0, accId))
+utils.Logger.Warning(fmt.Sprintf("Could not get user balances for this id: %s. Skipping!", 0, accId))
return 0, err
} else if ub.Disabled && a.ActionType != ENABLE_ACCOUNT {
return 0, fmt.Errorf("Account %s is disabled", accId)
}
-//Logger.Info(fmt.Sprintf("Executing %v on %+v", a.ActionType, ub))
+//utils.Logger.Info(fmt.Sprintf("Executing %v on %+v", a.ActionType, ub))
err = actionFunction(ub, nil, a, aac)
-//Logger.Info(fmt.Sprintf("After execute, account: %+v", ub))
+//utils.Logger.Info(fmt.Sprintf("After execute, account: %+v", ub))
accountingStorage.SetAccount(ub)
return 0, nil
}, 0, accId)
if err != nil {
-Logger.Warning(fmt.Sprintf("Error executing action plan: %v", err))
+utils.Logger.Warning(fmt.Sprintf("Error executing action plan: %v", err))
}
}
}
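ActionPlan.Execute above wraps every per-account mutation in `Guardian.Guard`, which runs the closure while holding a lock keyed on the account ID (the trailing `0` is the timeout argument), so concurrent action plans cannot modify the same account at once. A hedged sketch of that usage shape inside the engine package; `guardedUpdate` itself is hypothetical:

```go
// guardedUpdate shows the Guardian.Guard call shape from the hunks
// above: the closure runs under a lock keyed on accId (timeout 0), so
// all mutations of that account are serialized across goroutines.
func guardedUpdate(accId string, apply func(ub *Account)) error {
	_, err := Guardian.Guard(func() (interface{}, error) {
		ub, err := accountingStorage.GetAccount(accId)
		if err != nil {
			return 0, err
		}
		apply(ub)                        // caller-supplied mutation
		accountingStorage.SetAccount(ub) // persist under the same lock
		return 0, nil
	}, 0, accId)
	return err
}
```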
@@ -68,7 +68,7 @@ func (at *ActionTrigger) Execute(ub *Account, sq *StatsQueueTriggered) (err erro
aac, err = ratingStorage.GetActions(at.ActionsId, false)
aac.Sort()
if err != nil {
-Logger.Err(fmt.Sprintf("Failed to get actions: %v", err))
+utils.Logger.Err(fmt.Sprintf("Failed to get actions: %v", err))
return
}
at.Executed = true
@@ -80,10 +80,10 @@ func (at *ActionTrigger) Execute(ub *Account, sq *StatsQueueTriggered) (err erro
a.Balance.ExpirationDate, _ = utils.ParseDate(a.ExpirationString)
actionFunction, exists := getActionFunc(a.ActionType)
if !exists {
-Logger.Warning(fmt.Sprintf("Function type %v not available, aborting execution!", a.ActionType))
+utils.Logger.Warning(fmt.Sprintf("Function type %v not available, aborting execution!", a.ActionType))
return
}
-//go Logger.Info(fmt.Sprintf("Executing %v, %v: %v", ub, sq, a))
+//go utils.Logger.Info(fmt.Sprintf("Executing %v, %v: %v", ub, sq, a))
err = actionFunction(ub, sq, a, aac)
if err == nil {
atLeastOneActionExecuted = true
@@ -225,7 +225,7 @@ func (b *Balance) GetMinutesForCredit(origCD *CallDescriptor, initialCredit floa
credit = initialCredit
cc, err := b.GetCost(cd, false)
if err != nil {
-Logger.Err(fmt.Sprintf("Error getting new cost for balance subject: %v", err))
+utils.Logger.Err(fmt.Sprintf("Error getting new cost for balance subject: %v", err))
return 0, credit
}
if cc.deductConnectFee {
@@ -423,7 +423,7 @@ func (b *Balance) DebitUnits(cd *CallDescriptor, ub *Account, moneyBalances Bala
}

if ts.RateInterval == nil {
-Logger.Err(fmt.Sprintf("Nil RateInterval ERROR on TS: %+v, CC: %+v, from CD: %+v", ts, cc, cd))
+utils.Logger.Err(fmt.Sprintf("Nil RateInterval ERROR on TS: %+v, CC: %+v, from CD: %+v", ts, cc, cd))
return nil, errors.New("timespan with no rate interval assigned")
}
maxCost, strategy := ts.RateInterval.GetMaxCost()
@@ -523,7 +523,7 @@ func (b *Balance) DebitMoney(cd *CallDescriptor, ub *Account, count bool, dryRun
}
//log.Printf("TS: %+v", ts)
if ts.RateInterval == nil {
-Logger.Err(fmt.Sprintf("Nil RateInterval ERROR on TS: %+v, CC: %+v, from CD: %+v", ts, cc, cd))
+utils.Logger.Err(fmt.Sprintf("Nil RateInterval ERROR on TS: %+v, CC: %+v, from CD: %+v", ts, cc, cd))
return nil, errors.New("timespan with no rate interval assigned")
}
maxCost, strategy := ts.RateInterval.GetMaxCost()
@@ -22,7 +22,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"log/syslog"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -41,12 +40,6 @@ const (
|
||||
)
|
||||
|
||||
func init() {
|
||||
var err error
|
||||
Logger, err = syslog.New(syslog.LOG_INFO, "CGRateS")
|
||||
if err != nil {
|
||||
Logger = new(utils.StdLogger)
|
||||
Logger.Err(fmt.Sprintf("Could not connect to syslog: %v", err))
|
||||
}
|
||||
if DEBUG {
|
||||
ratingStorage, _ = NewMapStorage()
|
||||
accountingStorage, _ = NewMapStorage()
|
||||
@@ -59,7 +52,6 @@ func init() {
|
||||
}
|
||||
|
||||
var (
|
||||
Logger utils.LoggerInterface
|
||||
ratingStorage RatingStorage
|
||||
accountingStorage AccountingStorage
|
||||
storageLogger LogStorage
|
||||
@@ -345,13 +337,13 @@ func (cd *CallDescriptor) splitInTimeSpans() (timespans []*TimeSpan) {
|
||||
}
|
||||
}
|
||||
}
|
||||
// Logger.Debug(fmt.Sprintf("After SplitByRatingPlan: %+v", timespans))
|
||||
// utils.Logger.Debug(fmt.Sprintf("After SplitByRatingPlan: %+v", timespans))
|
||||
// split on rate intervals
|
||||
for i := 0; i < len(timespans); i++ {
|
||||
//log.Printf("==============%v==================", i)
|
||||
//log.Printf("TS: %+v", timespans[i])
|
||||
rp := timespans[i].ratingInfo
|
||||
// Logger.Debug(fmt.Sprintf("rp: %+v", rp))
|
||||
// utils.Logger.Debug(fmt.Sprintf("rp: %+v", rp))
|
||||
//timespans[i].RatingPlan = nil
|
||||
rp.RateIntervals.Sort()
|
||||
/*for _, interval := range rp.RateIntervals {
|
||||
@@ -383,10 +375,10 @@ func (cd *CallDescriptor) splitInTimeSpans() (timespans []*TimeSpan) {
|
||||
//log.Print(timespans[i].RateInterval.Timing)
|
||||
}
|
||||
|
||||
//Logger.Debug(fmt.Sprintf("After SplitByRateInterval: %+v", timespans))
|
||||
//utils.Logger.Debug(fmt.Sprintf("After SplitByRateInterval: %+v", timespans))
|
||||
//log.Printf("After SplitByRateInterval: %+v", timespans[0].RateInterval.Timing)
|
||||
timespans = cd.roundTimeSpansToIncrement(timespans)
|
||||
// Logger.Debug(fmt.Sprintf("After round: %+v", timespans))
|
||||
// utils.Logger.Debug(fmt.Sprintf("After round: %+v", timespans))
|
||||
//log.Printf("After round: %+v", timespans[0].RateInterval.Timing)
|
||||
return
|
||||
}
|
||||
@@ -475,7 +467,7 @@ func (cd *CallDescriptor) getCost() (*CallCost, error) {
}
err := cd.LoadRatingPlans()
if err != nil {
Logger.Err(fmt.Sprintf("error getting cost for key <%s>: %s", cd.GetKey(cd.Subject), err.Error()))
utils.Logger.Err(fmt.Sprintf("error getting cost for key <%s>: %s", cd.GetKey(cd.Subject), err.Error()))
return &CallCost{Cost: -1}, err
}
timespans := cd.splitInTimeSpans()
@@ -498,7 +490,7 @@ func (cd *CallDescriptor) getCost() (*CallCost, error) {
// global rounding
roundingDecimals, roundingMethod := cc.GetLongestRounding()
cc.Cost = utils.Round(cc.Cost, roundingDecimals, roundingMethod)
//Logger.Info(fmt.Sprintf("<Rater> Get Cost: %s => %v", cd.GetKey(), cc))
//utils.Logger.Info(fmt.Sprintf("<Rater> Get Cost: %s => %v", cd.GetKey(), cc))
cc.Timespans.Compress()
return cc, err
}
@@ -521,23 +513,23 @@ func (origCD *CallDescriptor) getMaxSessionDuration(origAcc *Account) (time.Dura
if origCD.TOR == "" {
origCD.TOR = utils.VOICE
}
//Logger.Debug("ORIG: " + utils.ToJSON(origCD))
//utils.Logger.Debug("ORIG: " + utils.ToJSON(origCD))
cd := origCD.Clone()
initialDuration := cd.TimeEnd.Sub(cd.TimeStart)
//Logger.Debug(fmt.Sprintf("INITIAL_DURATION: %v", initialDuration))
//utils.Logger.Debug(fmt.Sprintf("INITIAL_DURATION: %v", initialDuration))
defaultBalance := account.GetDefaultMoneyBalance(cd.Direction)

//use this to check what increment was paid with debt
initialDefaultBalanceValue := defaultBalance.GetValue()

//Logger.Debug("ACCOUNT: " + utils.ToJSON(account))
//Logger.Debug("DEFAULT_BALANCE: " + utils.ToJSON(defaultBalance))
//utils.Logger.Debug("ACCOUNT: " + utils.ToJSON(account))
//utils.Logger.Debug("DEFAULT_BALANCE: " + utils.ToJSON(defaultBalance))

//
cc, err := cd.debit(account, true, false)
//Logger.Debug("CC: " + utils.ToJSON(cc))
//utils.Logger.Debug("CC: " + utils.ToJSON(cc))
//log.Print("CC: ", utils.ToIJSON(cc))
//Logger.Debug(fmt.Sprintf("ERR: %v", err))
//utils.Logger.Debug(fmt.Sprintf("ERR: %v", err))
if err != nil {
return 0, err
}
@@ -551,12 +543,12 @@ func (origCD *CallDescriptor) getMaxSessionDuration(origAcc *Account) (time.Dura
for _, ts := range cc.Timespans {
//if ts.RateInterval != nil {
//log.Printf("TS: %+v", ts)
//Logger.Debug("TS: " + utils.ToJSON(ts))
//utils.Logger.Debug("TS: " + utils.ToJSON(ts))
//}
if cd.MaxRate > 0 && cd.MaxRateUnit > 0 {
rate, _, rateUnit := ts.RateInterval.GetRateParameters(ts.GetGroupStart())
if rate/rateUnit.Seconds() > cd.MaxRate/cd.MaxRateUnit.Seconds() {
//Logger.Debug(fmt.Sprintf("0_INIT DUR %v, TOTAL DUR: %v", initialDuration, totalDuration))
//utils.Logger.Debug(fmt.Sprintf("0_INIT DUR %v, TOTAL DUR: %v", initialDuration, totalDuration))
return utils.MinDuration(initialDuration, totalDuration), nil
}
}
@@ -564,14 +556,14 @@ func (origCD *CallDescriptor) getMaxSessionDuration(origAcc *Account) (time.Dura
ts.createIncrementsSlice()
}
for _, incr := range ts.Increments {
//Logger.Debug("INCR: " + utils.ToJSON(incr))
//utils.Logger.Debug("INCR: " + utils.ToJSON(incr))
totalCost += incr.Cost
if incr.BalanceInfo.MoneyBalanceUuid == defaultBalance.Uuid {
initialDefaultBalanceValue -= incr.Cost
if initialDefaultBalanceValue < 0 {
// this increment was paid with debt
// TODO: improve this check
//Logger.Debug(fmt.Sprintf("1_INIT DUR %v, TOTAL DUR: %v", initialDuration, totalDuration))
//utils.Logger.Debug(fmt.Sprintf("1_INIT DUR %v, TOTAL DUR: %v", initialDuration, totalDuration))
return utils.MinDuration(initialDuration, totalDuration), nil

}
@@ -579,19 +571,19 @@ func (origCD *CallDescriptor) getMaxSessionDuration(origAcc *Account) (time.Dura
totalDuration += incr.Duration
if totalDuration >= initialDuration {
// we have enough, return
//Logger.Debug(fmt.Sprintf("2_INIT DUR %v, TOTAL DUR: %v", initialDuration, totalDuration))
//utils.Logger.Debug(fmt.Sprintf("2_INIT DUR %v, TOTAL DUR: %v", initialDuration, totalDuration))
return initialDuration, nil
}
}
}
//Logger.Debug(fmt.Sprintf("3_INIT DUR %v, TOTAL DUR: %v", initialDuration, totalDuration))
//utils.Logger.Debug(fmt.Sprintf("3_INIT DUR %v, TOTAL DUR: %v", initialDuration, totalDuration))
return utils.MinDuration(initialDuration, totalDuration), nil
}

func (cd *CallDescriptor) GetMaxSessionDuration() (duration time.Duration, err error) {
cd.account = nil // make sure it's not cached
if account, err := cd.getAccount(); err != nil || account == nil {
Logger.Err(fmt.Sprintf("Could not get user balance for <%s>: %s.", cd.GetAccountKey(), err.Error()))
utils.Logger.Err(fmt.Sprintf("Could not get user balance for <%s>: %s.", cd.GetAccountKey(), err.Error()))
return 0, err
} else {
if memberIds, err := account.GetUniqueSharedGroupMembers(cd); err == nil {
@@ -624,7 +616,7 @@ func (cd *CallDescriptor) debit(account *Account, dryRun bool, goNegative bool)
cc, err = account.debitCreditBalance(cd, !dryRun, dryRun, goNegative)
//log.Printf("HERE: %+v %v", cc, err)
if err != nil {
Logger.Err(fmt.Sprintf("<Rater> Error getting cost for account key <%s>: %s", cd.GetAccountKey(), err.Error()))
utils.Logger.Err(fmt.Sprintf("<Rater> Error getting cost for account key <%s>: %s", cd.GetAccountKey(), err.Error()))
return nil, err
}
cost := 0.0
@@ -646,7 +638,7 @@ func (cd *CallDescriptor) Debit() (cc *CallCost, err error) {
cd.account = nil // make sure it's not cached
// lock all group members
if account, err := cd.getAccount(); err != nil || account == nil {
Logger.Err(fmt.Sprintf("Could not get user balance for <%s>: %s.", cd.GetAccountKey(), err.Error()))
utils.Logger.Err(fmt.Sprintf("Could not get user balance for <%s>: %s.", cd.GetAccountKey(), err.Error()))
return nil, err
} else {
if memberIds, err := account.GetUniqueSharedGroupMembers(cd); err == nil {
@@ -668,7 +660,7 @@ func (cd *CallDescriptor) Debit() (cc *CallCost, err error) {
func (cd *CallDescriptor) MaxDebit() (cc *CallCost, err error) {
cd.account = nil // make sure it's not cached
if account, err := cd.getAccount(); err != nil || account == nil {
Logger.Err(fmt.Sprintf("Could not get user balance for <%s>: %s.", cd.GetAccountKey(), err.Error()))
utils.Logger.Err(fmt.Sprintf("Could not get user balance for <%s>: %s.", cd.GetAccountKey(), err.Error()))
return nil, err
} else {
//log.Printf("ACC: %+v", account)

@@ -43,11 +43,11 @@ type CallCostLog struct {
func cgrCdrHandler(w http.ResponseWriter, r *http.Request) {
cgrCdr, err := NewCgrCdrFromHttpReq(r, cdrServer.cgrCfg.DefaultTimezone)
if err != nil {
Logger.Err(fmt.Sprintf("<CDRS> Could not create CDR entry: %s", err.Error()))
utils.Logger.Err(fmt.Sprintf("<CDRS> Could not create CDR entry: %s", err.Error()))
return
}
if err := cdrServer.processCdr(cgrCdr.AsStoredCdr(cdrServer.cgrCfg.DefaultTimezone)); err != nil {
Logger.Err(fmt.Sprintf("<CDRS> Errors when storing CDR entry: %s", err.Error()))
utils.Logger.Err(fmt.Sprintf("<CDRS> Errors when storing CDR entry: %s", err.Error()))
}
}

@@ -56,11 +56,11 @@ func fsCdrHandler(w http.ResponseWriter, r *http.Request) {
body, _ := ioutil.ReadAll(r.Body)
fsCdr, err := NewFSCdr(body, cdrServer.cgrCfg)
if err != nil {
Logger.Err(fmt.Sprintf("<CDRS> Could not create CDR entry: %s", err.Error()))
utils.Logger.Err(fmt.Sprintf("<CDRS> Could not create CDR entry: %s", err.Error()))
return
}
if err := cdrServer.processCdr(fsCdr.AsStoredCdr(cdrServer.Timezone())); err != nil {
Logger.Err(fmt.Sprintf("<CDRS> Errors when storing CDR entry: %s", err.Error()))
utils.Logger.Err(fmt.Sprintf("<CDRS> Errors when storing CDR entry: %s", err.Error()))
}
}

@@ -143,7 +143,7 @@ func (self *CdrServer) RateCdrs(cgrIds, runIds, tors, cdrHosts, cdrSources, reqT
}
for _, cdr := range cdrs {
if err := self.processCdr(cdr); err != nil {
Logger.Err(fmt.Sprintf("<CDRS> Processing CDR %+v, got error: %s", cdr, err.Error()))
utils.Logger.Err(fmt.Sprintf("<CDRS> Processing CDR %+v, got error: %s", cdr, err.Error()))
}
}
return nil
@@ -187,7 +187,7 @@ func (self *CdrServer) processCdr(storedCdr *StoredCdr) (err error) {
}
if self.cgrCfg.CDRSStoreCdrs { // Store RawCDRs, this we do sync so we can reply with the status
if err := self.cdrDb.SetCdr(storedCdr); err != nil { // Only original CDR stored in primary table, no derived
Logger.Err(fmt.Sprintf("<CDRS> Storing primary CDR %+v, got error: %s", storedCdr, err.Error()))
utils.Logger.Err(fmt.Sprintf("<CDRS> Storing primary CDR %+v, got error: %s", storedCdr, err.Error()))
}
}
go self.deriveRateStoreStatsReplicate(storedCdr)
@@ -211,19 +211,19 @@ func (self *CdrServer) deriveRateStoreStatsReplicate(storedCdr *StoredCdr) error
if self.cgrCfg.CDRSStoreCdrs { // Store CDRs
// Store RatedCDR
if err := self.cdrDb.SetRatedCdr(cdr); err != nil {
Logger.Err(fmt.Sprintf("<CDRS> Storing rated CDR %+v, got error: %s", cdr, err.Error()))
utils.Logger.Err(fmt.Sprintf("<CDRS> Storing rated CDR %+v, got error: %s", cdr, err.Error()))
}
// Store CostDetails
if cdr.Rated || utils.IsSliceMember([]string{utils.RATED, utils.META_RATED}, cdr.ReqType) { // Account related CDRs are saved automatically, so save the others here if requested
if err := self.cdrDb.LogCallCost(cdr.CgrId, utils.CDRS_SOURCE, cdr.MediationRunId, cdr.CostDetails); err != nil {
Logger.Err(fmt.Sprintf("<CDRS> Storing costs for CDR %+v, costDetails: %+v, got error: %s", cdr, cdr.CostDetails, err.Error()))
utils.Logger.Err(fmt.Sprintf("<CDRS> Storing costs for CDR %+v, costDetails: %+v, got error: %s", cdr, cdr.CostDetails, err.Error()))
}
}
}
// Attach CDR to stats
if self.stats != nil { // Send CDR to stats
if err := self.stats.AppendCDR(cdr, nil); err != nil {
Logger.Err(fmt.Sprintf("<CDRS> Could not append cdr to stats: %s", err.Error()))
utils.Logger.Err(fmt.Sprintf("<CDRS> Could not append cdr to stats: %s", err.Error()))
}
}
if len(self.cgrCfg.CDRSCdrReplication) != 0 {
@@ -246,7 +246,7 @@ func (self *CdrServer) deriveCdrs(storedCdr *StoredCdr) ([]*StoredCdr, error) {
Account: storedCdr.Account, Subject: storedCdr.Subject}
var dcs utils.DerivedChargers
if err := self.rater.GetDerivedChargers(attrsDC, &dcs); err != nil {
Logger.Err(fmt.Sprintf("Could not get derived charging for cgrid %s, error: %s", storedCdr.CgrId, err.Error()))
utils.Logger.Err(fmt.Sprintf("Could not get derived charging for cgrid %s, error: %s", storedCdr.CgrId, err.Error()))
return nil, err
}
for _, dc := range dcs {
@@ -277,7 +277,7 @@ func (self *CdrServer) deriveCdrs(storedCdr *StoredCdr) ([]*StoredCdr, error) {
forkedCdr, err := storedCdr.ForkCdr(dc.RunId, dcReqTypeFld, dcDirFld, dcTenantFld, dcCategoryFld, dcAcntFld, dcSubjFld, dcDstFld,
dcSTimeFld, dcPddFld, dcATimeFld, dcDurFld, dcSupplFld, dcDCauseFld, []*utils.RSRField{}, true, self.cgrCfg.DefaultTimezone)
if err != nil {
Logger.Err(fmt.Sprintf("Could not fork CGR with cgrid %s, run: %s, error: %s", storedCdr.CgrId, dc.RunId, err.Error()))
utils.Logger.Err(fmt.Sprintf("Could not fork CGR with cgrid %s, run: %s, error: %s", storedCdr.CgrId, dc.RunId, err.Error()))
continue // do not add it to the forked CDR list
}
cdrRuns = append(cdrRuns, forkedCdr)
@@ -331,7 +331,7 @@ func (self *CdrServer) rateCDR(storedCdr *StoredCdr) error {
time.Sleep(delay())
}
if err != nil && err == gorm.RecordNotFound { //calculate CDR as for pseudoprepaid
Logger.Warning(fmt.Sprintf("<Cdrs> WARNING: Could not find CallCostLog for cgrid: %s, source: %s, runid: %s, will recalculate", storedCdr.CgrId, utils.SESSION_MANAGER_SOURCE, storedCdr.MediationRunId))
utils.Logger.Warning(fmt.Sprintf("<Cdrs> WARNING: Could not find CallCostLog for cgrid: %s, source: %s, runid: %s, will recalculate", storedCdr.CgrId, utils.SESSION_MANAGER_SOURCE, storedCdr.MediationRunId))
qryCC, err = self.getCostFromRater(storedCdr)
}

@@ -366,7 +366,7 @@ func (self *CdrServer) replicateCdr(cdr *StoredCdr) error {
errChan := make(chan error)
go func(cdr *StoredCdr, rplCfg *config.CdrReplicationCfg, errChan chan error) {
if _, err := httpClient.PostForm(fmt.Sprintf("%s", rplCfg.Server), cdr.AsHttpForm()); err != nil {
Logger.Err(fmt.Sprintf("<CDRReplicator> Replicating CDR: %+v, got error: %s", cdr, err.Error()))
utils.Logger.Err(fmt.Sprintf("<CDRReplicator> Replicating CDR: %+v, got error: %s", cdr, err.Error()))
errChan <- err
}
errChan <- nil

@@ -108,11 +108,11 @@ func (fsCdr FSCdr) searchExtraField(field string, body map[string]interface{}) (
return
}
} else {
Logger.Warning(fmt.Sprintf("Slice with no maps: %v", reflect.TypeOf(item)))
utils.Logger.Warning(fmt.Sprintf("Slice with no maps: %v", reflect.TypeOf(item)))
}
}
default:
Logger.Warning(fmt.Sprintf("Unexpected type: %v", reflect.TypeOf(v)))
utils.Logger.Warning(fmt.Sprintf("Unexpected type: %v", reflect.TypeOf(v)))
}
}
return

@@ -393,12 +393,12 @@ func (lc *LCRCost) GetSupplierRatio(supplier string) int {
for _, param := range params {
ratioSlice := strings.Split(param, utils.CONCATENATED_KEY_SEP)
if len(ratioSlice) != 2 {
Logger.Warning(fmt.Sprintf("bad format in load distribution strategy param: %s", lc.Entry.StrategyParams))
utils.Logger.Warning(fmt.Sprintf("bad format in load distribution strategy param: %s", lc.Entry.StrategyParams))
continue
}
p, err := strconv.Atoi(ratioSlice[1])
if err != nil {
Logger.Warning(fmt.Sprintf("bad format in load distribution strategy param: %s", lc.Entry.StrategyParams))
utils.Logger.Warning(fmt.Sprintf("bad format in load distribution strategy param: %s", lc.Entry.StrategyParams))
continue
}
ratios[ratioSlice[0]] = p
@@ -432,7 +432,7 @@ func (lc *LCRCost) HasErrors() bool {
func (lc *LCRCost) LogErrors() {
for _, supplCost := range lc.SupplierCosts {
if len(supplCost.Error) != 0 {
Logger.Err(fmt.Sprintf("LCR_ERROR: supplier <%s>, error <%s>", supplCost.Supplier, supplCost.Error))
utils.Logger.Err(fmt.Sprintf("LCR_ERROR: supplier <%s>, error <%s>", supplCost.Supplier, supplCost.Error))
}
}
}

@@ -69,13 +69,13 @@ func (ps *PubSub) saveSubscriber(key string) {
return
}
if err := accountingStorage.SetSubscriber(key, subData); err != nil {
Logger.Err("<PubSub> Error saving subscriber: " + err.Error())
utils.Logger.Err("<PubSub> Error saving subscriber: " + err.Error())
}
}

func (ps *PubSub) removeSubscriber(key string) {
if err := accountingStorage.RemoveSubscriber(key); err != nil {
Logger.Err("<PubSub> Error removing subscriber: " + err.Error())
utils.Logger.Err("<PubSub> Error removing subscriber: " + err.Error())
}
}

@@ -133,7 +133,7 @@ func (ps *PubSub) Publish(evt CgrEvent, reply *string) error {
}
split := utils.InfieldSplit(key)
if len(split) != 2 {
Logger.Warning("<PubSub> Wrong transport;address pair: " + key)
utils.Logger.Warning("<PubSub> Wrong transport;address pair: " + key)
continue
}
transport := split[0]
@@ -147,7 +147,7 @@ func (ps *PubSub) Publish(evt CgrEvent, reply *string) error {
if _, err := ps.pubFunc(address, ps.ttlVerify, evt); err == nil {
break // Success, no need to reiterate
} else if i == 4 { // Last iteration, syslog the warning
Logger.Warning(fmt.Sprintf("<PubSub> Failed calling url: [%s], error: [%s], event type: %s", address, err.Error(), evt["EventName"]))
utils.Logger.Warning(fmt.Sprintf("<PubSub> Failed calling url: [%s], error: [%s], event type: %s", address, err.Error(), evt["EventName"]))
break
}
time.Sleep(delay())

@@ -117,7 +117,7 @@ func (rp *RatingProfile) GetRatingPlansForPrefix(cd *CallDescriptor) (err error)
for index, rpa := range rp.RatingPlanActivations.GetActiveForCall(cd) {
rpl, err := ratingStorage.GetRatingPlan(rpa.RatingPlanId, false)
if err != nil || rpl == nil {
Logger.Err(fmt.Sprintf("Error checking destination: %v", err))
utils.Logger.Err(fmt.Sprintf("Error checking destination: %v", err))
continue
}
prefix := ""

@@ -549,7 +549,7 @@ func (rs *Responder) getCallCost(key *CallDescriptor, method string) (reply *Cal
for err != nil {
client := rs.Bal.Balance()
if client == nil {
Logger.Info("<Balancer> Waiting for raters to register...")
utils.Logger.Info("<Balancer> Waiting for raters to register...")
time.Sleep(1 * time.Second) // wait one second and retry
} else {
_, err = Guardian.Guard(func() (interface{}, error) {
@@ -557,7 +557,7 @@ func (rs *Responder) getCallCost(key *CallDescriptor, method string) (reply *Cal
return reply, err
}, 0, key.GetAccountKey())
if err != nil {
Logger.Err(fmt.Sprintf("<Balancer> Got an error from rater: %v", err))
utils.Logger.Err(fmt.Sprintf("<Balancer> Got an error from rater: %v", err))
}
}
}
@@ -572,7 +572,7 @@ func (rs *Responder) callMethod(key *CallDescriptor, method string) (reply float
for err != nil {
client := rs.Bal.Balance()
if client == nil {
Logger.Info("Waiting for raters to register...")
utils.Logger.Info("Waiting for raters to register...")
time.Sleep(1 * time.Second) // wait one second and retry
} else {
_, err = Guardian.Guard(func() (interface{}, error) {
@@ -580,7 +580,7 @@ func (rs *Responder) callMethod(key *CallDescriptor, method string) (reply float
return reply, err
}, 0, key.GetAccountKey())
if err != nil {
Logger.Info(fmt.Sprintf("Got an error from rater: %v", err))
utils.Logger.Info(fmt.Sprintf("Got an error from rater: %v", err))
}
}
}
@@ -591,15 +591,15 @@ func (rs *Responder) callMethod(key *CallDescriptor, method string) (reply float
RPC method that receives a rater address, connects to it and adds the pair to the rater list for balancing
*/
func (rs *Responder) RegisterRater(clientAddress string, replay *int) error {
Logger.Info(fmt.Sprintf("Started rater %v registration...", clientAddress))
utils.Logger.Info(fmt.Sprintf("Started rater %v registration...", clientAddress))
time.Sleep(2 * time.Second) // wait a second for Rater to start serving
client, err := rpc.Dial("tcp", clientAddress)
if err != nil {
Logger.Err("Could not connect to client!")
utils.Logger.Err("Could not connect to client!")
return err
}
rs.Bal.AddClient(clientAddress, client)
Logger.Info(fmt.Sprintf("Rater %v registered successfully.", clientAddress))
utils.Logger.Info(fmt.Sprintf("Rater %v registered successfully.", clientAddress))
return nil
}

@@ -611,9 +611,9 @@ func (rs *Responder) UnRegisterRater(clientAddress string, replay *int) error {
if ok {
client.Close()
rs.Bal.RemoveClient(clientAddress)
Logger.Info(fmt.Sprintf("Rater %v unregistered successfully.", clientAddress))
utils.Logger.Info(fmt.Sprintf("Rater %v unregistered successfully.", clientAddress))
} else {
Logger.Info(fmt.Sprintf("Server %v was not on my watch!", clientAddress))
utils.Logger.Info(fmt.Sprintf("Server %v was not on my watch!", clientAddress))
}
return nil
}

@@ -27,6 +27,7 @@ import (
"net/rpc"
"net/rpc/jsonrpc"

"github.com/cgrates/cgrates/utils"
"golang.org/x/net/websocket"
)

@@ -58,15 +59,15 @@ func (s *Server) ServeJSON(addr string) {
if e != nil {
log.Fatal("listen error:", e)
}
Logger.Info(fmt.Sprintf("Starting CGRateS JSON server at %s.", addr))
utils.Logger.Info(fmt.Sprintf("Starting CGRateS JSON server at %s.", addr))
for {
conn, err := lJSON.Accept()
if err != nil {
Logger.Err(fmt.Sprintf("<CGRServer> Accept error: %v", conn))
utils.Logger.Err(fmt.Sprintf("<CGRServer> Accept error: %v", conn))
continue
}

//Logger.Info(fmt.Sprintf("<CGRServer> New incoming connection: %v", conn.RemoteAddr()))
//utils.Logger.Info(fmt.Sprintf("<CGRServer> New incoming connection: %v", conn.RemoteAddr()))
go jsonrpc.ServeConn(conn)
}

@@ -80,15 +81,15 @@ func (s *Server) ServeGOB(addr string) {
if e != nil {
log.Fatal("listen error:", e)
}
Logger.Info(fmt.Sprintf("Starting CGRateS GOB server at %s.", addr))
utils.Logger.Info(fmt.Sprintf("Starting CGRateS GOB server at %s.", addr))
for {
conn, err := lGOB.Accept()
if err != nil {
Logger.Err(fmt.Sprintf("<CGRServer> Accept error: %v", conn))
utils.Logger.Err(fmt.Sprintf("<CGRServer> Accept error: %v", conn))
continue
}

//Logger.Info(fmt.Sprintf("<CGRServer> New incoming connection: %v", conn.RemoteAddr()))
//utils.Logger.Info(fmt.Sprintf("<CGRServer> New incoming connection: %v", conn.RemoteAddr()))
go rpc.ServeConn(conn)
}
}
@@ -109,7 +110,7 @@ func (s *Server) ServeHTTP(addr string) {
if !s.httpEnabled {
return
}
Logger.Info(fmt.Sprintf("Starting CGRateS HTTP server at %s.", addr))
utils.Logger.Info(fmt.Sprintf("Starting CGRateS HTTP server at %s.", addr))
http.ListenAndServe(addr, nil)
}


@@ -87,7 +87,7 @@ func NewStats(ratingDb RatingStorage, accountingDb AccountingStorage, saveInterv
if css, err := ratingDb.GetAllCdrStats(); err == nil {
cdrStats.UpdateQueues(css, nil)
} else {
Logger.Err(fmt.Sprintf("Cannot load cdr stats: %v", err))
utils.Logger.Err(fmt.Sprintf("Cannot load cdr stats: %v", err))
}
return cdrStats
}
@@ -195,7 +195,7 @@ func (s *Stats) ResetQueues(ids []string, out *int) error {
for _, id := range ids {
sq, exists := s.queues[id]
if !exists {
Logger.Warning(fmt.Sprintf("Cannot reset queue id %v: Not Found", id))
utils.Logger.Warning(fmt.Sprintf("Cannot reset queue id %v: Not Found", id))
continue
}
sq.Cdrs = make([]*QCdr, 0)

@@ -23,6 +23,8 @@ import (
"strings"
"sync"
"time"

"github.com/cgrates/cgrates/utils"
)

type StatsQueue struct {
@@ -91,7 +93,7 @@ func (sq *StatsQueue) Save(adb AccountingStorage) {
defer sq.mux.Unlock()
if sq.dirty {
if err := adb.SetCdrStatsQueue(sq); err != nil {
Logger.Err(fmt.Sprintf("Error saving cdr stats queue id %s: %v", sq.GetId(), err))
utils.Logger.Err(fmt.Sprintf("Error saving cdr stats queue id %s: %v", sq.GetId(), err))
return
}
sq.dirty = false

@@ -54,7 +54,7 @@ func (cs *CassandraStorage) LogCallCost(cgrid, source, runid string, cc *CallCos
}
tss, err := json.Marshal(cc.Timespans)
if err != nil {
Logger.Err(fmt.Sprintf("Error marshalling timespans to json: %v", err))
utils.Logger.Err(fmt.Sprintf("Error marshalling timespans to json: %v", err))
return err
}
if err = cs.db.Query(fmt.Sprintf("INSERT INTO %s (cgrid,runid,tor,direction,tenant,category,account,subject,destination,cost,timespans,cost_source,created_at) VALUES ('%s','%s','%s','%s','%s','%s','%s','%s','%s',%f,'%s','%s','%s') tor=values(tor),direction=values(direction),tenant=values(tenant),category=values(category),account=values(account),subject=values(subject),destination=values(destination),cost=values(cost),timespans=values(timespans),cost_source=values(cost_source),updated_at='%s'",
@@ -73,7 +73,7 @@ func (cs *CassandraStorage) LogCallCost(cgrid, source, runid string, cc *CallCos
source,
time.Now().Format(time.RFC3339),
time.Now().Format(time.RFC3339))).Exec(); err != nil {
Logger.Err(fmt.Sprintf("failed to execute insert statement: %v", err))
utils.Logger.Err(fmt.Sprintf("failed to execute insert statement: %v", err))
return err
}
return nil

@@ -68,7 +68,7 @@ func (self *MySQLStorage) LogCallCost(cgrid, source, runid string, cc *CallCost)
}
tss, err := json.Marshal(cc.Timespans)
if err != nil {
Logger.Err(fmt.Sprintf("Error marshalling timespans to json: %v", err))
utils.Logger.Err(fmt.Sprintf("Error marshalling timespans to json: %v", err))
return err
}
_, err = self.Db.Exec(fmt.Sprintf("INSERT INTO %s (cgrid,runid,tor,direction,tenant,category,account,subject,destination,cost,timespans,cost_source,created_at) VALUES ('%s','%s','%s','%s','%s','%s','%s','%s','%s',%f,'%s','%s','%s') ON DUPLICATE KEY UPDATE tor=values(tor),direction=values(direction),tenant=values(tenant),category=values(category),account=values(account),subject=values(subject),destination=values(destination),cost=values(cost),timespans=values(timespans),cost_source=values(cost_source),updated_at='%s'",
@@ -88,7 +88,7 @@ func (self *MySQLStorage) LogCallCost(cgrid, source, runid string, cc *CallCost)
time.Now().Format(time.RFC3339),
time.Now().Format(time.RFC3339)))
if err != nil {
Logger.Err(fmt.Sprintf("failed to execute insert statement: %v", err))
utils.Logger.Err(fmt.Sprintf("failed to execute insert statement: %v", err))
return err
}
return nil
@@ -117,7 +117,7 @@ func (self *MySQLStorage) SetRatedCdr(storedCdr *StoredCdr) (err error) {
time.Now().Format(time.RFC3339),
time.Now().Format(time.RFC3339)))
if err != nil {
Logger.Err(fmt.Sprintf("failed to execute cdr insert statement: %s", err.Error()))
utils.Logger.Err(fmt.Sprintf("failed to execute cdr insert statement: %s", err.Error()))
}
return
}

@@ -71,7 +71,7 @@ func (self *PostgresStorage) LogCallCost(cgrid, source, runid string, cc *CallCo
}
tss, err := json.Marshal(cc.Timespans)
if err != nil {
Logger.Err(fmt.Sprintf("Error marshalling timespans to json: %v", err))
utils.Logger.Err(fmt.Sprintf("Error marshalling timespans to json: %v", err))
return err
}
tx := self.db.Begin()

@@ -113,19 +113,19 @@ func (rs *RedisStorage) cacheRating(dKeys, rpKeys, rpfKeys, lcrKeys, dcsKeys, ac
cache2go.BeginTransaction()
if dKeys == nil || (float64(cache2go.CountEntries(utils.DESTINATION_PREFIX))*utils.DESTINATIONS_LOAD_THRESHOLD < float64(len(dKeys))) {
// if we need to load more than half of the existing keys, load them all
Logger.Info("Caching all destinations")
utils.Logger.Info("Caching all destinations")
if dKeys, err = rs.db.Keys(utils.DESTINATION_PREFIX + "*"); err != nil {
cache2go.RollbackTransaction()
return err
}
cache2go.RemPrefixKey(utils.DESTINATION_PREFIX)
} else if len(dKeys) != 0 {
Logger.Info(fmt.Sprintf("Caching destinations: %v", dKeys))
utils.Logger.Info(fmt.Sprintf("Caching destinations: %v", dKeys))
CleanStalePrefixes(dKeys)
}
for _, key := range dKeys {
if len(key) <= len(utils.DESTINATION_PREFIX) {
Logger.Warning(fmt.Sprintf("Got malformed destination id: %s", key))
utils.Logger.Warning(fmt.Sprintf("Got malformed destination id: %s", key))
continue
}
if _, err = rs.GetDestination(key[len(utils.DESTINATION_PREFIX):]); err != nil {
@@ -134,17 +134,17 @@ func (rs *RedisStorage) cacheRating(dKeys, rpKeys, rpfKeys, lcrKeys, dcsKeys, ac
}
}
if len(dKeys) != 0 {
Logger.Info("Finished destinations caching.")
utils.Logger.Info("Finished destinations caching.")
}
if rpKeys == nil {
Logger.Info("Caching all rating plans")
utils.Logger.Info("Caching all rating plans")
if rpKeys, err = rs.db.Keys(utils.RATING_PLAN_PREFIX + "*"); err != nil {
cache2go.RollbackTransaction()
return err
}
cache2go.RemPrefixKey(utils.RATING_PLAN_PREFIX)
} else if len(rpKeys) != 0 {
Logger.Info(fmt.Sprintf("Caching rating plans: %v", rpKeys))
utils.Logger.Info(fmt.Sprintf("Caching rating plans: %v", rpKeys))
}
for _, key := range rpKeys {
cache2go.RemKey(key)
@@ -154,17 +154,17 @@ func (rs *RedisStorage) cacheRating(dKeys, rpKeys, rpfKeys, lcrKeys, dcsKeys, ac
}
}
if len(rpKeys) != 0 {
Logger.Info("Finished rating plans caching.")
utils.Logger.Info("Finished rating plans caching.")
}
if rpfKeys == nil {
Logger.Info("Caching all rating profiles")
utils.Logger.Info("Caching all rating profiles")
if rpfKeys, err = rs.db.Keys(utils.RATING_PROFILE_PREFIX + "*"); err != nil {
cache2go.RollbackTransaction()
return err
}
cache2go.RemPrefixKey(utils.RATING_PROFILE_PREFIX)
} else if len(rpfKeys) != 0 {
Logger.Info(fmt.Sprintf("Caching rating profile: %v", rpfKeys))
utils.Logger.Info(fmt.Sprintf("Caching rating profile: %v", rpfKeys))
}
for _, key := range rpfKeys {
cache2go.RemKey(key)
@@ -174,17 +174,17 @@ func (rs *RedisStorage) cacheRating(dKeys, rpKeys, rpfKeys, lcrKeys, dcsKeys, ac
}
}
if len(rpfKeys) != 0 {
Logger.Info("Finished rating profile caching.")
utils.Logger.Info("Finished rating profile caching.")
}
if lcrKeys == nil {
Logger.Info("Caching LCR rules.")
utils.Logger.Info("Caching LCR rules.")
if lcrKeys, err = rs.db.Keys(utils.LCR_PREFIX + "*"); err != nil {
cache2go.RollbackTransaction()
return err
}
cache2go.RemPrefixKey(utils.LCR_PREFIX)
} else if len(lcrKeys) != 0 {
Logger.Info(fmt.Sprintf("Caching LCR rules: %v", lcrKeys))
utils.Logger.Info(fmt.Sprintf("Caching LCR rules: %v", lcrKeys))
}
for _, key := range lcrKeys {
cache2go.RemKey(key)
@@ -194,18 +194,18 @@ func (rs *RedisStorage) cacheRating(dKeys, rpKeys, rpfKeys, lcrKeys, dcsKeys, ac
}
}
if len(lcrKeys) != 0 {
Logger.Info("Finished LCR rules caching.")
utils.Logger.Info("Finished LCR rules caching.")
}
// DerivedChargers caching
if dcsKeys == nil {
Logger.Info("Caching all derived chargers")
utils.Logger.Info("Caching all derived chargers")
if dcsKeys, err = rs.db.Keys(utils.DERIVEDCHARGERS_PREFIX + "*"); err != nil {
cache2go.RollbackTransaction()
return err
}
cache2go.RemPrefixKey(utils.DERIVEDCHARGERS_PREFIX)
} else if len(dcsKeys) != 0 {
Logger.Info(fmt.Sprintf("Caching derived chargers: %v", dcsKeys))
utils.Logger.Info(fmt.Sprintf("Caching derived chargers: %v", dcsKeys))
}
for _, key := range dcsKeys {
cache2go.RemKey(key)
@@ -215,19 +215,19 @@ func (rs *RedisStorage) cacheRating(dKeys, rpKeys, rpfKeys, lcrKeys, dcsKeys, ac
}
}
if len(dcsKeys) != 0 {
Logger.Info("Finished derived chargers caching.")
utils.Logger.Info("Finished derived chargers caching.")
}
if actKeys == nil {
cache2go.RemPrefixKey(utils.ACTION_PREFIX)
}
if actKeys == nil {
Logger.Info("Caching all actions")
utils.Logger.Info("Caching all actions")
if actKeys, err = rs.db.Keys(utils.ACTION_PREFIX + "*"); err != nil {
cache2go.RollbackTransaction()
return err
}
} else if len(actKeys) != 0 {
Logger.Info(fmt.Sprintf("Caching actions: %v", actKeys))
utils.Logger.Info(fmt.Sprintf("Caching actions: %v", actKeys))
}
for _, key := range actKeys {
cache2go.RemKey(key)
@@ -237,20 +237,20 @@ func (rs *RedisStorage) cacheRating(dKeys, rpKeys, rpfKeys, lcrKeys, dcsKeys, ac
}
}
if len(actKeys) != 0 {
Logger.Info("Finished actions caching.")
utils.Logger.Info("Finished actions caching.")
}

if shgKeys == nil {
cache2go.RemPrefixKey(utils.SHARED_GROUP_PREFIX)
}
if shgKeys == nil {
Logger.Info("Caching all shared groups")
utils.Logger.Info("Caching all shared groups")
if shgKeys, err = rs.db.Keys(utils.SHARED_GROUP_PREFIX + "*"); err != nil {
cache2go.RollbackTransaction()
return err
}
} else if len(shgKeys) != 0 {
Logger.Info(fmt.Sprintf("Caching shared groups: %v", shgKeys))
utils.Logger.Info(fmt.Sprintf("Caching shared groups: %v", shgKeys))
}
for _, key := range shgKeys {
cache2go.RemKey(key)
@@ -260,7 +260,7 @@ func (rs *RedisStorage) cacheRating(dKeys, rpKeys, rpfKeys, lcrKeys, dcsKeys, ac
}
}
if len(shgKeys) != 0 {
Logger.Info("Finished shared groups caching.")
utils.Logger.Info("Finished shared groups caching.")
}

cache2go.CommitTransaction()
@@ -303,13 +303,13 @@ func (rs *RedisStorage) cacheAccounting(alsKeys []string) (err error) {
cache2go.RemPrefixKey(utils.ALIASES_PREFIX)
}
if alsKeys == nil {
Logger.Info("Caching all aliases")
utils.Logger.Info("Caching all aliases")
if alsKeys, err = rs.db.Keys(utils.ALIASES_PREFIX + "*"); err != nil {
cache2go.RollbackTransaction()
return err
}
} else if len(alsKeys) != 0 {
Logger.Info(fmt.Sprintf("Caching aliases: %v", alsKeys))
utils.Logger.Info(fmt.Sprintf("Caching aliases: %v", alsKeys))
}
for _, key := range alsKeys {
cache2go.RemKey(key)
@@ -319,14 +319,14 @@ func (rs *RedisStorage) cacheAccounting(alsKeys []string) (err error) {
}
}
if len(alsKeys) != 0 {
Logger.Info("Finished aliases caching.")
utils.Logger.Info("Finished aliases caching.")
}
Logger.Info("Caching load history")
utils.Logger.Info("Caching load history")
if _, err = rs.GetLoadHistory(1, true); err != nil {
cache2go.RollbackTransaction()
return err
}
Logger.Info("Finished load history caching.")
utils.Logger.Info("Finished load history caching.")
cache2go.CommitTransaction()
return nil
}

@@ -34,7 +34,7 @@ func ConfigureRatingStorage(db_type, host, port, name, user, pass, marshaler str
var db_nb int
db_nb, err = strconv.Atoi(name)
if err != nil {
Logger.Crit("Redis db name must be an integer!")
utils.Logger.Crit("Redis db name must be an integer!")
return nil, err
}
if port != "" {
@@ -57,7 +57,7 @@ func ConfigureAccountingStorage(db_type, host, port, name, user, pass, marshaler
var db_nb int
db_nb, err = strconv.Atoi(name)
if err != nil {
Logger.Crit("Redis db name must be an integer!")
utils.Logger.Crit("Redis db name must be an integer!")
return nil, err
}
if port != "" {
@@ -86,7 +86,7 @@ func ConfigureLogStorage(db_type, host, port, name, user, pass, marshaler string
var db_nb int
db_nb, err = strconv.Atoi(name)
if err != nil {
Logger.Crit("Redis db name must be an integer!")
utils.Logger.Crit("Redis db name must be an integer!")
return nil, err
}
if port != "" {

@@ -85,7 +85,7 @@ func (self *TPCSVImporter) Run() error {
continue
}
if err := fHandler(self, f.Name()); err != nil {
Logger.Err(fmt.Sprintf("<TPCSVImporter> Importing file: %s, got error: %s", f.Name(), err.Error()))
utils.Logger.Err(fmt.Sprintf("<TPCSVImporter> Importing file: %s, got error: %s", f.Name(), err.Error()))
}
}
return nil

@@ -121,7 +121,7 @@ func (um *UserMap) ReloadUsers(in string, reply *string) error {
if len(um.indexKeys) != 0 {
var s string
if err := um.AddIndex(um.indexKeys, &s); err != nil {
Logger.Err(fmt.Sprintf("Error adding %v indexes to user profile service: %v", um.indexKeys, err))
utils.Logger.Err(fmt.Sprintf("Error adding %v indexes to user profile service: %v", um.indexKeys, err))
um.table = oldTable
um.index = oldIndex
um.masked = oldMaksed

@@ -46,7 +46,7 @@ func (s *Scheduler) Loop() {
}
s.Lock()
a0 := s.queue[0]
//engine.Logger.Info(fmt.Sprintf("Scheduler queue length: %v", len(s.queue)))
//utils.Logger.Info(fmt.Sprintf("Scheduler queue length: %v", len(s.queue)))
now := time.Now()
start := a0.GetNextStartTime(now)
if start.Equal(now) || start.Before(now) {
@@ -66,12 +66,12 @@ func (s *Scheduler) Loop() {
} else {
s.Unlock()
d := a0.GetNextStartTime(now).Sub(now)
//engine.Logger.Info(fmt.Sprintf("Timer set to wait for %v", d))
//utils.Logger.Info(fmt.Sprintf("Timer set to wait for %v", d))
s.timer = time.NewTimer(d)
select {
case <-s.timer.C:
// timer has expired
engine.Logger.Info(fmt.Sprintf("Time for action on %v", a0))
utils.Logger.Info(fmt.Sprintf("Time for action on %v", a0))
case <-s.restartLoop:
// nothing to do, just continue the loop
}
@@ -82,7 +82,7 @@ func (s *Scheduler) Loop() {
func (s *Scheduler) LoadActionPlans(storage engine.RatingStorage) {
actionPlans, err := storage.GetAllActionPlans()
if err != nil {
engine.Logger.Warning(fmt.Sprintf("Cannot get action plans: %v", err))
utils.Logger.Warning(fmt.Sprintf("Cannot get action plans: %v", err))
}
// recreate the queue
s.Lock()
@@ -93,14 +93,14 @@ func (s *Scheduler) LoadActionPlans(storage engine.RatingStorage) {
newApls := make([]*engine.ActionPlan, 0) // will remove the one time runs from the database
for _, ap := range aps {
if ap.Timing == nil {
engine.Logger.Warning(fmt.Sprintf("<Scheduler> Nil timing on action plan: %+v, discarding!", ap))
utils.Logger.Warning(fmt.Sprintf("<Scheduler> Nil timing on action plan: %+v, discarding!", ap))
continue
}
isAsap = ap.IsASAP()
toBeSaved = toBeSaved || isAsap
if isAsap {
if len(ap.AccountIds) > 0 {
engine.Logger.Info(fmt.Sprintf("Time for one time action on %v", key))
utils.Logger.Info(fmt.Sprintf("Time for one time action on %v", key))
}
ap.Execute()
ap.AccountIds = make([]string, 0)

@@ -61,7 +61,7 @@ func (sm *FSSessionManager) Connect() error {
errChan := make(chan error)
for _, connCfg := range sm.cfg.Connections {
connId := utils.GenUUID()
fSock, err := fsock.NewFSock(connCfg.Server, connCfg.Password, connCfg.Reconnects, sm.createHandlers(), eventFilters, engine.Logger.(*syslog.Writer), connId)
fSock, err := fsock.NewFSock(connCfg.Server, connCfg.Password, connCfg.Reconnects, sm.createHandlers(), eventFilters, utils.Logger.(*syslog.Writer), connId)
if err != nil {
return err
} else if !fSock.Connected() {
@@ -75,7 +75,7 @@ func (sm *FSSessionManager) Connect() error {
}
}()
if fsSenderPool, err := fsock.NewFSockPool(5, connCfg.Server, connCfg.Password, 1,
make(map[string][]func(string, string)), make(map[string]string), engine.Logger.(*syslog.Writer), connId); err != nil {
make(map[string][]func(string, string)), make(map[string]string), utils.Logger.(*syslog.Writer), connId); err != nil {
return fmt.Errorf("Cannot connect FreeSWITCH senders pool, error: %s", err.Error())
} else if fsSenderPool == nil {
return errors.New("Cannot connect FreeSWITCH senders pool.")
@@ -132,26 +132,26 @@ func (sm *FSSessionManager) GetSession(uuid string) *Session {
// Disconnects a session by sending hangup command to freeswitch
func (sm *FSSessionManager) DisconnectSession(ev engine.Event, connId, notify string) error {
if _, err := sm.conns[connId].SendApiCmd(fmt.Sprintf("uuid_setvar %s cgr_notify %s\n\n", ev.GetUUID(), notify)); err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Could not send disconnect api notification to freeswitch, error: <%s>, connId: %s", err.Error(), connId))
utils.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Could not send disconnect api notification to freeswitch, error: <%s>, connId: %s", err.Error(), connId))
return err
}
if notify == INSUFFICIENT_FUNDS {
if len(sm.cfg.EmptyBalanceContext) != 0 {
if _, err := sm.conns[connId].SendApiCmd(fmt.Sprintf("uuid_transfer %s %s %s\n\n", ev.GetUUID(), ev.GetCallDestNr(utils.META_DEFAULT), sm.cfg.EmptyBalanceContext)); err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Could not transfer the call to empty balance context, error: <%s>, connId: %s", err.Error(), connId))
utils.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Could not transfer the call to empty balance context, error: <%s>, connId: %s", err.Error(), connId))
return err
}
return nil
} else if len(sm.cfg.EmptyBalanceAnnFile) != 0 {
if _, err := sm.conns[connId].SendApiCmd(fmt.Sprintf("uuid_broadcast %s playback!manager_request::%s aleg\n\n", ev.GetUUID(), sm.cfg.EmptyBalanceAnnFile)); err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Could not send uuid_broadcast to freeswitch, error: <%s>, connId: %s", err.Error(), connId))
utils.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Could not send uuid_broadcast to freeswitch, error: <%s>, connId: %s", err.Error(), connId))
return err
}
return nil
}
}
if err := sm.conns[connId].SendMsgCmd(ev.GetUUID(), map[string]string{"call-command": "hangup", "hangup-cause": "MANAGER_REQUEST"}); err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Could not send disconnect msg to freeswitch, error: <%s>, connId: %s", err.Error(), connId))
utils.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Could not send disconnect msg to freeswitch, error: <%s>, connId: %s", err.Error(), connId))
return err
}
return nil
@@ -176,7 +176,7 @@ func (sm *FSSessionManager) setMaxCallDuration(uuid, connId string, maxDur time.
// _, err := fsock.FS.SendApiCmd(fmt.Sprintf("sched_hangup +%d %s\n\n", int(maxDur.Seconds()), uuid))
_, err := sm.conns[connId].SendApiCmd(fmt.Sprintf("uuid_setvar %s execute_on_answer sched_hangup +%d alloted_timeout\n\n", uuid, int(maxDur.Seconds())))
if err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Could not send sched_hangup command to freeswitch, error: <%s>, connId: %s", err.Error(), connId))
utils.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Could not send sched_hangup command to freeswitch, error: <%s>, connId: %s", err.Error(), connId))
return err
}
return nil
@@ -224,12 +224,12 @@ func (sm *FSSessionManager) onChannelPark(ev engine.Event, connId string) {
}
var maxCallDuration float64 // This will be the maximum duration this channel will be allowed to last
if err := sm.rater.GetDerivedMaxSessionTime(ev.AsStoredCdr(config.CgrConfig().DefaultTimezone), &maxCallDuration); err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Could not get max session time for %s, error: %s", ev.GetUUID(), err.Error()))
utils.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Could not get max session time for %s, error: %s", ev.GetUUID(), err.Error()))
}
if maxCallDuration != -1 { // For calls different than unlimited, set limits
maxCallDur := time.Duration(maxCallDuration)
if maxCallDur <= sm.cfg.MinCallDuration {
//engine.Logger.Info(fmt.Sprintf("Not enough credit for transferring the call %s for %s.", ev.GetUUID(), cd.GetKey(cd.Subject)))
//utils.Logger.Info(fmt.Sprintf("Not enough credit for transferring the call %s for %s.", ev.GetUUID(), cd.GetKey(cd.Subject)))
sm.unparkCall(ev.GetUUID(), connId, ev.GetCallDestNr(utils.META_DEFAULT), INSUFFICIENT_FUNDS)
return
}
@@ -239,13 +239,13 @@ func (sm *FSSessionManager) onChannelPark(ev engine.Event, connId string) {
if ev.ComputeLcr() {
cd, err := fsev.AsCallDescriptor()
if err != nil {
engine.Logger.Info(fmt.Sprintf("<SM-FreeSWITCH> LCR_PREPROCESS_ERROR: %s", err.Error()))
utils.Logger.Info(fmt.Sprintf("<SM-FreeSWITCH> LCR_PREPROCESS_ERROR: %s", err.Error()))
sm.unparkCall(ev.GetUUID(), connId, ev.GetCallDestNr(utils.META_DEFAULT), SYSTEM_ERROR)
return
}
var lcr engine.LCRCost
if err = sm.Rater().GetLCR(&engine.AttrGetLcr{CallDescriptor: cd}, &lcr); err != nil {
engine.Logger.Info(fmt.Sprintf("<SM-FreeSWITCH> LCR_API_ERROR: %s", err.Error()))
utils.Logger.Info(fmt.Sprintf("<SM-FreeSWITCH> LCR_API_ERROR: %s", err.Error()))
sm.unparkCall(ev.GetUUID(), connId, ev.GetCallDestNr(utils.META_DEFAULT), SYSTEM_ERROR)
}
if lcr.HasErrors() {
@@ -254,13 +254,13 @@ func (sm *FSSessionManager) onChannelPark(ev engine.Event, connId string) {
return
}
if supps, err := lcr.SuppliersSlice(); err != nil {
engine.Logger.Info(fmt.Sprintf("<SM-FreeSWITCH> LCR_ERROR: %s", err.Error()))
utils.Logger.Info(fmt.Sprintf("<SM-FreeSWITCH> LCR_ERROR: %s", err.Error()))
sm.unparkCall(ev.GetUUID(), connId, ev.GetCallDestNr(utils.META_DEFAULT), SYSTEM_ERROR)
return
} else {
fsArray := SliceAsFsArray(supps)
if _, err = sm.conns[connId].SendApiCmd(fmt.Sprintf("uuid_setvar %s %s %s\n\n", ev.GetUUID(), utils.CGR_SUPPLIERS, fsArray)); err != nil {
engine.Logger.Info(fmt.Sprintf("<SM-FreeSWITCH> LCR_ERROR: %s", err.Error()))
utils.Logger.Info(fmt.Sprintf("<SM-FreeSWITCH> LCR_ERROR: %s", err.Error()))
sm.unparkCall(ev.GetUUID(), connId, ev.GetCallDestNr(utils.META_DEFAULT), SYSTEM_ERROR)
}
}
@@ -272,10 +272,10 @@ func (sm *FSSessionManager) onChannelPark(ev engine.Event, connId string) {
func (sm *FSSessionManager) unparkCall(uuid, connId, call_dest_nb, notify string) {
_, err := sm.conns[connId].SendApiCmd(fmt.Sprintf("uuid_setvar %s cgr_notify %s\n\n", uuid, notify))
if err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Could not send unpark api notification to freeswitch, error: <%s>, connId: %s", err.Error(), connId))
utils.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Could not send unpark api notification to freeswitch, error: <%s>, connId: %s", err.Error(), connId))
}
if _, err = sm.conns[connId].SendApiCmd(fmt.Sprintf("uuid_transfer %s %s\n\n", uuid, call_dest_nb)); err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Could not send unpark api call to freeswitch, error: <%s>, connId: %s", err.Error(), connId))
utils.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Could not send unpark api call to freeswitch, error: <%s>, connId: %s", err.Error(), connId))
}
}

@@ -312,14 +312,14 @@ func (sm *FSSessionManager) onChannelHangupComplete(ev engine.Event) {
}
sm.RemoveSession(s.eventStart.GetUUID()) // Unreference it early so we avoid concurrency
if err := s.Close(ev); err != nil { // Stop loop, refund advanced charges and save the costs deducted so far to database
engine.Logger.Err(err.Error())
utils.Logger.Err(err.Error())
}
}

func (sm *FSSessionManager) ProcessCdr(storedCdr *engine.StoredCdr) error {
var reply string
if err := sm.cdrsrv.ProcessCdr(storedCdr, &reply); err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Failed processing CDR, cgrid: %s, accid: %s, error: <%s>", storedCdr.CgrId, storedCdr.AccId, err.Error()))
utils.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Failed processing CDR, cgrid: %s, accid: %s, error: <%s>", storedCdr.CgrId, storedCdr.AccId, err.Error()))
}
return nil
}
@@ -338,24 +338,24 @@ func (sm *FSSessionManager) Rater() engine.Connector {
// Called when a call goes under the minimum duration threshold, so FreeSWITCH can play an announcement message
func (sm *FSSessionManager) WarnSessionMinDuration(sessionUuid, connId string) {
if _, err := sm.conns[connId].SendApiCmd(fmt.Sprintf("uuid_broadcast %s %s aleg\n\n", sessionUuid, sm.cfg.LowBalanceAnnFile)); err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Could not send uuid_broadcast to freeswitch, error: %s, connection id: %s", err.Error(), connId))
utils.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Could not send uuid_broadcast to freeswitch, error: %s, connection id: %s", err.Error(), connId))
}
}

func (sm *FSSessionManager) Shutdown() (err error) {
for connId, fSock := range sm.conns {
if !fSock.Connected() {
engine.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Cannot shutdown sessions, fsock not connected for connection id: %s", connId))
utils.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Cannot shutdown sessions, fsock not connected for connection id: %s", connId))
continue
}
engine.Logger.Info(fmt.Sprintf("<SM-FreeSWITCH> Shutting down all sessions on connection id: %s", connId))
utils.Logger.Info(fmt.Sprintf("<SM-FreeSWITCH> Shutting down all sessions on connection id: %s", connId))
if _, err = fSock.SendApiCmd("hupall MANAGER_REQUEST cgr_reqtype *prepaid"); err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Error on calls shutdown: %s, connection id: %s", err.Error(), connId))
utils.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Error on calls shutdown: %s, connection id: %s", err.Error(), connId))
}
}
for guard := 0; len(sm.sessions) > 0 && guard < 20; guard++ {
time.Sleep(100 * time.Millisecond) // wait for the hangup event to be fired
engine.Logger.Info(fmt.Sprintf("<SM-FreeSWITCH> Shutdown waiting on sessions: %v", sm.sessions))
utils.Logger.Info(fmt.Sprintf("<SM-FreeSWITCH> Shutdown waiting on sessions: %v", sm.sessions))
}
return nil
}
@@ -375,13 +375,13 @@ func (sm *FSSessionManager) SyncSessions() error {
for connId, senderPool := range sm.senderPools {
fsConn, err := senderPool.PopFSock()
if err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Error on syncing active calls, senderPool: %+v, error: %s", senderPool, err.Error()))
utils.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Error on syncing active calls, senderPool: %+v, error: %s", senderPool, err.Error()))
continue
}
activeChanStr, err := fsConn.SendApiCmd("show channels")
senderPool.PushFSock(fsConn)
if err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Error on syncing active calls, senderPool: %+v, error: %s", senderPool, err.Error()))
utils.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Error on syncing active calls, senderPool: %+v, error: %s", senderPool, err.Error()))
continue
}
aChans := fsock.MapChanData(activeChanStr)
@@ -399,7 +399,7 @@ func (sm *FSSessionManager) SyncSessions() error {
if stillActive { // No need to do anything since the channel is still there
continue
}
engine.Logger.Warning(fmt.Sprintf("<SM-FreeSWITCH> Sync active channels, stale session detected, uuid: %s", session.eventStart.GetUUID()))
utils.Logger.Warning(fmt.Sprintf("<SM-FreeSWITCH> Sync active channels, stale session detected, uuid: %s", session.eventStart.GetUUID()))
sm.RemoveSession(session.eventStart.GetUUID()) // Unreference it early so we avoid concurrency
fsev := session.eventStart.(FSEvent)
now := time.Now()
@@ -408,7 +408,7 @@ func (sm *FSSessionManager) SyncSessions() error {
fsev[END_TIME] = now.String()
fsev[DURATION] = strconv.FormatFloat(dur.Seconds(), 'f', -1, 64)
if err := session.Close(fsev); err != nil { // Stop loop, refund advanced charges and save the costs deducted so far to database
engine.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Error on removing stale session with uuid: %s, error: %s", session.eventStart.GetUUID(), err.Error()))
utils.Logger.Err(fmt.Sprintf("<SM-FreeSWITCH> Error on removing stale session with uuid: %s, error: %s", session.eventStart.GetUUID(), err.Error()))
continue
}
}

@@ -48,7 +48,7 @@ type KamailioSessionManager struct {
|
||||
func (self *KamailioSessionManager) onCgrAuth(evData []byte, connId string) {
|
||||
kev, err := NewKamEvent(evData)
|
||||
if err != nil {
|
||||
engine.Logger.Info(fmt.Sprintf("<SM-Kamailio> ERROR unmarshalling event: %s, error: %s", evData, err.Error()))
|
||||
utils.Logger.Info(fmt.Sprintf("<SM-Kamailio> ERROR unmarshalling event: %s, error: %s", evData, err.Error()))
|
||||
return
|
||||
}
|
||||
if kev.GetReqType(utils.META_DEFAULT) == utils.META_NONE { // Do not process this request
|
||||
@@ -56,59 +56,59 @@ func (self *KamailioSessionManager) onCgrAuth(evData []byte, connId string) {
|
||||
}
|
||||
if kev.MissingParameter() {
|
||||
if kar, err := kev.AsKamAuthReply(0.0, "", utils.ErrMandatoryIeMissing); err != nil {
|
||||
engine.Logger.Err(fmt.Sprintf("<SM-Kamailio> Failed building auth reply %s", err.Error()))
|
||||
utils.Logger.Err(fmt.Sprintf("<SM-Kamailio> Failed building auth reply %s", err.Error()))
|
||||
} else if err = self.conns[connId].Send(kar.String()); err != nil {
|
||||
engine.Logger.Err(fmt.Sprintf("<SM-Kamailio> Failed sending auth reply %s", err.Error()))
|
||||
utils.Logger.Err(fmt.Sprintf("<SM-Kamailio> Failed sending auth reply %s", err.Error()))
|
||||
}
|
||||
return
|
||||
}
|
||||
var remainingDuration float64
|
||||
var errMaxSession error
|
||||
if errMaxSession = self.rater.GetDerivedMaxSessionTime(kev.AsStoredCdr(self.Timezone()), &remainingDuration); errMaxSession != nil {
|
||||
engine.Logger.Err(fmt.Sprintf("<SM-Kamailio> Could not get max session time, error: %s", errMaxSession.Error()))
|
||||
utils.Logger.Err(fmt.Sprintf("<SM-Kamailio> Could not get max session time, error: %s", errMaxSession.Error()))
|
||||
}
|
||||
var supplStr string
|
||||
var errSuppl error
|
||||
if kev.ComputeLcr() {
|
||||
if supplStr, errSuppl = self.getSuppliers(kev); errSuppl != nil {
|
||||
engine.Logger.Err(fmt.Sprintf("<SM-Kamailio> Could not get suppliers, error: %s", errSuppl.Error()))
|
||||
utils.Logger.Err(fmt.Sprintf("<SM-Kamailio> Could not get suppliers, error: %s", errSuppl.Error()))
|
||||
}
|
||||
}
|
||||
if errMaxSession == nil { // Overwrite the error from maxSessionTime with the one from suppliers if nil
|
||||
errMaxSession = errSuppl
|
||||
}
|
||||
if kar, err := kev.AsKamAuthReply(remainingDuration, supplStr, errMaxSession); err != nil {
|
||||
engine.Logger.Err(fmt.Sprintf("<SM-Kamailio> Failed building auth reply %s", err.Error()))
|
||||
utils.Logger.Err(fmt.Sprintf("<SM-Kamailio> Failed building auth reply %s", err.Error()))
|
||||
} else if err = self.conns[connId].Send(kar.String()); err != nil {
|
||||
engine.Logger.Err(fmt.Sprintf("<SM-Kamailio> Failed sending auth reply %s", err.Error()))
|
||||
utils.Logger.Err(fmt.Sprintf("<SM-Kamailio> Failed sending auth reply %s", err.Error()))
|
||||
}
|
||||
}
|
||||
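The error precedence above keeps the first failure: the suppliers error only replaces errMaxSession when the max-session lookup succeeded, so the earlier error is what reaches the auth reply. The same rule as a standalone sketch (firstErr is a hypothetical helper, not in the tree):

// firstErr returns the earliest non-nil error, mirroring the
// if errMaxSession == nil { errMaxSession = errSuppl } block above.
func firstErr(errs ...error) error {
	for _, err := range errs {
		if err != nil {
			return err
		}
	}
	return nil
}

// usage: kev.AsKamAuthReply(remainingDuration, supplStr, firstErr(errMaxSession, errSuppl))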

func (self *KamailioSessionManager) onCgrLcrReq(evData []byte, connId string) {
kev, err := NewKamEvent(evData)
if err != nil {
engine.Logger.Info(fmt.Sprintf("<SM-Kamailio> ERROR unmarshalling event: %s, error: %s", string(evData), err.Error()))
utils.Logger.Info(fmt.Sprintf("<SM-Kamailio> ERROR unmarshalling event: %s, error: %s", string(evData), err.Error()))
return
}
supplStr, err := self.getSuppliers(kev)
kamLcrReply, errReply := kev.AsKamAuthReply(-1.0, supplStr, err)
kamLcrReply.Event = CGR_LCR_REPLY // Hit the CGR_LCR_REPLY event route on Kamailio side
if errReply != nil {
engine.Logger.Err(fmt.Sprintf("<SM-Kamailio> Failed building auth reply %s", errReply.Error()))
utils.Logger.Err(fmt.Sprintf("<SM-Kamailio> Failed building auth reply %s", errReply.Error()))
} else if err = self.conns[connId].Send(kamLcrReply.String()); err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-Kamailio> Failed sending lcr reply %s", err.Error()))
utils.Logger.Err(fmt.Sprintf("<SM-Kamailio> Failed sending lcr reply %s", err.Error()))
}
}

func (self *KamailioSessionManager) getSuppliers(kev KamEvent) (string, error) {
cd, err := kev.AsCallDescriptor()
if err != nil {
engine.Logger.Info(fmt.Sprintf("<SM-Kamailio> LCR_PREPROCESS_ERROR error: %s", err.Error()))
utils.Logger.Info(fmt.Sprintf("<SM-Kamailio> LCR_PREPROCESS_ERROR error: %s", err.Error()))
return "", errors.New("LCR_PREPROCESS_ERROR")
}
var lcr engine.LCRCost
if err = self.Rater().GetLCR(&engine.AttrGetLcr{CallDescriptor: cd}, &lcr); err != nil {
engine.Logger.Info(fmt.Sprintf("<SM-Kamailio> LCR_API_ERROR error: %s", err.Error()))
utils.Logger.Info(fmt.Sprintf("<SM-Kamailio> LCR_API_ERROR error: %s", err.Error()))
return "", errors.New("LCR_API_ERROR")
}
if lcr.HasErrors() {
@@ -121,7 +121,7 @@ func (self *KamailioSessionManager) getSuppliers(kev KamEvent) (string, error) {
func (self *KamailioSessionManager) onCallStart(evData []byte, connId string) {
kamEv, err := NewKamEvent(evData)
if err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-Kamailio> ERROR unmarshalling event: %s, error: %s", evData, err.Error()))
utils.Logger.Err(fmt.Sprintf("<SM-Kamailio> ERROR unmarshalling event: %s, error: %s", evData, err.Error()))
return
}
if kamEv.GetReqType(utils.META_DEFAULT) == utils.META_NONE { // Do not process this request
@@ -140,14 +140,14 @@ func (self *KamailioSessionManager) onCallStart(evData []byte, connId string) {
func (self *KamailioSessionManager) onCallEnd(evData []byte, connId string) {
kev, err := NewKamEvent(evData)
if err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-Kamailio> ERROR unmarshalling event: %s, error: %s", evData, err.Error()))
utils.Logger.Err(fmt.Sprintf("<SM-Kamailio> ERROR unmarshalling event: %s, error: %s", evData, err.Error()))
return
}
if kev.GetReqType(utils.META_DEFAULT) == utils.META_NONE { // Do not process this request
return
}
if kev.MissingParameter() {
engine.Logger.Err(fmt.Sprintf("<SM-Kamailio> Mandatory IE missing out of event: %+v", kev))
utils.Logger.Err(fmt.Sprintf("<SM-Kamailio> Mandatory IE missing out of event: %+v", kev))
}
go self.ProcessCdr(kev.AsStoredCdr(self.Timezone()))
s := self.GetSession(kev.GetUUID())
@@ -156,7 +156,7 @@ func (self *KamailioSessionManager) onCallEnd(evData []byte, connId string) {
}
self.RemoveSession(s.eventStart.GetUUID()) // Unreference it early so we avoid concurrency
if err := s.Close(kev); err != nil { // Stop loop, refund advanced charges and save the costs deducted so far to database
engine.Logger.Err(err.Error())
utils.Logger.Err(err.Error())
}
}

@@ -171,7 +171,7 @@ func (self *KamailioSessionManager) Connect() error {
errChan := make(chan error)
for _, connCfg := range self.cfg.Connections {
connId := utils.GenUUID()
if self.conns[connId], err = kamevapi.NewKamEvapi(connCfg.EvapiAddr, connId, connCfg.Reconnects, eventHandlers, engine.Logger.(*syslog.Writer)); err != nil {
if self.conns[connId], err = kamevapi.NewKamEvapi(connCfg.EvapiAddr, connId, connCfg.Reconnects, eventHandlers, utils.Logger.(*syslog.Writer)); err != nil {
return err
}
go func() { // Start reading in own goroutine, return on error
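One caveat on the line above: utils.Logger.(*syslog.Writer) is an unchecked type assertion, and the utils init() at the end of this commit falls back to a StdLogger when syslog is unreachable, in which case this assertion panics instead of returning an error. A defensive sketch using the comma-ok form (same names as the hunk above):

syslogWriter, ok := utils.Logger.(*syslog.Writer)
if !ok {
	return errors.New("<SM-Kamailio> kamevapi requires a syslog-backed logger")
}
if self.conns[connId], err = kamevapi.NewKamEvapi(connCfg.EvapiAddr, connId, connCfg.Reconnects, eventHandlers, syslogWriter); err != nil {
	return err
}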
@@ -188,7 +188,7 @@ func (self *KamailioSessionManager) DisconnectSession(ev engine.Event, connId, n
sessionIds := ev.GetSessionIds()
disconnectEv := &KamSessionDisconnect{Event: CGR_SESSION_DISCONNECT, HashEntry: sessionIds[0], HashId: sessionIds[1], Reason: notify}
if err := self.conns[connId].Send(disconnectEv.String()); err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-Kamailio> Failed sending disconnect request, error %s, connection id: %s", err.Error(), connId))
utils.Logger.Err(fmt.Sprintf("<SM-Kamailio> Failed sending disconnect request, error %s, connection id: %s", err.Error(), connId))
return err
}
return nil
@@ -227,7 +227,7 @@ func (self *KamailioSessionManager) ProcessCdr(cdr *engine.StoredCdr) error {
}
var reply string
if err := self.cdrsrv.ProcessCdr(cdr, &reply); err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-Kamailio> Failed processing CDR, cgrid: %s, accid: %s, error: <%s>", cdr.CgrId, cdr.AccId, err.Error()))
utils.Logger.Err(fmt.Sprintf("<SM-Kamailio> Failed processing CDR, cgrid: %s, accid: %s, error: <%s>", cdr.CgrId, cdr.AccId, err.Error()))
}
return nil
}

@@ -116,10 +116,10 @@ func (osm *OsipsSessionManager) Connect() (err error) {
go osm.SubscribeEvents(osm.evSubscribeStop)
evsrv, err := osipsdagram.NewEventServer(osm.cfg.ListenUdp, osm.eventHandlers)
if err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-OpenSIPS> Cannot initialize datagram server, error: <%s>", err.Error()))
utils.Logger.Err(fmt.Sprintf("<SM-OpenSIPS> Cannot initialize datagram server, error: <%s>", err.Error()))
return
}
engine.Logger.Info(fmt.Sprintf("<SM-OpenSIPS> Listening for datagram events at <%s>", osm.cfg.ListenUdp))
utils.Logger.Info(fmt.Sprintf("<SM-OpenSIPS> Listening for datagram events at <%s>", osm.cfg.ListenUdp))
evsrv.ServeEvents(osm.stopServing) // Will break through stopServing on error in other places
return errors.New("<SM-OpenSIPS> Stopped reading events")
}
@@ -170,16 +170,16 @@ func (osm *OsipsSessionManager) DisconnectSession(ev engine.Event, connId, notif
sessionIds := ev.GetSessionIds()
if len(sessionIds) != 2 {
errMsg := fmt.Sprintf("Failed disconnecting session for event: %+v, notify: %s, dialogId: %v", ev, notify, sessionIds)
engine.Logger.Err(fmt.Sprintf("<SM-OpenSIPS> " + errMsg))
utils.Logger.Err(fmt.Sprintf("<SM-OpenSIPS> " + errMsg))
return errors.New(errMsg)
}
cmd := fmt.Sprintf(":dlg_end_dlg:\n%s\n%s\n\n", sessionIds[0], sessionIds[1])
if reply, err := osm.miConn.SendCommand([]byte(cmd)); err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-OpenSIPS> Failed disconnecting session for event: %+v, notify: %s, dialogId: %v, error: <%s>", ev, notify, sessionIds, err))
utils.Logger.Err(fmt.Sprintf("<SM-OpenSIPS> Failed disconnecting session for event: %+v, notify: %s, dialogId: %v, error: <%s>", ev, notify, sessionIds, err))
return err
} else if !bytes.HasPrefix(reply, []byte("200 OK")) {
errStr := fmt.Sprintf("Failed disconnecting session for event: %+v, notify: %s, dialogId: %v", ev, notify, sessionIds)
engine.Logger.Err("<SM-OpenSIPS> " + errStr)
utils.Logger.Err("<SM-OpenSIPS> " + errStr)
return errors.New(errStr)
}
return nil
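The MI datagram built above is positional: the command name, then the two dialog identifiers one per line, terminated by an empty line. A runnable sketch of the rendered payload (the identifiers are made-up values):

package main

import "fmt"

func main() {
	sessionIds := []string{"1234", "abcdef01"} // hypothetical hash entry and hash id
	cmd := fmt.Sprintf(":dlg_end_dlg:\n%s\n%s\n\n", sessionIds[0], sessionIds[1])
	fmt.Printf("%q\n", cmd) // ":dlg_end_dlg:\n1234\nabcdef01\n\n"
}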
@@ -219,10 +219,10 @@ func (osm *OsipsSessionManager) subscribeEvents() error {
}
cmd := fmt.Sprintf(":event_subscribe:\n%s\nudp:%s:%s\n%d\n", eventName, addrListen, portListen, int(subscribeInterval.Seconds()))
if reply, err := osm.miConn.SendCommand([]byte(cmd)); err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-OpenSIPS> Failed subscribing to OpenSIPS at address: <%s>, error: <%s>", osm.cfg.MiAddr, err))
utils.Logger.Err(fmt.Sprintf("<SM-OpenSIPS> Failed subscribing to OpenSIPS at address: <%s>, error: <%s>", osm.cfg.MiAddr, err))
return err
} else if !bytes.HasPrefix(reply, []byte("200 OK")) {
engine.Logger.Err(fmt.Sprintf("<SM-OpenSIPS> Failed subscribing to OpenSIPS at address: <%s>", osm.cfg.MiAddr))
utils.Logger.Err(fmt.Sprintf("<SM-OpenSIPS> Failed subscribing to OpenSIPS at address: <%s>", osm.cfg.MiAddr))
return errors.New("Failed subscribing to OpenSIPS events")
}
}
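Subscription requests follow the same positional protocol: event name, a udp: address for OpenSIPS to send events back to, and the renewal interval in whole seconds. A runnable sketch of the rendered datagram (event name, address, and port are assumed values):

package main

import (
	"fmt"
	"time"
)

func main() {
	eventName, addrListen, portListen := "E_ACC_CDR", "127.0.0.1", "2020"
	subscribeInterval := 60 * time.Second
	cmd := fmt.Sprintf(":event_subscribe:\n%s\nudp:%s:%s\n%d\n",
		eventName, addrListen, portListen, int(subscribeInterval.Seconds()))
	fmt.Printf("%q\n", cmd) // ":event_subscribe:\nE_ACC_CDR\nudp:127.0.0.1:2020\n60\n"
}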
@@ -240,7 +240,7 @@ func (osm *OsipsSessionManager) onOpensipsStart(cdrDagram *osipsdagram.OsipsEven
func (osm *OsipsSessionManager) onCdr(cdrDagram *osipsdagram.OsipsEvent) {
osipsEv, _ := NewOsipsEvent(cdrDagram)
if err := osm.ProcessCdr(osipsEv.AsStoredCdr(osm.timezone)); err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-OpenSIPS> Failed processing CDR, cgrid: %s, accid: %s, error: <%s>", osipsEv.GetCgrId(osm.timezone), osipsEv.GetUUID(), err.Error()))
utils.Logger.Err(fmt.Sprintf("<SM-OpenSIPS> Failed processing CDR, cgrid: %s, accid: %s, error: <%s>", osipsEv.GetCgrId(osm.timezone), osipsEv.GetUUID(), err.Error()))
}
}

@@ -252,17 +252,17 @@ func (osm *OsipsSessionManager) onAccEvent(osipsDgram *osipsdagram.OsipsEvent) {
}
if osipsDgram.AttrValues["method"] == "INVITE" { // Call start
if err := osm.callStart(osipsEv); err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-OpenSIPS> Failed processing CALL_START out of %+v, error: <%s>", osipsDgram, err.Error()))
utils.Logger.Err(fmt.Sprintf("<SM-OpenSIPS> Failed processing CALL_START out of %+v, error: <%s>", osipsDgram, err.Error()))
}
if err := osm.processCdrStart(osipsEv); err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-OpenSIPS> Failed processing cdr start out of %+v, error: <%s>", osipsDgram, err.Error()))
utils.Logger.Err(fmt.Sprintf("<SM-OpenSIPS> Failed processing cdr start out of %+v, error: <%s>", osipsDgram, err.Error()))
}
} else if osipsDgram.AttrValues["method"] == "BYE" {
if err := osm.callEnd(osipsEv); err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-OpenSIPS> Failed processing CALL_END out of %+v, error: <%s>", osipsDgram, err.Error()))
utils.Logger.Err(fmt.Sprintf("<SM-OpenSIPS> Failed processing CALL_END out of %+v, error: <%s>", osipsDgram, err.Error()))
}
if err := osm.processCdrStop(osipsEv); err != nil {
engine.Logger.Err(fmt.Sprintf("<SM-OpenSIPS> Failed processing cdr stop out of %+v, error: <%s>", osipsDgram, err.Error()))
utils.Logger.Err(fmt.Sprintf("<SM-OpenSIPS> Failed processing cdr stop out of %+v, error: <%s>", osipsDgram, err.Error()))
}
}
}

@@ -84,11 +84,11 @@ func (s *Session) debitLoop(runIdx int) {
}
nextCd.TimeEnd = nextCd.TimeStart.Add(debitPeriod)
nextCd.LoopIndex = index
//engine.Logger.Debug(fmt.Sprintf("NEXTCD: %s", utils.ToJSON(nextCd)))
//utils.Logger.Debug(fmt.Sprintf("NEXTCD: %s", utils.ToJSON(nextCd)))
nextCd.DurationIndex += debitPeriod // first presumed duration
cc := new(engine.CallCost)
if err := s.sessionManager.Rater().MaxDebit(nextCd, cc); err != nil {
engine.Logger.Err(fmt.Sprintf("Could not complete debit operation: %v", err))
utils.Logger.Err(fmt.Sprintf("Could not complete debit operation: %v", err))
s.sessionManager.DisconnectSession(s.eventStart, s.connId, SYSTEM_ERROR)
return
}
@@ -100,9 +100,9 @@ func (s *Session) debitLoop(runIdx int) {
s.sessionManager.WarnSessionMinDuration(s.eventStart.GetUUID(), s.connId)
}
s.sessionRuns[runIdx].CallCosts = append(s.sessionRuns[runIdx].CallCosts, cc)
//engine.Logger.Debug(fmt.Sprintf("CALLCOST: %s", utils.ToJSON(cc)))
//utils.Logger.Debug(fmt.Sprintf("CALLCOST: %s", utils.ToJSON(cc)))
nextCd.TimeEnd = cc.GetEndTime() // set debited timeEnd
//engine.Logger.Debug(fmt.Sprintf("NEXTCD: %s DURATION: %s", utils.ToJSON(nextCd), nextCd.GetDuration().String()))
//utils.Logger.Debug(fmt.Sprintf("NEXTCD: %s DURATION: %s", utils.ToJSON(nextCd), nextCd.GetDuration().String()))
// update call duration with real debited duration
nextCd.DurationIndex -= debitPeriod
nextCd.DurationIndex += cc.GetDuration()
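The two-step correction above first backs out the presumed debit period, then adds what the rater actually granted, so DurationIndex tracks debited time exactly even when MaxDebit returns less than requested. A runnable sketch with made-up durations:

package main

import (
	"fmt"
	"time"
)

func main() {
	durationIndex := 30 * time.Second // accumulated before this round
	debitPeriod := 10 * time.Second   // presumed in advance
	granted := 6 * time.Second        // what MaxDebit actually returned

	durationIndex += debitPeriod // optimistic presumption before the debit
	durationIndex -= debitPeriod // back it out once the real grant is known
	durationIndex += granted
	fmt.Println(durationIndex) // 36s, not 40s
}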
@@ -116,7 +116,7 @@ func (s *Session) debitLoop(runIdx int) {
func (s *Session) Close(ev engine.Event) error {
close(s.stopDebit) // Close the channel so all the sessionRuns listening will be notified
if _, err := ev.GetEndTime(); err != nil {
engine.Logger.Err("Error parsing event stop time.")
utils.Logger.Err("Error parsing event stop time.")
for idx := range s.sessionRuns {
s.sessionRuns[idx].CallDescriptor.TimeEnd = s.sessionRuns[idx].CallDescriptor.TimeStart.Add(s.sessionRuns[idx].CallDescriptor.DurationIndex)
}
@@ -127,24 +127,24 @@ func (s *Session) Close(ev engine.Event) error {
if len(sr.CallCosts) == 0 {
continue // why would we have 0 callcosts
}
//engine.Logger.Debug(fmt.Sprintf("ALL CALLCOSTS: %s", utils.ToJSON(sr.CallCosts)))
//utils.Logger.Debug(fmt.Sprintf("ALL CALLCOSTS: %s", utils.ToJSON(sr.CallCosts)))
lastCC := sr.CallCosts[len(sr.CallCosts)-1]
lastCC.Timespans.Decompress()
// put credit back
startTime, err := ev.GetAnswerTime(sr.DerivedCharger.AnswerTimeField, s.sessionManager.Timezone())
if err != nil {
engine.Logger.Crit("Error parsing prepaid call start time from event")
utils.Logger.Crit("Error parsing prepaid call start time from event")
return err
}
duration, err := ev.GetDuration(sr.DerivedCharger.UsageField)
if err != nil {
engine.Logger.Crit(fmt.Sprintf("Error parsing call duration from event %s", err.Error()))
utils.Logger.Crit(fmt.Sprintf("Error parsing call duration from event %s", err.Error()))
return err
}
hangupTime := startTime.Add(duration)
//engine.Logger.Debug(fmt.Sprintf("BEFORE REFUND: %s", utils.ToJSON(lastCC)))
//utils.Logger.Debug(fmt.Sprintf("BEFORE REFUND: %s", utils.ToJSON(lastCC)))
err = s.Refund(lastCC, hangupTime)
//engine.Logger.Debug(fmt.Sprintf("AFTER REFUND: %s", utils.ToJSON(lastCC)))
//utils.Logger.Debug(fmt.Sprintf("AFTER REFUND: %s", utils.ToJSON(lastCC)))
if err != nil {
return err
}
@@ -156,7 +156,7 @@ func (s *Session) Close(ev engine.Event) error {
func (s *Session) Refund(lastCC *engine.CallCost, hangupTime time.Time) error {
end := lastCC.Timespans[len(lastCC.Timespans)-1].TimeEnd
refundDuration := end.Sub(hangupTime)
//engine.Logger.Debug(fmt.Sprintf("HANGUPTIME: %s REFUNDDURATION: %s", hangupTime.String(), refundDuration.String()))
//utils.Logger.Debug(fmt.Sprintf("HANGUPTIME: %s REFUNDDURATION: %s", hangupTime.String(), refundDuration.String()))
var refundIncrements engine.Increments
for i := len(lastCC.Timespans) - 1; i >= 0; i-- {
ts := lastCC.Timespans[i]
@@ -191,7 +191,7 @@ func (s *Session) Refund(lastCC *engine.CallCost, hangupTime time.Time) error {
}
}
// show only what was actually refunded (stopped in timespan)
// engine.Logger.Info(fmt.Sprintf("Refund duration: %v", initialRefundDuration-refundDuration))
// utils.Logger.Info(fmt.Sprintf("Refund duration: %v", initialRefundDuration-refundDuration))
if len(refundIncrements) > 0 {
cd := &engine.CallDescriptor{
Direction: lastCC.Direction,
@@ -208,7 +208,7 @@ func (s *Session) Refund(lastCC *engine.CallCost, hangupTime time.Time) error {
return err
}
}
//engine.Logger.Debug(fmt.Sprintf("REFUND INCR: %s", utils.ToJSON(refundIncrements)))
//utils.Logger.Debug(fmt.Sprintf("REFUND INCR: %s", utils.ToJSON(refundIncrements)))
lastCC.Cost -= refundIncrements.GetTotalCost()
lastCC.Timespans.Compress()
return nil
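Refund measures from the end of the last debited timespan back to the actual hangup moment, then walks the timespans in reverse collecting increments to return. A runnable sketch of the initial window (timestamps are made up):

package main

import (
	"fmt"
	"time"
)

func main() {
	// the last presumed debit ran to 10:00:40, but the caller hung up at 10:00:33
	end := time.Date(2015, 3, 10, 10, 0, 40, 0, time.UTC)
	hangupTime := time.Date(2015, 3, 10, 10, 0, 33, 0, time.UTC)
	refundDuration := end.Sub(hangupTime)
	fmt.Println(refundDuration) // 7s to hand back, increment by increment
}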
@@ -228,10 +228,10 @@ func (s *Session) SaveOperations() {
}
firstCC := sr.CallCosts[0]
for _, cc := range sr.CallCosts[1:] {
//engine.Logger.Debug(fmt.Sprintf("BEFORE MERGE: %s", utils.ToJSON(firstCC)))
//engine.Logger.Debug(fmt.Sprintf("OTHER MERGE: %s", utils.ToJSON(cc)))
//utils.Logger.Debug(fmt.Sprintf("BEFORE MERGE: %s", utils.ToJSON(firstCC)))
//utils.Logger.Debug(fmt.Sprintf("OTHER MERGE: %s", utils.ToJSON(cc)))
firstCC.Merge(cc)
//engine.Logger.Debug(fmt.Sprintf("AFTER MERGE: %s", utils.ToJSON(firstCC)))
//utils.Logger.Debug(fmt.Sprintf("AFTER MERGE: %s", utils.ToJSON(firstCC)))
}

var reply string
@@ -249,7 +249,7 @@ func (s *Session) SaveOperations() {
if err == utils.ErrExists {
s.Refund(firstCC, firstCC.Timespans[0].TimeStart)
} else {
engine.Logger.Err(fmt.Sprintf("<SM> ERROR failed to log call cost: %v", err))
utils.Logger.Err(fmt.Sprintf("<SM> ERROR failed to log call cost: %v", err))
}
}
}

@@ -20,18 +20,15 @@ package utils

import (
"archive/zip"
"bytes"

"crypto/rand"
"crypto/sha1"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"math"
"net/http"
"os"
"path/filepath"
"reflect"
@@ -289,30 +286,6 @@ func InfieldSplit(val string) []string {
return strings.Split(val, INFIELD_SEP)
}

func HttpJsonPost(url string, skipTlsVerify bool, content interface{}) ([]byte, error) {
body, err := json.Marshal(content)
if err != nil {
return nil, err
}
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: skipTlsVerify},
}
client := &http.Client{Transport: tr}
resp, err := client.Post(url, "application/json", bytes.NewBuffer(body))
if err != nil {
return nil, err
}
defer resp.Body.Close()
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
if resp.StatusCode > 299 {
return respBody, fmt.Errorf("Unexpected status code received: %d", resp.StatusCode)
}
return respBody, nil
}
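Per the hunk header (-289,30 +286,6), this helper leaves utils in this commit. For reference, its call shape was a single line; a usage sketch with a made-up endpoint and payload:

// hypothetical caller of the removed helper
payload := map[string]string{"CgrId": "deadbeef"}
respBody, err := HttpJsonPost("https://cdr.example.org/in", true, payload) // true skips TLS verification
if err != nil {
	fmt.Printf("post failed: %v, body: %s\n", err, respBody)
}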

func Unzip(src, dest string) error {
r, err := zip.OpenReader(src)
if err != nil {

@@ -19,9 +19,22 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
package utils

import (
"fmt"
"log"
"log/syslog"
)

var Logger LoggerInterface

func init() {
var err error
Logger, err = syslog.New(syslog.LOG_INFO, "CGRateS")
if err != nil {
Logger = new(StdLogger)
Logger.Err(fmt.Sprintf("Could not connect to syslog: %v", err))
}
}
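This is the destination of the move: a package-level Logger in utils, wired to syslog at init time with a StdLogger fallback when syslog is unreachable (which is exactly the case where the unchecked *syslog.Writer assertion in the Kamailio Connect hunk above would panic). A minimal usage sketch from any other package (the log lines are hypothetical):

package main

import "github.com/cgrates/cgrates/utils"

func main() {
	// no engine import needed anymore; the logger lives in utils
	utils.Logger.Info("<Demo> service started")
	utils.Logger.Err("<Demo> something went wrong")
}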

type LoggerInterface interface {
Alert(m string) error
Close() error