mirror of
https://github.com/cgrates/cgrates.git
synced 2026-02-11 18:16:24 +05:00
309 lines
9.9 KiB
Go
/*
Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
Copyright (C) ITsysCOM GmbH

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>
*/

package ers

import (
	"bufio"
	"encoding/csv"
	"errors"
	"fmt"
	"io"
	"os"
	"path"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/cgrates/ltcache"

	"github.com/cgrates/cgrates/agents"
	"github.com/cgrates/cgrates/config"
	"github.com/cgrates/cgrates/engine"
	"github.com/cgrates/cgrates/utils"
)

func NewFlatstoreER(cfg *config.CGRConfig, cfgIdx int,
|
|
rdrEvents chan *erEvent, rdrErr chan error,
|
|
fltrS *engine.FilterS, rdrExit chan struct{}) (er EventReader, err error) {
|
|
srcPath := cfg.ERsCfg().Readers[cfgIdx].SourcePath
|
|
if strings.HasSuffix(srcPath, utils.Slash) {
|
|
srcPath = srcPath[:len(srcPath)-1]
|
|
}
|
|
flatER := &FlatstoreER{
|
|
cgrCfg: cfg,
|
|
cfgIdx: cfgIdx,
|
|
fltrS: fltrS,
|
|
rdrDir: srcPath,
|
|
rdrEvents: rdrEvents,
|
|
rdrError: rdrErr,
|
|
rdrExit: rdrExit,
|
|
conReqs: make(chan struct{}, cfg.ERsCfg().Readers[cfgIdx].ConcurrentReqs),
|
|
}
|
|
var processFile struct{}
|
|
for i := 0; i < cfg.ERsCfg().Readers[cfgIdx].ConcurrentReqs; i++ {
|
|
flatER.conReqs <- processFile // Empty initiate so we do not need to wait later when we pop
|
|
}
|
|
flatER.cache = ltcache.NewCache(ltcache.UnlimitedCaching, cfg.ERsCfg().Readers[cfgIdx].PartialRecordCache, false, flatER.dumpToFile)
|
|
return flatER, err
|
|
}
|
|
|
|
// FlatstoreER implements EventReader interface for Flatstore CDR.
// Flatstore rows arrive as unpaired INVITE/BYE legs; matching legs are
// held in cache until their pair shows up (see processFile/pairToRecord).
type FlatstoreER struct {
	sync.RWMutex
	cgrCfg    *config.CGRConfig // complete CGRateS configuration
	cfgIdx    int               // index of config instance within ERsCfg.Readers
	fltrS     *engine.FilterS   // filter service used to gate events before dispatch
	cache     *ltcache.Cache    // holds UnpairedRecords waiting for their pair; on expiry dumpToFile is invoked
	rdrDir    string            // directory monitored for incoming CDR files
	rdrEvents chan *erEvent     // channel to dispatch the events created to
	rdrError  chan error        // channel used to surface reader errors
	rdrExit   chan struct{}     // closed/signaled to stop the Serve loop
	conReqs   chan struct{}     // limit number of opened files
}
func (rdr *FlatstoreER) Config() *config.EventReaderCfg {
|
|
return rdr.cgrCfg.ERsCfg().Readers[rdr.cfgIdx]
|
|
}
|
|
|
|
func (rdr *FlatstoreER) Serve() (err error) {
|
|
switch rdr.Config().RunDelay {
|
|
case time.Duration(0): // 0 disables the automatic read, maybe done per API
|
|
return
|
|
case time.Duration(-1):
|
|
return watchDir(rdr.rdrDir, rdr.processFile,
|
|
utils.ERs, rdr.rdrExit)
|
|
default:
|
|
go func() {
|
|
for {
|
|
// Not automated, process and sleep approach
|
|
select {
|
|
case <-rdr.rdrExit:
|
|
utils.Logger.Info(
|
|
fmt.Sprintf("<%s> stop monitoring path <%s>",
|
|
utils.ERs, rdr.rdrDir))
|
|
return
|
|
default:
|
|
}
|
|
filesInDir, _ := os.ReadDir(rdr.rdrDir)
|
|
for _, file := range filesInDir {
|
|
if !strings.HasSuffix(file.Name(), utils.CSVSuffix) { // hardcoded file extension for csv event reader
|
|
continue // used in order to filter the files from directory
|
|
}
|
|
go func(fileName string) {
|
|
if err := rdr.processFile(rdr.rdrDir, fileName); err != nil {
|
|
utils.Logger.Warning(
|
|
fmt.Sprintf("<%s> processing file %s, error: %s",
|
|
utils.ERs, fileName, err.Error()))
|
|
}
|
|
}(file.Name())
|
|
}
|
|
time.Sleep(rdr.Config().RunDelay)
|
|
}
|
|
}()
|
|
}
|
|
return
|
|
}
|
|
|
|
// processFile is called for each file in a directory and dispatches erEvents from it.
// Each CSV row is either a failed call (duration 0 appended directly) or one
// leg of a call: the first leg seen is cached under its OriginID, the second
// pairs with it via pairToRecord to produce a full record with computed
// duration. Paired records are filtered, mapped to fields and pushed on
// rdrEvents; the file is finally moved to ProcessedPath when configured.
func (rdr *FlatstoreER) processFile(fPath, fName string) (err error) {
	if cap(rdr.conReqs) != 0 { // 0 goes for no limit
		processFile := <-rdr.conReqs // Queue here for maxOpenFiles
		defer func() { rdr.conReqs <- processFile }()
	}
	absPath := path.Join(fPath, fName)
	utils.Logger.Info(
		fmt.Sprintf("<%s> parsing <%s>", utils.ERs, absPath))
	var file *os.File
	if file, err = os.Open(absPath); err != nil {
		return
	}
	defer file.Close()
	csvReader := csv.NewReader(bufio.NewReader(file))
	csvReader.FieldsPerRecord = rdr.cgrCfg.ERsCfg().Readers[rdr.cfgIdx].RowLength
	csvReader.Comma = ','
	// FieldSep overrides the default comma when configured; only its first
	// byte is used as the CSV separator
	if len(rdr.Config().FieldSep) > 0 {
		csvReader.Comma = rune(rdr.Config().FieldSep[0])
	}
	csvReader.Comment = '#'
	rowNr := 0 // This counts the rows in the file, not really number of CDRs
	evsPosted := 0
	timeStart := time.Now()
	reqVars := utils.NavigableMap2{utils.MetaFileName: utils.NewNMData(fName)}
	for {
		var record []string
		if record, err = csvReader.Read(); err != nil {
			if err == io.EOF {
				break // normal end of file
			}
			return // any other read error aborts the whole file
		}
		if strings.HasPrefix(fName, rdr.Config().FailedCallsPrefix) { // Use the first index since they should be the same in all configs
			record = append(record, "0") // Append duration 0 for failed calls flatstore CDR
		} else {
			// NOTE: err is intentionally shadowed here so row-level failures
			// do not clobber the function's named return
			pr, err := NewUnpairedRecord(record, rdr.Config().Timezone, fName)
			if err != nil {
				utils.Logger.Err(
					fmt.Sprintf("<%s> Converting row : <%s> to unpairedRecord , ignoring due to error: <%s>",
						utils.ERs, record, err.Error()),
				)
				continue
			}
			// first leg seen for this OriginID: cache it and wait for its pair
			if val, has := rdr.cache.Get(pr.OriginID); !has {
				rdr.cache.Set(pr.OriginID, pr, nil)
				continue
			} else {
				// second leg: merge with the cached one into a complete record
				pair := val.(*UnpairedRecord)
				record, err = pairToRecord(pair, pr)
				if err != nil {
					utils.Logger.Err(
						fmt.Sprintf("<%s> Merging unpairedRecords : <%s> and <%s> to record , ignoring due to error: <%s>",
							utils.ERs, utils.ToJSON(pair), utils.ToJSON(pr), err.Error()),
					)
					continue
				}
				rdr.cache.Remove(pr.OriginID)
			}
		}

		// build Usage from Fields based on record length
		// NOTE(review): this mutates the shared reader config per record;
		// concurrent processFile goroutines touch the same Fields slice —
		// confirm this is safe under the current locking model
		for i, cntFld := range rdr.Config().Fields {
			if cntFld.Path == utils.MetaCgreq+utils.NestingSep+utils.Usage {
				rdr.Config().Fields[i].Value = config.NewRSRParsersMustCompile("~*req."+strconv.Itoa(len(record)-1), true, utils.INFIELD_SEP) // in case of flatstore, last element will be the duration computed by us
			}
		}
		rowNr++ // increment the rowNr after checking if it's not the end of file
		agReq := agents.NewAgentRequest(
			config.NewSliceDP(record), reqVars,
			nil, nil, rdr.Config().Tenant,
			rdr.cgrCfg.GeneralCfg().DefaultTenant,
			utils.FirstNonEmpty(rdr.Config().Timezone,
				rdr.cgrCfg.GeneralCfg().DefaultTimezone),
			rdr.fltrS, nil, nil) // create an AgentRequest
		// rows failing the configured filters (or erroring while filtering)
		// are silently skipped
		if pass, err := rdr.fltrS.Pass(agReq.Tenant, rdr.Config().Filters,
			agReq); err != nil || !pass {
			continue
		}
		if err := agReq.SetFields(rdr.Config().Fields); err != nil {
			utils.Logger.Warning(
				fmt.Sprintf("<%s> reading file: <%s> row <%d>, ignoring due to error: <%s>",
					utils.ERs, absPath, rowNr, err.Error()))
			continue
		}

		// dispatch the built event to the ERs service
		rdr.rdrEvents <- &erEvent{
			cgrEvent: config.NMAsCGREvent(agReq.CGRRequest, agReq.Tenant, utils.NestingSep),
			rdrCfg:   rdr.Config(),
		}
		evsPosted++
	}
	if rdr.Config().ProcessedPath != "" {
		// Finished with file, move it to processed folder
		outPath := path.Join(rdr.Config().ProcessedPath, fName)
		if err = os.Rename(absPath, outPath); err != nil {
			return
		}
	}

	utils.Logger.Info(
		fmt.Sprintf("%s finished processing file <%s>. Total records processed: %d, events posted: %d, run duration: %s",
			utils.ERs, absPath, rowNr, evsPosted, time.Now().Sub(timeStart)))
	return
}
func NewUnpairedRecord(record []string, timezone string, fileName string) (*UnpairedRecord, error) {
|
|
if len(record) < 7 {
|
|
return nil, errors.New("MISSING_IE")
|
|
}
|
|
pr := &UnpairedRecord{Method: record[0], OriginID: record[3] + record[1] + record[2], Values: record, FileName: fileName}
|
|
var err error
|
|
if pr.Timestamp, err = utils.ParseTimeDetectLayout(record[6], timezone); err != nil {
|
|
return nil, err
|
|
}
|
|
return pr, nil
|
|
}
|
|
|
|
// UnpairedRecord is a partial record received from Flatstore, can be INVITE
// or BYE and it needs to be paired in order to produce duration.
type UnpairedRecord struct {
	Method    string    // INVITE or BYE
	OriginID  string    // Computed here as the concatenation of record fields 3, 1 and 2
	Timestamp time.Time // Timestamp of the event, as written by the db_flatstore module
	Values    []string  // Can contain original values or updated via UpdateValues
	FileName  string    // Source file the record was read from
}
// Pairs INVITE and BYE into final record containing as last element the duration
|
|
func pairToRecord(part1, part2 *UnpairedRecord) ([]string, error) {
|
|
var invite, bye *UnpairedRecord
|
|
if part1.Method == "INVITE" {
|
|
invite = part1
|
|
} else if part2.Method == "INVITE" {
|
|
invite = part2
|
|
} else {
|
|
return nil, errors.New("MISSING_INVITE")
|
|
}
|
|
if part1.Method == "BYE" {
|
|
bye = part1
|
|
} else if part2.Method == "BYE" {
|
|
bye = part2
|
|
} else {
|
|
return nil, errors.New("MISSING_BYE")
|
|
}
|
|
if len(invite.Values) != len(bye.Values) {
|
|
return nil, errors.New("INCONSISTENT_VALUES_LENGTH")
|
|
}
|
|
record := invite.Values
|
|
for idx := range record {
|
|
switch idx {
|
|
case 0, 1, 2, 3, 6: // Leave these values as they are
|
|
case 4, 5:
|
|
record[idx] = bye.Values[idx] // Update record with status from bye
|
|
default:
|
|
if bye.Values[idx] != "" { // Any value higher than 6 is dynamically inserted, overwrite if non empty
|
|
record[idx] = bye.Values[idx]
|
|
}
|
|
|
|
}
|
|
}
|
|
callDur := bye.Timestamp.Sub(invite.Timestamp)
|
|
record = append(record, strconv.FormatFloat(callDur.Seconds(), 'f', -1, 64))
|
|
return record, nil
|
|
}
|
|
|
|
func (rdr *FlatstoreER) dumpToFile(itmID string, value any) {
|
|
unpRcd := value.(*UnpairedRecord)
|
|
|
|
dumpFilePath := path.Join(rdr.Config().ProcessedPath, unpRcd.FileName+utils.TmpSuffix)
|
|
fileOut, err := os.Create(dumpFilePath)
|
|
if err != nil {
|
|
utils.Logger.Err(fmt.Sprintf("<%s> Failed creating %s, error: %s",
|
|
utils.ERs, dumpFilePath, err.Error()))
|
|
return
|
|
}
|
|
csvWriter := csv.NewWriter(fileOut)
|
|
csvWriter.Comma = rune(rdr.Config().FieldSep[0])
|
|
if err = csvWriter.Write(unpRcd.Values); err != nil {
|
|
utils.Logger.Err(fmt.Sprintf("<%s> Failed writing partial record %v to file: %s, error: %s",
|
|
utils.ERs, unpRcd.Values, dumpFilePath, err.Error()))
|
|
return
|
|
}
|
|
|
|
csvWriter.Flush()
|
|
}
|