Added partial reader

This commit is contained in:
Trial97
2021-05-13 17:26:04 +03:00
committed by Dan Christian Bogos
parent b4ae5bda72
commit d868c0a5f0
46 changed files with 2672 additions and 1201 deletions

View File

@@ -199,10 +199,10 @@ func headerLen(a *diam.AVP) int {
return 8
}
func updateAVPLenght(avps []*diam.AVP) (l int) {
func updateAVPLength(avps []*diam.AVP) (l int) {
for _, avp := range avps {
if v, ok := (avp.Data).(*diam.GroupedAVP); ok {
avp.Length = headerLen(avp) + updateAVPLenght(v.AVP)
avp.Length = headerLen(avp) + updateAVPLength(v.AVP)
}
l += avp.Length
}

View File

@@ -322,7 +322,7 @@ var posibleLoaderTypes = utils.NewStringSet([]string{utils.MetaAttributes,
var possibleReaderTypes = utils.NewStringSet([]string{utils.MetaFileCSV,
utils.MetaKafkajsonMap, utils.MetaFileXML, utils.MetaSQL, utils.MetaFileFWV,
utils.MetaPartialCSV, utils.MetaFlatstore, utils.MetaFileJSON, utils.MetaNone})
utils.MetaFileJSON, utils.MetaNone})
var possibleExporterTypes = utils.NewStringSet([]string{utils.MetaFileCSV, utils.MetaNone, utils.MetaFileFWV,
utils.MetaHTTPPost, utils.MetaHTTPjsonMap, utils.MetaAMQPjsonMap, utils.MetaAMQPV1jsonMap, utils.MetaSQSjsonMap,

View File

@@ -295,9 +295,12 @@ const CGRATES_CFG_JSON = `
},
"ers": { // EventReaderService
"enabled": false, // starts the EventReader service: <true|false>
"sessions_conns":["*internal"], // RPC Connections IDs
"ers": { // EventReaderService
"enabled": false, // starts the EventReader service: <true|false>
"sessions_conns":["*internal"], // RPC Connections IDs
"partial_cache_ttl": "1s", // the duration to cache partial records when not pairing
"partial_cache_action": "*post_cdr", // the action that will be exeuted for the partial CSVs that are not matched<*post_cdr|*dump_to_file>
// "partial_path": "/var/spool/cgrates/ers/partial", // the path were the partial events will be sent
"readers": [
{
"id": "*default", // identifier of the EventReader profile
@@ -307,26 +310,18 @@ const CGRATES_CFG_JSON = `
"source_path": "/var/spool/cgrates/ers/in", // read data from this path
"processed_path": "/var/spool/cgrates/ers/out", // move processed data here
"opts": {
// FileCSV and PartialCSV
// Partial
// "partialPath": "/", // the path were the partial events will be sent
// "partialCacheAction": "*post_cdr", // the action that will be exeuted for the partial CSVs that are not matched<*post_cdr|*dump_to_file>
"partialOrderField": "~*req.AnswerTime", // the field after what the events are order when merged
// "partialcsvFieldSeparator": "," // separator used when dumping the fields
// FileCSV
"csvRowLength": 0, // Number of fields from csv file
"csvFieldSeparator": ",", // separator used when reading the fields
"csvHeaderDefineChar": ":", // the starting character for header definition used in case of CSV files
// "csvLazyQuotes": false, // if a quote may appear in an unquoted field and a non-doubled quote may appear in a quoted field
// PartialCSV
"csvCacheExpiryAction": "*post_cdr", // the action that will be exeuted for the partial CSVs that are not matched<*post_cdr|*dump_to_file>
// "csvRecordCacheTTL": "1s" // Duration to cache partial records when not pairing
// FlatStore
"fstRowLength": 0, // Number of fields from csv file
"fstFieldSeparator": ",", // separator used when reading the fields
// "fstFailedCallsPrefix": "" // Used in case of flatstore CDRs to avoid searching for BYE records
// "fstRecordCacheTTL": "1s" // Duration to cache partial records when not pairing
// "fstLazyQuotes": false, // if a quote may appear in an unquoted field and a non-doubled quote may appear in a quoted field
"fstMethod": "~*req.0", // the rsr parser that will determine the method of the current record
"fstOriginID": "~*req.3;~*req.1;~*req.2", // the rsr parser that will determine the originID of the current record
"fstMadatoryACK": false, // if we should receive the ACK before processing the record
// FileXML
"xmlRootPath": "", // path towards one event in case of XML CDRs
@@ -400,6 +395,7 @@ const CGRATES_CFG_JSON = `
{"tag": "AnswerTime", "path": "*cgreq.AnswerTime", "type": "*variable", "value": "~*req.12", "mandatory": true},
{"tag": "Usage", "path": "*cgreq.Usage", "type": "*variable", "value": "~*req.13", "mandatory": true},
],
"partial_commit_fields": [],
"cache_dump_fields": [],
},
],
@@ -465,7 +461,7 @@ const CGRATES_CFG_JSON = `
// "awsKey": "", // AWSKey
// "awsSecret": "", // AWSSecret
// "awsToken": "", // AWSToken
// "s3FolderPath": "", // AWSFolderPath
// "s3FolderPath": "", // S3FolderPath
}, // extra options for exporter
"tenant": "", // tenant used in filterS.Pass

View File

@@ -475,8 +475,12 @@ func (cfg *CGRConfig) checkConfigSanity() error {
}
switch rdr.Type {
case utils.MetaFileCSV, utils.MetaPartialCSV:
for _, dir := range []string{rdr.ProcessedPath, rdr.SourcePath} {
case utils.MetaFileCSV:
paths := []string{rdr.ProcessedPath, rdr.SourcePath}
if rdr.ProcessedPath == utils.EmptyString {
paths = []string{rdr.SourcePath}
}
for _, dir := range paths {
if _, err := os.Stat(dir); err != nil && os.IsNotExist(err) {
return fmt.Errorf("<%s> nonexistent folder: %s for reader with ID: %s", utils.ERs, dir, rdr.ID)
}
@@ -495,42 +499,6 @@ func (cfg *CGRConfig) checkConfigSanity() error {
return fmt.Errorf("<%s> error when converting %s: <%s> for reader with ID: %s", utils.ERs, utils.CSV+utils.LazyQuotes, err.Error(), rdr.ID)
}
}
if rdr.Type == utils.MetaPartialCSV {
if act, has := rdr.Opts[utils.PartialCSVCacheExpiryActionOpt]; has && (utils.IfaceAsString(act) != utils.MetaDumpToFile &&
utils.IfaceAsString(act) != utils.MetaPostCDR) {
return fmt.Errorf("<%s> wrong partial expiry action for reader with ID: %s", utils.ERs, rdr.ID)
}
if ttl, has := rdr.Opts[utils.PartialCSVRecordCacheOpt]; has {
if _, err := utils.IfaceAsDuration(ttl); err != nil {
return fmt.Errorf("<%s> error when converting %s: <%s> for reader with ID: %s", utils.ERs, utils.PartialCSVRecordCacheOpt, err.Error(), rdr.ID)
}
}
}
case utils.MetaFlatstore:
for _, dir := range []string{rdr.ProcessedPath, rdr.SourcePath} {
if _, err := os.Stat(dir); err != nil && os.IsNotExist(err) {
return fmt.Errorf("<%s> nonexistent folder: %s for reader with ID: %s", utils.ERs, dir, rdr.ID)
}
}
if fldSep, has := rdr.Opts[utils.FlatstorePrfx+utils.FieldSepOpt]; has &&
utils.IfaceAsString(fldSep) == utils.EmptyString {
return fmt.Errorf("<%s> empty %s for reader with ID: %s", utils.ERs, utils.FlatstorePrfx+utils.FieldSepOpt, rdr.ID)
}
if rowl, has := rdr.Opts[utils.FlatstorePrfx+utils.RowLengthOpt]; has {
if _, err := utils.IfaceAsTInt64(rowl); err != nil {
return fmt.Errorf("<%s> error when converting %s: <%s> for reader with ID: %s", utils.ERs, utils.FlatstorePrfx+utils.RowLengthOpt, err.Error(), rdr.ID)
}
}
if lq, has := rdr.Opts[utils.FlatstorePrfx+utils.LazyQuotes]; has {
if _, err := utils.IfaceAsBool(lq); err != nil {
return fmt.Errorf("<%s> error when converting %s: <%s> for reader with ID: %s", utils.ERs, utils.FlatstorePrfx+utils.LazyQuotes, err.Error(), rdr.ID)
}
}
if ttl, has := rdr.Opts[utils.FstPartialRecordCacheOpt]; has {
if _, err := utils.IfaceAsDuration(ttl); err != nil {
return fmt.Errorf("<%s> error when converting %s: <%s> for reader with ID: %s", utils.ERs, utils.FstPartialRecordCacheOpt, err.Error(), rdr.ID)
}
}
case utils.MetaKafkajsonMap:
if rdr.RunDelay > 0 {
return fmt.Errorf("<%s> the RunDelay field can not be bigger than zero for reader with ID: %s", utils.ERs, rdr.ID)

View File

@@ -26,9 +26,12 @@ import (
// ERsCfg the config for ERs
type ERsCfg struct {
Enabled bool
SessionSConns []string
Readers []*EventReaderCfg
Enabled bool
SessionSConns []string
Readers []*EventReaderCfg
PartialCacheTTL time.Duration
PartialCacheAction string
PartialPath string
}
func (erS *ERsCfg) loadFromJSONCfg(jsnCfg *ERsJsonCfg, msgTemplates map[string][]*FCTemplate, sep string, dfltRdrCfg *EventReaderCfg) (err error) {
@@ -41,6 +44,17 @@ func (erS *ERsCfg) loadFromJSONCfg(jsnCfg *ERsJsonCfg, msgTemplates map[string][
if jsnCfg.Sessions_conns != nil {
erS.SessionSConns = updateInternalConns(*jsnCfg.Sessions_conns, utils.MetaSessionS)
}
if jsnCfg.Partial_cache_ttl != nil {
if erS.PartialCacheTTL, err = utils.ParseDurationWithNanosecs(*jsnCfg.Partial_cache_ttl); err != nil {
return
}
}
if jsnCfg.Partial_cache_action != nil {
erS.PartialCacheAction = *jsnCfg.Partial_cache_action
}
if jsnCfg.Partial_path != nil {
erS.PartialPath = *jsnCfg.Partial_path
}
return erS.appendERsReaders(jsnCfg.Readers, msgTemplates, sep, dfltRdrCfg)
}
@@ -111,19 +125,20 @@ func (erS *ERsCfg) AsMapInterface(separator string) (initialMP map[string]interf
// EventReaderCfg the event for the Event Reader
type EventReaderCfg struct {
ID string
Type string
RunDelay time.Duration
ConcurrentReqs int
SourcePath string
ProcessedPath string
Opts map[string]interface{}
Tenant RSRParsers
Timezone string
Filters []string
Flags utils.FlagsWithParams
Fields []*FCTemplate
CacheDumpFields []*FCTemplate
ID string
Type string
RunDelay time.Duration
ConcurrentReqs int
SourcePath string
ProcessedPath string
Opts map[string]interface{}
Tenant RSRParsers
Timezone string
Filters []string
Flags utils.FlagsWithParams
Fields []*FCTemplate
PartialCommitFields []*FCTemplate
CacheDumpFields []*FCTemplate
}
func (er *EventReaderCfg) loadFromJSONCfg(jsnCfg *EventReaderJsonCfg, msgTemplates map[string][]*FCTemplate, sep string) (err error) {
@@ -184,6 +199,16 @@ func (er *EventReaderCfg) loadFromJSONCfg(jsnCfg *EventReaderJsonCfg, msgTemplat
er.CacheDumpFields = tpls
}
}
if jsnCfg.Partial_commit_fields != nil {
if er.PartialCommitFields, err = FCTemplatesFromFCTemplatesJSONCfg(*jsnCfg.Partial_commit_fields, sep); err != nil {
return err
}
if tpls, err := InflateTemplates(er.PartialCommitFields, msgTemplates); err != nil {
return err
} else if tpls != nil {
er.PartialCommitFields = tpls
}
}
if jsnCfg.Opts != nil {
for k, v := range jsnCfg.Opts {
er.Opts[k] = v
@@ -221,6 +246,12 @@ func (er EventReaderCfg) Clone() (cln *EventReaderCfg) {
cln.CacheDumpFields[idx] = fld.Clone()
}
}
if er.PartialCommitFields != nil {
cln.PartialCommitFields = make([]*FCTemplate, len(er.PartialCommitFields))
for idx, fld := range er.PartialCommitFields {
cln.PartialCommitFields[idx] = fld.Clone()
}
}
for k, v := range er.Opts {
cln.Opts[k] = v
}
@@ -266,6 +297,13 @@ func (er *EventReaderCfg) AsMapInterface(separator string) (initialMP map[string
}
initialMP[utils.CacheDumpFieldsCfg] = cacheDumpFields
}
if er.PartialCommitFields != nil {
parCFields := make([]map[string]interface{}, len(er.PartialCommitFields))
for i, item := range er.PartialCommitFields {
parCFields[i] = item.AsMapInterface(separator)
}
initialMP[utils.PartialCommitFieldsCfg] = parCFields
}
if er.RunDelay > 0 {
initialMP[utils.RunDelayCfg] = er.RunDelay.String()
@@ -277,19 +315,20 @@ func (er *EventReaderCfg) AsMapInterface(separator string) (initialMP map[string
// EventReaderSJsonCfg is the configuration of a single EventReader
type EventReaderJsonCfg struct {
Id *string
Type *string
Run_delay *string
Concurrent_requests *int
Source_path *string
Processed_path *string
Opts map[string]interface{}
Tenant *string
Timezone *string
Filters *[]string
Flags *[]string
Fields *[]*FcTemplateJsonCfg
Cache_dump_fields *[]*FcTemplateJsonCfg
Id *string
Type *string
Run_delay *string
Concurrent_requests *int
Source_path *string
Processed_path *string
Opts map[string]interface{}
Tenant *string
Timezone *string
Filters *[]string
Flags *[]string
Fields *[]*FcTemplateJsonCfg
Partial_commit_fields *[]*FcTemplateJsonCfg
Cache_dump_fields *[]*FcTemplateJsonCfg
}
func diffEventReaderJsonCfg(d *EventReaderJsonCfg, v1, v2 *EventReaderCfg, separator string) *EventReaderJsonCfg {
@@ -384,9 +423,12 @@ func diffEventReadersJsonCfg(d *[]*EventReaderJsonCfg, v1, v2 []*EventReaderCfg,
// EventReaderSJsonCfg contains the configuration of EventReaderService
type ERsJsonCfg struct {
Enabled *bool
Sessions_conns *[]string
Readers *[]*EventReaderJsonCfg
Enabled *bool
Sessions_conns *[]string
Readers *[]*EventReaderJsonCfg
Partial_cache_ttl *string
Partial_cache_action *string
Partial_path *string
}
func diffERsJsonCfg(d *ERsJsonCfg, v1, v2 *ERsCfg, separator string) *ERsJsonCfg {

View File

@@ -78,6 +78,7 @@
"ers": {
"enabled": true,
"sessions_conns": ["*internal"],
"partial_cache_ttl": "500ms",
"readers": [
{
"id": "file_reader1",
@@ -260,13 +261,14 @@
"id": "PartialCSV1",
"enabled": true,
"run_delay": "-1",
"type": "*partial_csv",
"type": "*file_csv",
"source_path": "/tmp/partErs1/in",
"flags": ["*cdrs"],
"processed_path": "/tmp/partErs1/out",
"processed_path": "",
"opts": {
"csvRecordCacheTTL": "500ms",
"csvCacheExpiryAction": "*dump_to_file",
"partialOrderField": "~*req.AnswerTime",
"partialPath": "/tmp/partErs1/out",
},
"fields":[
{"tag": "ToR", "path": "*cgreq.ToR", "type": "*constant", "value": "*voice", "mandatory": true},
@@ -285,7 +287,9 @@
{"tag": "SetupTime", "path": "*cgreq.SetupTime", "type": "*variable", "value": "~*req.4", "mandatory": true},
{"tag": "AnswerTime", "path": "*cgreq.AnswerTime", "type": "*variable", "value": "~*req.4", "mandatory": true},
{"tag": "Usage", "path": "*cgreq.Usage", "type": "*variable", "value": "~*req.6:s/^(\\d+)$/${1}s/", "mandatory": true},
{"tag": "Partial", "path": "*cgreq.Partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]},
{"tag": "Partial", "path": "*cgreq.Partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]},// keep this here for partial cdr field
{"tag": "Partial", "path": "*opts.*partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]},
{"tag": "Partial", "path": "*opts.*partial", "type": "*constant", "value": "false", "filters":["*notstring:~*req.10:partial"]},
],
"cache_dump_fields": [
{"tag": "OriginID", "path":"*exp.OriginID", "type": "*variable", "value": "~*req.OriginID"},
@@ -303,13 +307,14 @@
"id": "PartialCSV_PostExpiry",
"enabled": true,
"run_delay": "-1",
"type": "*partial_csv",
"type": "*file_csv",
"source_path": "/tmp/partErs2/in",
"processed_path": "/tmp/partErs2/out",
"processed_path": "",
"flags": ["*cdrs"],
"opts": {
"csvRecordCacheTTL": "500ms",
"csvCacheExpiryAction": "*post_cdr",
"partialOrderField": "~*req.AnswerTime",
"partialPath": "/tmp/partErs2/out",
},
"fields":[
{"tag": "ToR", "path": "*cgreq.ToR", "type": "*constant", "value": "*voice", "mandatory": true},
@@ -328,38 +333,66 @@
{"tag": "SetupTime", "path": "*cgreq.SetupTime", "type": "*variable", "value": "~*req.4", "mandatory": true},
{"tag": "AnswerTime", "path": "*cgreq.AnswerTime", "type": "*variable", "value": "~*req.4", "mandatory": true},
{"tag": "Usage", "path": "*cgreq.Usage", "type": "*variable", "value": "~*req.6:s/^(\\d+)$/${1}s/", "mandatory": true},
{"tag": "Partial", "path": "*cgreq.Partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]}
{"tag": "Partial", "path": "*cgreq.Partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]},// keep this here for partial cdr field
{"tag": "Partial", "path": "*opts.*partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]},
{"tag": "Partial", "path": "*opts.*partial", "type": "*constant", "value": "false", "filters":["*notstring:~*req.10:partial"]}
],
},
{
"id": "FlatstoreOsips",
"enabled": true,
"run_delay": "-1",
"type": "*flatstore",
"type": "*file_csv",
"opts": {
"fstFieldSeparator":"|",
"fstFailedCallsPrefix": "missed_calls",
"fstRecordCacheTTL": "500ms",
"csvFieldSeparator":"|",
"partialcsvFieldSeparator": "|",
"partialCacheAction": "*dump_to_file",
"partialOrderField": "~*opts.order",
"partialPath": "/tmp/flatstoreErs/out",
},
"source_path": "/tmp/flatstoreErs/in",
"processed_path": "/tmp/flatstoreErs/out",
"flags": ["*cdrs"],
"fields":[
{"tag": "Tor", "path": "*cgreq.ToR", "type": "*constant", "value": "*voice", "mandatory": true},
{"tag": "OriginHost", "path": "*cgreq.OriginHost", "type": "*constant","value":"flatStore", "mandatory": true},
{"tag": "OriginID", "path": "*cgreq.OriginID", "type": "*variable","value":"~*req.3;~*req.1;~*req.2", "mandatory": true},
{"tag": "RequestType", "path": "*cgreq.RequestType", "type": "*variable", "value": "~*invite.7", "mandatory": true},
{"tag": "Tenant", "path": "*cgreq.Tenant", "type": "*constant", "value": "cgrates.org", "mandatory": true},
{"tag": "Category", "path": "*cgreq.Category", "type": "*constant", "value": "call", "mandatory": true},
{"tag": "Account", "path": "*cgreq.Account", "type": "*variable", "value": "~*invite.8", "mandatory": true},
{"tag": "Subject", "path": "*cgreq.Subject", "type": "*variable", "value": "~*invite.8", "mandatory": true},
{"tag": "Destination", "path": "*cgreq.Destination", "type": "*variable", "value": "~*invite.9", "mandatory": true},
{"tag": "SetupTime", "path": "*cgreq.SetupTime", "type": "*variable", "value": "~*invite.6", "mandatory": true},
{"tag": "AnswerTime", "path": "*cgreq.AnswerTime", "type": "*variable", "value": "~*invite.6", "mandatory": true},
{"tag": "Usage", "path": "*cgreq.Usage", "type": "*constant","value": "0", "mandatory": true, "filters": ["*prefix:~*vars.FileName:missed_calls"]},
{"tag": "Usage", "path": "*cgreq.Usage", "type": "*usage_difference","value": "~*bye.6;~*invite.6", "mandatory": true, "filters": ["*notprefix:~*vars.FileName:missed_calls"]},
{"tag": "RequestType", "path": "*cgreq.RequestType", "type": "*variable", "value": "~*req.7", "mandatory": true,"filters": ["*string:~*req.0:INVITE"]},
{"tag": "Account", "path": "*cgreq.Account", "type": "*variable", "value": "~*req.8", "mandatory": true,"filters": ["*string:~*req.0:INVITE"]},
{"tag": "Subject", "path": "*cgreq.Subject", "type": "*variable", "value": "~*req.8", "mandatory": true,"filters": ["*string:~*req.0:INVITE"]},
{"tag": "Destination", "path": "*cgreq.Destination", "type": "*variable", "value": "~*req.9", "mandatory": true,"filters": ["*string:~*req.0:INVITE"]},
{"tag": "SetupTime", "path": "*cgreq.SetupTime", "type": "*variable", "value": "~*req.6", "mandatory": true,"filters": ["*string:~*req.0:INVITE"]},
{"tag": "AnswerTime", "path": "*cgreq.AnswerTime", "type": "*variable", "value": "~*req.6", "mandatory": true,"filters": ["*string:~*req.0:INVITE"]},
{"tag": "EndTime", "path": "*cgreq.EndTime", "type": "*variable","value": "~*req.6", "mandatory": true, "filters": ["*string:~*req.0:BYE"]},
{"tag": "DisconnectCause", "path": "*cgreq.DisconnectCause", "type": "*variable", "value": "~*req.4; ;~*req.5", "mandatory": true},
{"tag": "DialogId", "path": "*cgreq.DialogId", "type": "*variable", "value": "~*req.11"}
{"tag": "DialogId", "path": "*cgreq.DialogId", "type": "*variable", "value": "~*req.11"},
{"tag": "Partial", "path": "*opts.*partial", "type": "*constant", "value": "true"},
{"tag": "Partial", "path": "*opts.*partial", "type": "*constant", "value": "false","filters": ["*prefix:~*vars.FileName:missed_calls"]},
{"tag": "Invite", "path": "*opts.invite", "type": "*constant", "value": "true", "filters":["*string:~*req.0:INVITE"]},
{"tag": "Bye", "path": "*opts.bye", "type": "*constant", "value": "true", "filters":["*string:~*req.0:BYE"]},
{"tag": "Order", "path": "*opts.order", "type": "*constant", "value": "0", "filters":["*string:~*req.0:INVITE"]},
{"tag": "Order", "path": "*opts.order", "type": "*constant", "value": "1", "filters":["*string:~*req.0:BYE"]},
],
"partial_commit_fields": [
// {"tag": "Tor", "path": "*cgreq.ToR", "type": "*constant", "value": "*voice", "mandatory": true},
// {"tag": "OriginID", "path": "*cgreq.OriginID", "type": "*variable","value":"~*req.OriginID", "mandatory": true},
// {"tag": "RequestType", "path": "*cgreq.RequestType", "type": "*variable", "value":"~*req.RequestType", "mandatory": true,"filters": ["*string:~*req.0:INVITE"]},
// {"tag": "Tenant", "path": "*cgreq.Tenant", "type": "*constant", "value": "cgrates.org", "mandatory": true},
// {"tag": "Category", "path": "*cgreq.Category", "type": "*constant", "value": "call", "mandatory": true},
// {"tag": "Account", "path": "*cgreq.Account", "type": "*variable", "value":"~*req.Account", "mandatory": true},
// {"tag": "Subject", "path": "*cgreq.Subject", "type": "*variable", "value":"~*req.Subject", "mandatory": true},
// {"tag": "Destination", "path": "*cgreq.Destination", "type": "*variable", "value":"~*req.Destination" , "mandatory": true},
// {"tag": "SetupTime", "path": "*cgreq.SetupTime", "type": "*variable", "value":"~*req.SetupTime" , "mandatory": true},
// {"tag": "AnswerTime", "path": "*cgreq.AnswerTime", "type": "*variable", "value":"~*req.AnswerTime" , "mandatory": true},
// {"tag": "EndTime", "path": "*cgreq.EndTime", "type": "*variable","value": "~*req.EndTime"},
{"tag": "Usage", "path": "*cgreq.Usage", "type": "*constant","value": "0", "mandatory": true, "filters": ["*prefix:~*vars.FileName:missed_calls"]},
{"tag": "Usage", "path": "*cgreq.Usage", "type": "*usage_difference","value": "~*req.EndTime;~*req.AnswerTime", "mandatory": true, "filters": ["*notprefix:~*vars.FileName:missed_calls","*exists:~*opts.invite:","*exists:~*opts.bye:"]},
// {"tag": "DisconnectCause", "path": "*cgreq.DisconnectCause", "type": "*variable", "value":"~*req.DisconnectCause", "mandatory": true},
// {"tag": "DialogId", "path": "*cgreq.DialogId", "type": "*variable", "value":"~*req.DialogId"},
{"tag": "Partial", "path": "*opts.*partial", "type": "*constant", "value": "false","filters": ["*exists:~*opts.invite:","*exists:~*opts.bye:"]},
],
"cache_dump_fields": [
],
},
{
"id": "JSONReader",

View File

@@ -261,13 +261,13 @@
"id": "PartialCSV1",
"enabled": true,
"run_delay": "-1",
"type": "*partial_csv",
"type": "*file_csv",
"source_path": "/tmp/partErs1/in",
"flags": ["*cdrs"],
"processed_path": "/tmp/partErs1/out",
"opts": {
"csvRecordCacheTTL": "500ms",
"csvCacheExpiryAction": "*dump_to_file",
"partialOrderField": "~*req.AnswerTime",
},
"fields":[
{"tag": "ToR", "path": "*cgreq.ToR", "type": "*constant", "value": "*voice", "mandatory": true},
@@ -286,7 +286,9 @@
{"tag": "SetupTime", "path": "*cgreq.SetupTime", "type": "*variable", "value": "~*req.4", "mandatory": true},
{"tag": "AnswerTime", "path": "*cgreq.AnswerTime", "type": "*variable", "value": "~*req.4", "mandatory": true},
{"tag": "Usage", "path": "*cgreq.Usage", "type": "*variable", "value": "~*req.6:s/^(\\d+)$/${1}s/", "mandatory": true},
{"tag": "Partial", "path": "*cgreq.Partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]},
{"tag": "Partial", "path": "*cgreq.Partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]},// keep this here for partial cdr field
{"tag": "Partial", "path": "*opts.Partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]},
{"tag": "Partial", "path": "*opts.Partial", "type": "*constant", "value": "false", "filters":["*notstring:~*req.10:partial"]},
],
"cache_dump_fields": [
{"tag": "OriginID", "path":"*exp.OriginID", "type": "*variable", "value": "~*req.OriginID"},
@@ -304,13 +306,13 @@
"id": "PartialCSV_PostExpiry",
"enabled": true,
"run_delay": "-1",
"type": "*partial_csv",
"type": "*file_csv",
"source_path": "/tmp/partErs2/in",
"processed_path": "/tmp/partErs2/out",
"flags": ["*cdrs"],
"opts": {
"csvRecordCacheTTL": "500ms",
"csvCacheExpiryAction": "*post_cdr",
"partialOrderField": "~*req.AnswerTime",
},
"fields":[
{"tag": "ToR", "path": "*cgreq.ToR", "type": "*constant", "value": "*voice", "mandatory": true},
@@ -329,7 +331,9 @@
{"tag": "SetupTime", "path": "*cgreq.SetupTime", "type": "*variable", "value": "~*req.4", "mandatory": true},
{"tag": "AnswerTime", "path": "*cgreq.AnswerTime", "type": "*variable", "value": "~*req.4", "mandatory": true},
{"tag": "Usage", "path": "*cgreq.Usage", "type": "*variable", "value": "~*req.6:s/^(\\d+)$/${1}s/", "mandatory": true},
{"tag": "Partial", "path": "*cgreq.Partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]}
{"tag": "Partial", "path": "*cgreq.Partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]},// keep this here for partial cdr field
{"tag": "Partial", "path": "*opts.Partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]},
{"tag": "Partial", "path": "*opts.Partial", "type": "*constant", "value": "false", "filters":["*notstring:~*req.10:partial"]}
],
},
{

View File

@@ -258,13 +258,13 @@
"id": "PartialCSV1",
"enabled": true,
"run_delay": "-1",
"type": "*partial_csv",
"type": "*file_csv",
"source_path": "/tmp/partErs1/in",
"flags": ["*cdrs"],
"processed_path": "/tmp/partErs1/out",
"opts": {
"csvRecordCacheTTL": "500ms",
"csvCacheExpiryAction": "*dump_to_file",
"partialOrderField": "~*req.AnswerTime",
},
"fields":[
{"tag": "ToR", "path": "*cgreq.ToR", "type": "*constant", "value": "*voice", "mandatory": true},
@@ -283,7 +283,9 @@
{"tag": "SetupTime", "path": "*cgreq.SetupTime", "type": "*variable", "value": "~*req.4", "mandatory": true},
{"tag": "AnswerTime", "path": "*cgreq.AnswerTime", "type": "*variable", "value": "~*req.4", "mandatory": true},
{"tag": "Usage", "path": "*cgreq.Usage", "type": "*variable", "value": "~*req.6:s/^(\\d+)$/${1}s/", "mandatory": true},
{"tag": "Partial", "path": "*cgreq.Partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]},
{"tag": "Partial", "path": "*cgreq.Partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]},// keep this here for partial cdr field
{"tag": "Partial", "path": "*opts.Partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]},
{"tag": "Partial", "path": "*opts.Partial", "type": "*constant", "value": "false", "filters":["*notstring:~*req.10:partial"]},
],
"cache_dump_fields": [
{"tag": "OriginID", "path":"*exp.OriginID", "type": "*variable", "value": "~*req.OriginID"},
@@ -301,13 +303,13 @@
"id": "PartialCSV_PostExpiry",
"enabled": true,
"run_delay": "-1",
"type": "*partial_csv",
"type": "*file_csv",
"source_path": "/tmp/partErs2/in",
"processed_path": "/tmp/partErs2/out",
"flags": ["*cdrs"],
"opts": {
"csvRecordCacheTTL": "500ms",
"csvCacheExpiryAction": "*post_cdr",
"partialOrderField": "~*req.AnswerTime",
},
"fields":[
{"tag": "ToR", "path": "*cgreq.ToR", "type": "*constant", "value": "*voice", "mandatory": true},
@@ -326,7 +328,9 @@
{"tag": "SetupTime", "path": "*cgreq.SetupTime", "type": "*variable", "value": "~*req.4", "mandatory": true},
{"tag": "AnswerTime", "path": "*cgreq.AnswerTime", "type": "*variable", "value": "~*req.4", "mandatory": true},
{"tag": "Usage", "path": "*cgreq.Usage", "type": "*variable", "value": "~*req.6:s/^(\\d+)$/${1}s/", "mandatory": true},
{"tag": "Partial", "path": "*cgreq.Partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]}
{"tag": "Partial", "path": "*cgreq.Partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]},// keep this here for partial cdr field
{"tag": "Partial", "path": "*opts.Partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]},
{"tag": "Partial", "path": "*opts.Partial", "type": "*constant", "value": "false", "filters":["*notstring:~*req.10:partial"]}
],
},
{

View File

@@ -255,13 +255,13 @@
"id": "PartialCSV1",
"enabled": true,
"run_delay": "-1",
"type": "*partial_csv",
"type": "*file_csv",
"source_path": "/tmp/partErs1/in",
"flags": ["*cdrs"],
"processed_path": "/tmp/partErs1/out",
"opts": {
"csvRecordCacheTTL": "500ms",
"csvCacheExpiryAction": "*dump_to_file",
"partialOrderField": "~*req.AnswerTime",
},
"fields":[
{"tag": "ToR", "path": "*cgreq.ToR", "type": "*constant", "value": "*voice", "mandatory": true},
@@ -280,7 +280,9 @@
{"tag": "SetupTime", "path": "*cgreq.SetupTime", "type": "*variable", "value": "~*req.4", "mandatory": true},
{"tag": "AnswerTime", "path": "*cgreq.AnswerTime", "type": "*variable", "value": "~*req.4", "mandatory": true},
{"tag": "Usage", "path": "*cgreq.Usage", "type": "*variable", "value": "~*req.6:s/^(\\d+)$/${1}s/", "mandatory": true},
{"tag": "Partial", "path": "*cgreq.Partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]},
{"tag": "Partial", "path": "*cgreq.Partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]},// keep this here for partial cdr field
{"tag": "Partial", "path": "*opts.Partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]},
{"tag": "Partial", "path": "*opts.Partial", "type": "*constant", "value": "false", "filters":["*notstring:~*req.10:partial"]},
],
"cache_dump_fields": [
{"tag": "OriginID", "path":"*exp.OriginID", "type": "*variable", "value": "~*req.OriginID"},
@@ -298,13 +300,13 @@
"id": "PartialCSV_PostExpiry",
"enabled": true,
"run_delay": "-1",
"type": "*partial_csv",
"type": "*file_csv",
"source_path": "/tmp/partErs2/in",
"processed_path": "/tmp/partErs2/out",
"flags": ["*cdrs"],
"opts": {
"csvRecordCacheTTL": "500ms",
"csvCacheExpiryAction": "*post_cdr",
"partialOrderField": "~*req.AnswerTime",
},
"fields":[
{"tag": "ToR", "path": "*cgreq.ToR", "type": "*constant", "value": "*voice", "mandatory": true},
@@ -323,7 +325,9 @@
{"tag": "SetupTime", "path": "*cgreq.SetupTime", "type": "*variable", "value": "~*req.4", "mandatory": true},
{"tag": "AnswerTime", "path": "*cgreq.AnswerTime", "type": "*variable", "value": "~*req.4", "mandatory": true},
{"tag": "Usage", "path": "*cgreq.Usage", "type": "*variable", "value": "~*req.6:s/^(\\d+)$/${1}s/", "mandatory": true},
{"tag": "Partial", "path": "*cgreq.Partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]}
{"tag": "Partial", "path": "*cgreq.Partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]},// keep this here for partial cdr field
{"tag": "Partial", "path": "*opts.Partial", "type": "*constant", "value": "true", "filters":["*string:~*req.10:partial"]},
{"tag": "Partial", "path": "*opts.Partial", "type": "*constant", "value": "false", "filters":["*notstring:~*req.10:partial"]}
],
},
{

View File

@@ -134,12 +134,6 @@ type
**\*file_csv**
Reader for *comma separated* files.
**\*partial_csv**
Reader for *comma separated* where content spans over multiple files.
**\*flatstore**
Reader for Kamailio_/OpenSIPS_ *db_flatstore* files.
**\*file_xml**
Reader for *.xml* formatted files.

View File

@@ -290,7 +290,7 @@ func (sS *StatService) processEvent(tnt string, args *StatsArgsProcessEvent) (st
for _, sq := range matchSQs {
stsIDs = append(stsIDs, sq.ID)
lkID := utils.StatQueuePrefix + sq.TenantID()
guardian.Guardian.Guard(context.TODO(), func(_ *context.Context) (gRes interface{}, gErr error) {
guardian.Guardian.Guard(context.TODO(), func(_ *context.Context) (_ interface{}, _ error) {
err = sq.ProcessEvent(tnt, args.ID, sS.filterS, evNm)
return
}, sS.cgrcfg.GeneralCfg().LockingTimeout, lkID)

View File

@@ -33,15 +33,16 @@ import (
// NewAMQPER return a new amqp event reader
func NewAMQPER(cfg *config.CGRConfig, cfgIdx int,
rdrEvents chan *erEvent, rdrErr chan error,
rdrEvents, partialEvents chan *erEvent, rdrErr chan error,
fltrS *engine.FilterS, rdrExit chan struct{}) (er EventReader, err error) {
rdr := &AMQPER{
cgrCfg: cfg,
cfgIdx: cfgIdx,
fltrS: fltrS,
rdrEvents: rdrEvents,
rdrExit: rdrExit,
rdrErr: rdrErr,
cgrCfg: cfg,
cfgIdx: cfgIdx,
fltrS: fltrS,
rdrEvents: rdrEvents,
partialEvents: partialEvents,
rdrExit: rdrExit,
rdrErr: rdrErr,
}
if concReq := rdr.Config().ConcurrentReqs; concReq != -1 {
rdr.cap = make(chan struct{}, concReq)
@@ -69,10 +70,11 @@ type AMQPER struct {
exchangeType string
routingKey string
rdrEvents chan *erEvent // channel to dispatch the events created to
rdrExit chan struct{}
rdrErr chan error
cap chan struct{}
rdrEvents chan *erEvent // channel to dispatch the events created to
partialEvents chan *erEvent // channel to dispatch the partial events created to
rdrExit chan struct{}
rdrErr chan error
cap chan struct{}
conn *amqp.Connection
channel *amqp.Channel
@@ -202,7 +204,11 @@ func (rdr *AMQPER) processMessage(msg []byte) (err error) {
return
}
cgrEv := utils.NMAsCGREvent(agReq.CGRRequest, agReq.Tenant, utils.NestingSep, agReq.Opts)
rdr.rdrEvents <- &erEvent{
rdrEv := rdr.rdrEvents
if _, isPartial := cgrEv.APIOpts[partialOpt]; isPartial {
rdrEv = rdr.partialEvents
}
rdrEv <- &erEvent{
cgrEvent: cgrEv,
rdrCfg: rdr.Config(),
}

View File

@@ -71,7 +71,7 @@ func TestAMQPER(t *testing.T) {
rdrErr = make(chan error, 1)
rdrExit = make(chan struct{}, 1)
if rdr, err = NewAMQPER(cfg, 1, rdrEvents,
if rdr, err = NewAMQPER(cfg, 1, rdrEvents, make(chan *erEvent, 1),
rdrErr, new(engine.FilterS), rdrExit); err != nil {
t.Fatal(err)
}
@@ -134,7 +134,7 @@ func TestAMQPERServeError(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
cfgIdx := 0
expected := "AMQP scheme must be either 'amqp://' or 'amqps://'"
rdr, err := NewAMQPER(cfg, cfgIdx, nil, nil, nil, nil)
rdr, err := NewAMQPER(cfg, cfgIdx, nil, nil, nil, nil, nil)
if err != nil {
t.Errorf("\nExpected <%+v>, \nReceived <%+v>", nil, err)
}

View File

@@ -33,15 +33,16 @@ import (
// NewAMQPv1ER return a new amqpv1 event reader
func NewAMQPv1ER(cfg *config.CGRConfig, cfgIdx int,
rdrEvents chan *erEvent, rdrErr chan error,
rdrEvents, partialEvents chan *erEvent, rdrErr chan error,
fltrS *engine.FilterS, rdrExit chan struct{}) (er EventReader, err error) {
rdr := &AMQPv1ER{
cgrCfg: cfg,
cfgIdx: cfgIdx,
fltrS: fltrS,
rdrEvents: rdrEvents,
rdrExit: rdrExit,
rdrErr: rdrErr,
cgrCfg: cfg,
cfgIdx: cfgIdx,
fltrS: fltrS,
rdrEvents: rdrEvents,
partialEvents: partialEvents,
rdrExit: rdrExit,
rdrErr: rdrErr,
}
if concReq := rdr.Config().ConcurrentReqs; concReq != -1 {
rdr.cap = make(chan struct{}, concReq)
@@ -65,10 +66,11 @@ type AMQPv1ER struct {
queueID string
rdrEvents chan *erEvent // channel to dispatch the events created to
rdrExit chan struct{}
rdrErr chan error
cap chan struct{}
rdrEvents chan *erEvent // channel to dispatch the events created to
partialEvents chan *erEvent // channel to dispatch the partial events created to
rdrExit chan struct{}
rdrErr chan error
cap chan struct{}
conn *amqpv1.Client
ses *amqpv1.Session
@@ -176,7 +178,11 @@ func (rdr *AMQPv1ER) processMessage(msg []byte) (err error) {
return
}
cgrEv := utils.NMAsCGREvent(agReq.CGRRequest, agReq.Tenant, utils.NestingSep, agReq.Opts)
rdr.rdrEvents <- &erEvent{
rdrEv := rdr.rdrEvents
if _, isPartial := cgrEv.APIOpts[partialOpt]; isPartial {
rdrEv = rdr.partialEvents
}
rdrEv <- &erEvent{
cgrEvent: cgrEv,
rdrCfg: rdr.Config(),
}

View File

@@ -76,7 +76,7 @@ func TestAMQPERv1(t *testing.T) {
rdrErr = make(chan error, 1)
rdrExit = make(chan struct{}, 1)
if rdr, err = NewAMQPv1ER(cfg, 1, rdrEvents,
if rdr, err = NewAMQPv1ER(cfg, 1, rdrEvents, make(chan *erEvent, 1),
rdrErr, new(engine.FilterS), rdrExit); err != nil {
t.Fatal(err)
}
@@ -151,7 +151,7 @@ func TestAmqpv1NewAMQPv1ER(t *testing.T) {
},
}
result, err := NewAMQPv1ER(cfg, cfgIdx, nil, nil, nil, nil)
result, err := NewAMQPv1ER(cfg, cfgIdx, nil, nil, nil, nil, nil)
if err != nil {
t.Errorf("\nExpected <%+v>, \nReceived <%+v>", nil, err)
}
@@ -182,7 +182,7 @@ func TestAmqpv1NewAMQPv1ER2(t *testing.T) {
},
}
result, err := NewAMQPv1ER(cfg, cfgIdx, nil, nil, nil, nil)
result, err := NewAMQPv1ER(cfg, cfgIdx, nil, nil, nil, nil, nil)
if err != nil {
t.Errorf("\nExpected <%+v>, \nReceived <%+v>", nil, err)
}

View File

@@ -19,14 +19,21 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
package ers
import (
"encoding/csv"
"fmt"
"os"
"path"
"sort"
"sync"
"time"
"github.com/cgrates/birpc/context"
"github.com/cgrates/cgrates/agents"
"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/sessions"
"github.com/cgrates/cgrates/utils"
"github.com/cgrates/ltcache"
)
// erEvent is passed from reader to ERs
@@ -36,31 +43,37 @@ type erEvent struct {
}
// NewERService instantiates the ERService
func NewERService(cfg *config.CGRConfig, filterS *engine.FilterS, connMgr *engine.ConnManager) *ERService {
return &ERService{
cfg: cfg,
rdrs: make(map[string]EventReader),
rdrPaths: make(map[string]string),
stopLsn: make(map[string]chan struct{}),
rdrEvents: make(chan *erEvent),
rdrErr: make(chan error),
filterS: filterS,
connMgr: connMgr,
func NewERService(cfg *config.CGRConfig, filterS *engine.FilterS, connMgr *engine.ConnManager) (ers *ERService) {
ers = &ERService{
cfg: cfg,
rdrs: make(map[string]EventReader),
rdrPaths: make(map[string]string),
stopLsn: make(map[string]chan struct{}),
rdrEvents: make(chan *erEvent),
partialEvents: make(chan *erEvent),
rdrErr: make(chan error),
filterS: filterS,
connMgr: connMgr,
}
ers.partialCache = ltcache.NewCache(ltcache.UnlimitedCaching, cfg.ERsCfg().PartialCacheTTL, false, ers.onEvicted)
return
}
// ERService is managing the EventReaders
type ERService struct {
sync.RWMutex
cfg *config.CGRConfig
rdrs map[string]EventReader // map[rdrID]EventReader
rdrPaths map[string]string // used for reloads in case of path changes
stopLsn map[string]chan struct{} // map[rdrID] chan struct{}
rdrEvents chan *erEvent // receive here the events from readers
rdrErr chan error // receive here errors which should stop the app
cfg *config.CGRConfig
rdrs map[string]EventReader // map[rdrID]EventReader
rdrPaths map[string]string // used for reloads in case of path changes
stopLsn map[string]chan struct{} // map[rdrID] chan struct{}
rdrEvents chan *erEvent // receive here the events from readers
partialEvents chan *erEvent // receive here the partial events from readers
rdrErr chan error // receive here errors which should stop the app
filterS *engine.FilterS
connMgr *engine.ConnManager
partialCache *ltcache.Cache
}
// ListenAndServe keeps the service alive
@@ -90,8 +103,14 @@ func (erS *ERService) ListenAndServe(stopChan, cfgRldChan chan struct{}) (err er
case erEv := <-erS.rdrEvents:
if err := erS.processEvent(erEv.cgrEvent, erEv.rdrCfg); err != nil {
utils.Logger.Warning(
fmt.Sprintf("<%s> reading event: <%s> got error: <%s>",
utils.ERs, utils.ToIJSON(erEv.cgrEvent), err.Error()))
fmt.Sprintf("<%s> reading event: <%s> from reader: <%s> got error: <%s>",
utils.ERs, utils.ToJSON(erEv.cgrEvent), erEv.rdrCfg.ID, err.Error()))
}
case pEv := <-erS.partialEvents:
if err := erS.processPartialEvent(pEv.cgrEvent, pEv.rdrCfg); err != nil {
utils.Logger.Warning(
fmt.Sprintf("<%s> reading partial event: <%s> from reader: <%s> got error: <%s>",
utils.ERs, utils.ToJSON(pEv.cgrEvent), pEv.rdrCfg.ID, err.Error()))
}
case <-cfgRldChan: // handle reload
cfgIDs := make(map[string]int)
@@ -143,7 +162,7 @@ func (erS *ERService) addReader(rdrID string, cfgIdx int) (err error) {
erS.stopLsn[rdrID] = make(chan struct{})
var rdr EventReader
if rdr, err = NewEventReader(erS.cfg, cfgIdx,
erS.rdrEvents, erS.rdrErr,
erS.rdrEvents, erS.partialEvents, erS.rdrErr,
erS.filterS, erS.stopLsn[rdrID]); err != nil {
return
}
@@ -302,3 +321,200 @@ func (erS *ERService) closeAllRdrs() {
close(stopL)
}
}
const (
partialOpt = "*partial"
)
type erEvents struct {
events []*utils.CGREvent
rdrCfg *config.EventReaderCfg
}
func (erS *ERService) processPartialEvent(ev *utils.CGREvent, rdrCfg *config.EventReaderCfg) (err error) {
orgID, err := ev.FieldAsString(utils.OriginID)
if err == utils.ErrNotFound {
utils.Logger.Warning(
fmt.Sprintf("<%s> Missing <OriginID> field for partial event <%s>",
utils.ERs, utils.ToJSON(ev)))
return
}
orgHost, err := ev.FieldAsString(utils.OriginHost)
if err == utils.ErrNotFound {
utils.Logger.Warning(
fmt.Sprintf("<%s> Missing <OriginHost> field for partial event <%s>",
utils.ERs, utils.ToJSON(ev)))
return
}
cgrID := utils.Sha1(orgID, orgHost)
evs, has := erS.partialCache.Get(cgrID)
var cgrEvs *erEvents
if !has || evs == nil {
cgrEvs = &erEvents{
events: []*utils.CGREvent{ev},
rdrCfg: rdrCfg,
}
} else {
cgrEvs = evs.(*erEvents)
cgrEvs.events = append(cgrEvs.events, ev)
cgrEvs.rdrCfg = rdrCfg
}
var cgrEv *utils.CGREvent
if cgrEv, err = erS.preparePartialEvents(cgrEvs.events, cgrEvs.rdrCfg); err != nil {
return
}
if partial := cgrEv.APIOpts[partialOpt]; !utils.IsSliceMember([]string{"false", utils.EmptyString}, utils.IfaceAsString(partial)) {
erS.partialCache.Set(cgrID, cgrEvs, nil)
return
}
// complete CDR
if len(cgrEvs.events) != 1 {
erS.partialCache.Set(cgrID, nil, nil) // set it with nil in cache to ignore when we expire the item
erS.partialCache.Remove(cgrID)
}
go func() { erS.rdrEvents <- &erEvent{cgrEvent: cgrEv, rdrCfg: rdrCfg} }()
return
}
func (erS *ERService) preparePartialEvents(cgrEvs []*utils.CGREvent, cfg *config.EventReaderCfg) (cgrEv *utils.CGREvent, err error) {
cgrEv = cgrEvs[0]
if len(cgrEvs) != 1 {
ordFld := utils.IfaceAsString(cfg.Opts[utils.PartialOrderFieldOpt])
if ordFld == utils.EmptyString {
return nil, utils.NewErrMandatoryIeMissing(utils.PartialOrderFieldOpt)
}
fields := make([]interface{}, len(cgrEvs))
var ordPath config.RSRParsers
if ordPath, err = config.NewRSRParsers(ordFld, erS.cfg.GeneralCfg().RSRSep); err != nil {
return nil, err
}
for i, ev := range cgrEvs {
if fields[i], err = ordPath.ParseDataProviderWithInterfaces(ev.AsDataProvider()); err != nil {
return
}
if fldStr, castStr := fields[i].(string); castStr { // attempt converting string since deserialization fails here (ie: time.Time fields)
fields[i] = utils.StringToInterface(fldStr)
}
}
//sort CGREvents based on partialOrderFieldOpt
sort.Slice(cgrEvs, func(i, j int) bool {
gt, serr := utils.GreaterThan(fields[i], fields[j], true)
if serr != nil {
err = serr
}
return gt
})
if err != nil {
return
}
// compose the CGREvent from slice
cgrEv = &utils.CGREvent{
Tenant: cgrEvs[0].Tenant,
ID: utils.UUIDSha1Prefix(),
Time: utils.TimePointer(time.Now()),
Event: make(map[string]interface{}),
APIOpts: make(map[string]interface{}),
}
for _, ev := range cgrEvs {
for key, value := range ev.Event {
cgrEv.Event[key] = value
}
for key, val := range ev.APIOpts {
cgrEv.APIOpts[key] = val
}
}
}
if len(cfg.PartialCommitFields) != 0 {
agReq := agents.NewAgentRequest(
utils.MapStorage(cgrEv.Event), nil,
nil, nil, cgrEv.APIOpts, cfg.Tenant,
erS.cfg.GeneralCfg().DefaultTenant,
utils.FirstNonEmpty(cfg.Timezone,
erS.cfg.GeneralCfg().DefaultTimezone),
erS.filterS, nil) // create an AgentRequest
if err = agReq.SetFields(cfg.PartialCommitFields); err != nil {
utils.Logger.Warning(
fmt.Sprintf("<%s> processing partial event: <%s>, ignoring due to error: <%s>",
utils.ERs, utils.ToJSON(cgrEv), err.Error()))
return
}
cgrEv = utils.NMAsCGREvent(agReq.CGRRequest, agReq.Tenant, utils.NestingSep, agReq.Opts)
}
return
}
func (erS *ERService) onEvicted(id string, value interface{}) {
if value == nil {
return
}
eEvs := value.(*erEvents)
action := erS.cfg.ERsCfg().PartialCacheAction
if cAct, has := eEvs.rdrCfg.Opts[utils.PartialCacheAction]; has {
action = utils.IfaceAsString(cAct)
}
switch action {
case utils.MetaNone:
case utils.MetaPostCDR:
cgrEv, err := erS.preparePartialEvents(eEvs.events, eEvs.rdrCfg)
if err != nil {
utils.Logger.Warning(
fmt.Sprintf("<%s> failed posting expired parial events <%s> due error <%s>",
utils.ERs, utils.ToJSON(eEvs.events), err.Error()))
return
}
erS.rdrEvents <- &erEvent{cgrEvent: cgrEv, rdrCfg: eEvs.rdrCfg}
case utils.MetaDumpToFile:
tmz := utils.FirstNonEmpty(eEvs.rdrCfg.Timezone, erS.cfg.GeneralCfg().DefaultTimezone)
expPath := erS.cfg.ERsCfg().PartialPath
if path, has := eEvs.rdrCfg.Opts[utils.PartialPathOpt]; has {
expPath = utils.IfaceAsString(path)
}
if expPath == utils.EmptyString { // do not send the partial events to any file
return
}
dumpFilePath := path.Join(expPath, fmt.Sprintf("%s.%d%s",
id, time.Now().Unix(), utils.TmpSuffix))
fileOut, err := os.Create(dumpFilePath)
if err != nil {
utils.Logger.Err(fmt.Sprintf("<%s> Failed creating %s, error: %s",
utils.ERs, dumpFilePath, err.Error()))
return
}
defer fileOut.Close()
csvWriter := csv.NewWriter(fileOut)
if fldSep, has := eEvs.rdrCfg.Opts[utils.PartialCSVFieldSepartor]; has {
csvWriter.Comma = rune(utils.IfaceAsString(fldSep)[0])
}
for _, ev := range eEvs.events {
oNm := map[string]*utils.OrderedNavigableMap{
utils.MetaExp: utils.NewOrderedNavigableMap(),
}
eeReq := engine.NewEventRequest(utils.MapStorage(ev.Event), utils.MapStorage{}, ev.APIOpts,
eEvs.rdrCfg.Tenant, erS.cfg.GeneralCfg().DefaultTenant,
tmz, erS.filterS, oNm)
if err = eeReq.SetFields(eEvs.rdrCfg.CacheDumpFields); err != nil {
utils.Logger.Warning(
fmt.Sprintf("<%s> Converting CDR with CGRID: <%s> to record , ignoring due to error: <%s>",
utils.ERs, id, err.Error()))
return
}
record := eeReq.OrdNavMP[utils.MetaExp].OrderedFieldsAsStrings()
if err = csvWriter.Write(record); err != nil {
utils.Logger.Err(fmt.Sprintf("<%s> Failed writing partial record %v to file: %s, error: %s",
utils.ERs, record, dumpFilePath, err.Error()))
return
}
}
csvWriter.Flush()
}
}

View File

@@ -23,7 +23,6 @@ package ers
import (
"errors"
"reflect"
"sync"
"testing"
"time"
@@ -344,7 +343,6 @@ func TestERsListenAndServeCfgRldChan5(t *testing.T) {
fltrS := &engine.FilterS{}
srv := NewERService(cfg, fltrS, nil)
exp := &CSVFileER{
RWMutex: sync.RWMutex{},
cgrCfg: cfg,
cfgIdx: 0,
fltrS: nil,

View File

@@ -25,7 +25,6 @@ import (
"os"
"path"
"strings"
"sync"
"time"
"github.com/cgrates/birpc/context"
@@ -36,21 +35,22 @@ import (
)
func NewCSVFileER(cfg *config.CGRConfig, cfgIdx int,
rdrEvents chan *erEvent, rdrErr chan error,
rdrEvents, partialEvents chan *erEvent, rdrErr chan error,
fltrS *engine.FilterS, rdrExit chan struct{}) (er EventReader, err error) {
srcPath := cfg.ERsCfg().Readers[cfgIdx].SourcePath
if strings.HasSuffix(srcPath, utils.Slash) {
srcPath = srcPath[:len(srcPath)-1]
}
csvEr := &CSVFileER{
cgrCfg: cfg,
cfgIdx: cfgIdx,
fltrS: fltrS,
rdrDir: srcPath,
rdrEvents: rdrEvents,
rdrError: rdrErr,
rdrExit: rdrExit,
conReqs: make(chan struct{}, cfg.ERsCfg().Readers[cfgIdx].ConcurrentReqs)}
cgrCfg: cfg,
cfgIdx: cfgIdx,
fltrS: fltrS,
rdrDir: srcPath,
rdrEvents: rdrEvents,
partialEvents: partialEvents,
rdrError: rdrErr,
rdrExit: rdrExit,
conReqs: make(chan struct{}, cfg.ERsCfg().Readers[cfgIdx].ConcurrentReqs)}
var processFile struct{}
for i := 0; i < cfg.ERsCfg().Readers[cfgIdx].ConcurrentReqs; i++ {
csvEr.conReqs <- processFile // Empty initiate so we do not need to wait later when we pop
@@ -60,15 +60,16 @@ func NewCSVFileER(cfg *config.CGRConfig, cfgIdx int,
// CSVFileER implements EventReader interface for .csv files
type CSVFileER struct {
sync.RWMutex
cgrCfg *config.CGRConfig
cfgIdx int // index of config instance within ERsCfg.Readers
fltrS *engine.FilterS
rdrDir string
rdrEvents chan *erEvent // channel to dispatch the events created to
rdrError chan error
rdrExit chan struct{}
conReqs chan struct{} // limit number of opened files
cgrCfg *config.CGRConfig
cfgIdx int // index of config instance within ERsCfg.Readers
fltrS *engine.FilterS
rdrDir string
rdrEvents chan *erEvent // channel to dispatch the events created to
partialEvents chan *erEvent // channel to dispatch the partial events created to
rdrError chan error
rdrExit chan struct{}
conReqs chan struct{} // limit number of opened files
}
func (rdr *CSVFileER) Config() *config.EventReaderCfg {
@@ -186,7 +187,11 @@ func (rdr *CSVFileER) processFile(fPath, fName string) (err error) {
return
}
cgrEv := utils.NMAsCGREvent(agReq.CGRRequest, agReq.Tenant, utils.NestingSep, agReq.Opts)
rdr.rdrEvents <- &erEvent{
rdrEv := rdr.rdrEvents
if _, isPartial := cgrEv.APIOpts[partialOpt]; isPartial {
rdrEv = rdr.partialEvents
}
rdrEv <- &erEvent{
cgrEvent: cgrEv,
rdrCfg: rdr.Config(),
}

View File

@@ -36,21 +36,22 @@ import (
)
func NewFWVFileER(cfg *config.CGRConfig, cfgIdx int,
rdrEvents chan *erEvent, rdrErr chan error,
rdrEvents, partialEvents chan *erEvent, rdrErr chan error,
fltrS *engine.FilterS, rdrExit chan struct{}) (er EventReader, err error) {
srcPath := cfg.ERsCfg().Readers[cfgIdx].SourcePath
if strings.HasSuffix(srcPath, utils.Slash) {
srcPath = srcPath[:len(srcPath)-1]
}
fwvER := &FWVFileER{
cgrCfg: cfg,
cfgIdx: cfgIdx,
fltrS: fltrS,
rdrDir: srcPath,
rdrEvents: rdrEvents,
rdrError: rdrErr,
rdrExit: rdrExit,
conReqs: make(chan struct{}, cfg.ERsCfg().Readers[cfgIdx].ConcurrentReqs)}
cgrCfg: cfg,
cfgIdx: cfgIdx,
fltrS: fltrS,
rdrDir: srcPath,
rdrEvents: rdrEvents,
partialEvents: partialEvents,
rdrError: rdrErr,
rdrExit: rdrExit,
conReqs: make(chan struct{}, cfg.ERsCfg().Readers[cfgIdx].ConcurrentReqs)}
var processFile struct{}
for i := 0; i < cfg.ERsCfg().Readers[cfgIdx].ConcurrentReqs; i++ {
fwvER.conReqs <- processFile // Empty initiate so we do not need to wait later when we pop
@@ -66,6 +67,7 @@ type FWVFileER struct {
fltrS *engine.FilterS
rdrDir string
rdrEvents chan *erEvent // channel to dispatch the events created to
partialEvents chan *erEvent // channel to dispatch the partial events created to
rdrError chan error
rdrExit chan struct{}
conReqs chan struct{} // limit number of opened files
@@ -218,7 +220,11 @@ func (rdr *FWVFileER) processFile(fPath, fName string) (err error) {
}
rdr.offset += rdr.lineLen // increase the offset
cgrEv := utils.NMAsCGREvent(agReq.CGRRequest, agReq.Tenant, utils.NestingSep, agReq.Opts)
rdr.rdrEvents <- &erEvent{
rdrEv := rdr.rdrEvents
if _, isPartial := cgrEv.APIOpts[partialOpt]; isPartial {
rdrEv = rdr.partialEvents
}
rdrEv <- &erEvent{
cgrEvent: cgrEv,
rdrCfg: rdr.Config(),
}
@@ -306,7 +312,11 @@ func (rdr *FWVFileER) processTrailer(file *os.File, rowNr, evsPosted int, absPat
return err
}
cgrEv := utils.NMAsCGREvent(agReq.CGRRequest, agReq.Tenant, utils.NestingSep, agReq.Opts)
rdr.rdrEvents <- &erEvent{
rdrEv := rdr.rdrEvents
if _, isPartial := cgrEv.APIOpts[partialOpt]; isPartial {
rdrEv = rdr.partialEvents
}
rdrEv <- &erEvent{
cgrEvent: cgrEv,
rdrCfg: rdr.Config(),
}
@@ -348,7 +358,11 @@ func (rdr *FWVFileER) createHeaderMap(record string, rowNr, evsPosted int, absPa
}
rdr.offset += rdr.headerOffset // increase the offset
cgrEv := utils.NMAsCGREvent(agReq.CGRRequest, agReq.Tenant, utils.NestingSep, agReq.Opts)
rdr.rdrEvents <- &erEvent{
rdrEv := rdr.rdrEvents
if _, isPartial := cgrEv.APIOpts[partialOpt]; isPartial {
rdrEv = rdr.partialEvents
}
rdrEv <- &erEvent{
cgrEvent: cgrEv,
rdrCfg: rdr.Config(),
}

View File

@@ -21,6 +21,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
package ers
import (
"fmt"
"net/rpc"
"os"
"path"
@@ -209,7 +210,7 @@ func TestNewFWVFileER(t *testing.T) {
rdrDir: "",
}
cfg.ERsCfg().Readers[cfgIdx].ConcurrentReqs = 1
result, err := NewFWVFileER(cfg, cfgIdx, nil, nil, nil, nil)
result, err := NewFWVFileER(cfg, cfgIdx, nil, nil, nil, nil, nil)
if err != nil {
t.Errorf("\nExpected <%+v>, \nReceived <%+v>", nil, err)
}
@@ -250,7 +251,7 @@ func TestFWVFileConfig(t *testing.T) {
},
}
expected := cfg.ERsCfg().Readers[0]
rdr, err := NewFWVFileER(cfg, 0, nil, nil, nil, nil)
rdr, err := NewFWVFileER(cfg, 0, nil, nil, nil, nil, nil)
if err != nil {
t.Errorf("\nExpected <%+v>, \nReceived <%+v>", nil, err)
}
@@ -259,3 +260,455 @@ func TestFWVFileConfig(t *testing.T) {
t.Errorf("\nExpected <%+v>, \nReceived <%+v>", expected, result)
}
}
func TestFileFWVProcessEvent(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
cfg.ERsCfg().Readers[0].ProcessedPath = ""
fltrs := &engine.FilterS{}
filePath := "/tmp/TestFileFWVProcessEvent/"
if err := os.MkdirAll(filePath, 0777); err != nil {
t.Error(err)
}
file, err := os.Create(path.Join(filePath, "file1.fwv"))
if err != nil {
t.Error(err)
}
file.Write([]byte("test,test2"))
file.Close()
eR := &FWVFileER{
cgrCfg: cfg,
cfgIdx: 0,
fltrS: fltrs,
rdrDir: "/tmp/fwvErs/out",
rdrEvents: make(chan *erEvent, 1),
rdrError: make(chan error, 1),
rdrExit: make(chan struct{}),
conReqs: make(chan struct{}, 1),
}
eR.conReqs <- struct{}{}
fname := "file1.fwv"
errExpect := "unsupported field prefix: <> when set fields"
eR.Config().Fields = []*config.FCTemplate{
{
Value: config.RSRParsers{
{
Rules: "~*hdr",
},
},
Type: utils.MetaRemove,
// Path: utils.MetaVars,
},
}
eR.Config().Fields[0].ComputePath()
if err := eR.processFile(filePath, fname); err == nil || err.Error() != errExpect {
t.Errorf("Expected %v but received %v", errExpect, err)
}
if err := os.RemoveAll(filePath); err != nil {
t.Error(err)
}
}
func TestFileFWVServeErrTimeDuration0(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
cfgIdx := 0
rdr, err := NewFWVFileER(cfg, cfgIdx, nil, nil, nil, nil, nil)
if err != nil {
t.Errorf("\nExpected <%+v>, \nReceived <%+v>", nil, err)
}
rdr.Config().RunDelay = time.Duration(0)
result := rdr.Serve()
if !reflect.DeepEqual(result, nil) {
t.Errorf("\nExpected <%+v>, \nReceived <%+v>", nil, result)
}
}
func TestFileFWVServeErrTimeDurationNeg1(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
cfgIdx := 0
rdr, err := NewFWVFileER(cfg, cfgIdx, nil, nil, nil, nil, nil)
if err != nil {
t.Errorf("\nExpected <%+v>, \nReceived <%+v>", nil, err)
}
rdr.Config().RunDelay = time.Duration(-1)
expected := "no such file or directory"
err = rdr.Serve()
if err == nil || err.Error() != expected {
t.Errorf("\nExpected <%+v>, \nReceived <%+v>", expected, err)
}
}
func TestFileFWV(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
fltrs := &engine.FilterS{}
eR := &FWVFileER{
cgrCfg: cfg,
cfgIdx: 0,
fltrS: fltrs,
rdrDir: "/tmp/fwvErs/out",
rdrEvents: make(chan *erEvent, 1),
rdrError: make(chan error, 1),
rdrExit: make(chan struct{}),
conReqs: make(chan struct{}, 1),
}
eR.conReqs <- struct{}{}
filePath := "/tmp/fwvErs/out"
err := os.MkdirAll(filePath, 0777)
if err != nil {
t.Error(err)
}
for i := 1; i < 4; i++ {
if _, err := os.Create(path.Join(filePath, fmt.Sprintf("file%d.fwv", i))); err != nil {
t.Error(err)
}
}
eR.Config().RunDelay = 1 * time.Millisecond
if err := eR.Serve(); err != nil {
t.Error(err)
}
os.Create(path.Join(filePath, "file1.txt"))
eR.Config().RunDelay = 1 * time.Millisecond
if err := eR.Serve(); err != nil {
t.Error(err)
}
}
func TestFileFWVServeDefault(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
fltrs := &engine.FilterS{}
eR := &FWVFileER{
cgrCfg: cfg,
cfgIdx: 0,
fltrS: fltrs,
rdrDir: "/tmp/fwvErs/out",
rdrEvents: make(chan *erEvent, 1),
rdrError: make(chan error, 1),
rdrExit: make(chan struct{}),
conReqs: make(chan struct{}, 1),
}
eR.conReqs <- struct{}{}
filePath := "/tmp/fwvErs/out"
err := os.MkdirAll(filePath, 0777)
if err != nil {
t.Error(err)
}
for i := 1; i < 4; i++ {
if _, err := os.Create(path.Join(filePath, fmt.Sprintf("file%d.fwv", i))); err != nil {
t.Error(err)
}
}
os.Create(path.Join(filePath, "file1.txt"))
eR.Config().RunDelay = 1 * time.Millisecond
go func() {
time.Sleep(20 * time.Millisecond)
close(eR.rdrExit)
}()
eR.serveDefault()
if err := os.RemoveAll(filePath); err != nil {
t.Error(err)
}
}
func TestFileFWVExit(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
fltrs := &engine.FilterS{}
eR := &FWVFileER{
cgrCfg: cfg,
cfgIdx: 0,
fltrS: fltrs,
rdrDir: "/tmp/fwvErs/out",
rdrEvents: make(chan *erEvent, 1),
rdrError: make(chan error, 1),
rdrExit: make(chan struct{}),
conReqs: make(chan struct{}, 1),
}
eR.conReqs <- struct{}{}
eR.Config().RunDelay = 1 * time.Millisecond
if err := eR.Serve(); err != nil {
t.Error(err)
}
eR.rdrExit <- struct{}{}
}
func TestFileFWVProcessTrailer(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
data := engine.NewInternalDB(nil, nil, true)
dm := engine.NewDataManager(data, cfg.CacheCfg(), nil)
cfg.ERsCfg().Readers[0].ProcessedPath = ""
fltrs := engine.NewFilterS(cfg, nil, dm)
eR := &FWVFileER{
cgrCfg: cfg,
cfgIdx: 0,
fltrS: fltrs,
rdrDir: "/tmp/fwvErs/out",
rdrEvents: make(chan *erEvent, 1),
rdrError: make(chan error, 1),
rdrExit: make(chan struct{}),
conReqs: make(chan struct{}, 1),
}
expEvent := &utils.CGREvent{
Tenant: "cgrates.org",
Event: map[string]interface{}{
"OriginID": "testOriginID",
},
APIOpts: map[string]interface{}{},
}
eR.conReqs <- struct{}{}
filePath := "/tmp/TestFileFWVProcessTrailer/"
if err := os.MkdirAll(filePath, 0777); err != nil {
t.Error(err)
}
file, err := os.Create(path.Join(filePath, "file1.txt"))
if err != nil {
t.Error(err)
}
trailerFields := []*config.FCTemplate{
{
Tag: "OriginId",
Path: "*cgreq.OriginID",
Type: utils.MetaConstant,
Value: config.NewRSRParsersMustCompile("testOriginID", utils.InfieldSep),
},
}
eR.Config().Fields = trailerFields
eR.Config().Fields[0].ComputePath()
if err := eR.processTrailer(file, 0, 0, "/tmp/fwvErs/out", trailerFields); err != nil {
t.Error(err)
}
select {
case data := <-eR.rdrEvents:
expEvent.ID = data.cgrEvent.ID
expEvent.Time = data.cgrEvent.Time
if !reflect.DeepEqual(data.cgrEvent, expEvent) {
t.Errorf("Expected %v but received %v", utils.ToJSON(expEvent), utils.ToJSON(data.cgrEvent))
}
case <-time.After(50 * time.Millisecond):
t.Error("Time limit exceeded")
}
if err := os.RemoveAll(filePath); err != nil {
t.Error(err)
}
}
func TestFileFWVProcessTrailerError1(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
data := engine.NewInternalDB(nil, nil, true)
dm := engine.NewDataManager(data, cfg.CacheCfg(), nil)
cfg.ERsCfg().Readers[0].ProcessedPath = ""
fltrs := engine.NewFilterS(cfg, nil, dm)
eR := &FWVFileER{
cgrCfg: cfg,
cfgIdx: 0,
fltrS: fltrs,
rdrDir: "/tmp/fwvErs/out",
rdrEvents: make(chan *erEvent, 1),
rdrError: make(chan error, 1),
rdrExit: make(chan struct{}),
conReqs: make(chan struct{}, 1),
}
eR.conReqs <- struct{}{}
filePath := "/tmp/TestFileFWVProcessTrailer/"
if err := os.MkdirAll(filePath, 0777); err != nil {
t.Error(err)
}
file, err := os.Create(path.Join(filePath, "file1.txt"))
if err != nil {
t.Error(err)
}
trailerFields := []*config.FCTemplate{
{},
}
errExpect := "unsupported type: <>"
if err := eR.processTrailer(file, 0, 0, "/tmp/fwvErs/out", trailerFields); err == nil || err.Error() != errExpect {
t.Errorf("Expected %v but received %v", errExpect, err)
}
if err := os.RemoveAll(filePath); err != nil {
t.Error(err)
}
}
func TestFileFWVProcessTrailerError2(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
data := engine.NewInternalDB(nil, nil, true)
dm := engine.NewDataManager(data, cfg.CacheCfg(), nil)
fltrs := engine.NewFilterS(cfg, nil, dm)
eR := &FWVFileER{
cgrCfg: cfg,
cfgIdx: 0,
fltrS: fltrs,
rdrDir: "/tmp/fwvErs/out",
rdrEvents: make(chan *erEvent, 1),
rdrError: make(chan error, 1),
rdrExit: make(chan struct{}),
conReqs: make(chan struct{}, 1),
}
eR.conReqs <- struct{}{}
eR.Config().Tenant = config.RSRParsers{
{
Rules: "cgrates.org",
},
}
filePath := "/tmp/TestFileFWVProcessTrailer/"
if err := os.MkdirAll(filePath, 0777); err != nil {
t.Error(err)
}
file, err := os.Create(path.Join(filePath, "file1.txt"))
if err != nil {
t.Error(err)
}
trailerFields := []*config.FCTemplate{
{
Tag: "OriginId",
Path: "*cgreq.OriginID",
Type: utils.MetaConstant,
Value: config.NewRSRParsersMustCompile("testOriginID", utils.InfieldSep),
},
}
//
eR.Config().Filters = []string{"Filter1"}
errExpect := "NOT_FOUND:Filter1"
if err := eR.processTrailer(file, 0, 0, "/tmp/fwvErs/out", trailerFields); err == nil || err.Error() != errExpect {
t.Errorf("Expected %v but received %v", errExpect, err)
}
}
func TestFileFWVProcessTrailerError3(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
data := engine.NewInternalDB(nil, nil, true)
dm := engine.NewDataManager(data, cfg.CacheCfg(), nil)
fltrs := engine.NewFilterS(cfg, nil, dm)
eR := &FWVFileER{
cgrCfg: cfg,
cfgIdx: 0,
fltrS: fltrs,
rdrDir: "/tmp/fwvErs/out",
rdrEvents: make(chan *erEvent, 1),
rdrError: make(chan error, 1),
rdrExit: make(chan struct{}),
conReqs: make(chan struct{}, 1),
}
eR.conReqs <- struct{}{}
trailerFields := []*config.FCTemplate{
{
Tag: "OriginId",
Path: "*cgreq.OriginID",
Type: utils.MetaConstant,
Value: config.NewRSRParsersMustCompile("testOriginID", utils.InfieldSep),
},
}
var file *os.File
errExp := "invalid argument"
if err := eR.processTrailer(file, 0, 0, "/tmp/fwvErs/out", trailerFields); err == nil || err.Error() != errExp {
t.Errorf("Expected %v but received %v", errExp, err)
}
}
func TestFileFWVCreateHeaderMap(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
data := engine.NewInternalDB(nil, nil, true)
dm := engine.NewDataManager(data, cfg.CacheCfg(), nil)
fltrs := engine.NewFilterS(cfg, nil, dm)
eR := &FWVFileER{
cgrCfg: cfg,
cfgIdx: 0,
fltrS: fltrs,
rdrDir: "/tmp/fwvErs/out",
rdrEvents: make(chan *erEvent, 1),
rdrError: make(chan error, 1),
rdrExit: make(chan struct{}),
conReqs: make(chan struct{}, 1),
}
eR.conReqs <- struct{}{}
expEvent := &utils.CGREvent{
Tenant: "cgrates.org",
Event: map[string]interface{}{
"OriginID": "testOriginID",
},
APIOpts: map[string]interface{}{},
}
hdrFields := []*config.FCTemplate{
{
Tag: "OriginId",
Path: "*cgreq.OriginID",
Type: utils.MetaConstant,
Value: config.NewRSRParsersMustCompile("testOriginID", utils.InfieldSep),
},
}
eR.Config().Fields = hdrFields
eR.Config().Fields[0].ComputePath()
record := "testRecord"
if err := eR.createHeaderMap(record, 0, 0, "/tmp/fwvErs/out", hdrFields); err != nil {
t.Error(err)
}
select {
case data := <-eR.rdrEvents:
expEvent.ID = data.cgrEvent.ID
expEvent.Time = data.cgrEvent.Time
if !reflect.DeepEqual(data.cgrEvent, expEvent) {
t.Errorf("Expected %v but received %v", utils.ToJSON(expEvent), utils.ToJSON(data.cgrEvent))
}
case <-time.After(50 * time.Millisecond):
t.Error("Time limit exceeded")
}
}
func TestFileFWVCreateHeaderMapError1(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
data := engine.NewInternalDB(nil, nil, true)
dm := engine.NewDataManager(data, cfg.CacheCfg(), nil)
fltrs := engine.NewFilterS(cfg, nil, dm)
eR := &FWVFileER{
cgrCfg: cfg,
cfgIdx: 0,
fltrS: fltrs,
rdrDir: "/tmp/fwvErs/out",
rdrEvents: make(chan *erEvent, 1),
rdrError: make(chan error, 1),
rdrExit: make(chan struct{}),
conReqs: make(chan struct{}, 1),
}
eR.conReqs <- struct{}{}
trailerFields := []*config.FCTemplate{
{},
}
record := "testRecord"
errExpect := "unsupported type: <>"
if err := eR.createHeaderMap(record, 0, 0, "/tmp/fwvErs/out", trailerFields); err == nil || err.Error() != errExpect {
t.Errorf("Expected %v but received %v", errExpect, err)
}
}
func TestFileFWVCreateHeaderMapError2(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
data := engine.NewInternalDB(nil, nil, true)
dm := engine.NewDataManager(data, cfg.CacheCfg(), nil)
fltrs := engine.NewFilterS(cfg, nil, dm)
eR := &FWVFileER{
cgrCfg: cfg,
cfgIdx: 0,
fltrS: fltrs,
rdrDir: "/tmp/fwvErs/out",
rdrEvents: make(chan *erEvent, 1),
rdrError: make(chan error, 1),
rdrExit: make(chan struct{}),
conReqs: make(chan struct{}, 1),
}
eR.conReqs <- struct{}{}
record := "testRecord"
trailerFields := []*config.FCTemplate{
{
Tag: "OriginId",
Path: "*cgreq.OriginID",
Type: utils.MetaConstant,
Value: config.NewRSRParsersMustCompile("testOriginID", utils.InfieldSep),
},
}
//
eR.Config().Filters = []string{"Filter1"}
errExpect := "NOT_FOUND:Filter1"
if err := eR.createHeaderMap(record, 0, 0, "/tmp/fwvErs/out", trailerFields); err == nil || err.Error() != errExpect {
t.Errorf("Expected %v but received %v", errExpect, err)
}
}

View File

@@ -36,21 +36,22 @@ import (
)
func NewJSONFileER(cfg *config.CGRConfig, cfgIdx int,
rdrEvents chan *erEvent, rdrErr chan error,
rdrEvents, partialEvents chan *erEvent, rdrErr chan error,
fltrS *engine.FilterS, rdrExit chan struct{}) (er EventReader, err error) {
srcPath := cfg.ERsCfg().Readers[cfgIdx].SourcePath
if strings.HasSuffix(srcPath, utils.Slash) {
srcPath = srcPath[:len(srcPath)-1]
}
jsonEr := &JSONFileER{
cgrCfg: cfg,
cfgIdx: cfgIdx,
fltrS: fltrS,
rdrDir: srcPath,
rdrEvents: rdrEvents,
rdrError: rdrErr,
rdrExit: rdrExit,
conReqs: make(chan struct{}, cfg.ERsCfg().Readers[cfgIdx].ConcurrentReqs)}
cgrCfg: cfg,
cfgIdx: cfgIdx,
fltrS: fltrS,
rdrDir: srcPath,
rdrEvents: rdrEvents,
partialEvents: partialEvents,
rdrError: rdrErr,
rdrExit: rdrExit,
conReqs: make(chan struct{}, cfg.ERsCfg().Readers[cfgIdx].ConcurrentReqs)}
var processFile struct{}
for i := 0; i < cfg.ERsCfg().Readers[cfgIdx].ConcurrentReqs; i++ {
jsonEr.conReqs <- processFile // Empty initiate so we do not need to wait later when we pop
@@ -61,14 +62,15 @@ func NewJSONFileER(cfg *config.CGRConfig, cfgIdx int,
// JSONFileER implements EventReader interface for .json files
type JSONFileER struct {
sync.RWMutex
cgrCfg *config.CGRConfig
cfgIdx int // index of config instance within ERsCfg.Readers
fltrS *engine.FilterS
rdrDir string
rdrEvents chan *erEvent // channel to dispatch the events created to
rdrError chan error
rdrExit chan struct{}
conReqs chan struct{} // limit number of opened files
cgrCfg *config.CGRConfig
cfgIdx int // index of config instance within ERsCfg.Readers
fltrS *engine.FilterS
rdrDir string
rdrEvents chan *erEvent // channel to dispatch the events created to
partialEvents chan *erEvent // channel to dispatch the partial events created to
rdrError chan error
rdrExit chan struct{}
conReqs chan struct{} // limit number of opened files
}
func (rdr *JSONFileER) Config() *config.EventReaderCfg {
@@ -167,7 +169,11 @@ func (rdr *JSONFileER) processFile(fPath, fName string) (err error) {
return
}
cgrEv := utils.NMAsCGREvent(agReq.CGRRequest, agReq.Tenant, utils.NestingSep, agReq.Opts)
rdr.rdrEvents <- &erEvent{
rdrEv := rdr.rdrEvents
if _, isPartial := cgrEv.APIOpts[partialOpt]; isPartial {
rdrEv = rdr.partialEvents
}
rdrEv <- &erEvent{
cgrEvent: cgrEv,
rdrCfg: rdr.Config(),
}

View File

@@ -27,6 +27,7 @@ import (
"testing"
"time"
v2 "github.com/cgrates/cgrates/apier/v2"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/config"
@@ -224,7 +225,7 @@ func testJSONKillEngine(t *testing.T) {
func TestFileJSONServeErrTimeDuration0(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
cfgIdx := 0
rdr, err := NewJSONFileER(cfg, cfgIdx, nil, nil, nil, nil)
rdr, err := NewJSONFileER(cfg, cfgIdx, nil, nil, nil, nil, nil)
if err != nil {
t.Errorf("\nExpected <%+v>, \nReceived <%+v>", nil, err)
}
@@ -239,7 +240,7 @@ func TestFileJSONServeErrTimeDuration0(t *testing.T) {
func TestFileJSONServeErrTimeDurationNeg1(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
cfgIdx := 0
rdr, err := NewJSONFileER(cfg, cfgIdx, nil, nil, nil, nil)
rdr, err := NewJSONFileER(cfg, cfgIdx, nil, nil, nil, nil, nil)
if err != nil {
t.Errorf("\nExpected <%+v>, \nReceived <%+v>", nil, err)
}
@@ -251,7 +252,51 @@ func TestFileJSONServeErrTimeDurationNeg1(t *testing.T) {
}
}
func TestFileJSONServeTimeDefault(t *testing.T) {
// func TestFileJSONServeTimeDefault(t *testing.T) {
// cfg := config.NewDefaultCGRConfig()
// cfgIdx := 0
// rdr, err := NewJSONFileER(cfg, cfgIdx, nil, nil, nil, nil,nil)
// if err != nil {
// t.Errorf("\nExpected <%+v>, \nReceived <%+v>", nil, err)
// }
// rdr.Config().RunDelay = time.Duration(1)
// result := rdr.Serve()
// if !reflect.DeepEqual(result, nil) {
// t.Errorf("\nExpected <%+v>, \nReceived <%+v>", nil, result)
// }
// }
// func TestFileJSONServeTimeDefaultChanExit(t *testing.T) {
// cfg := config.NewDefaultCGRConfig()
// cfgIdx := 0
// rdrExit := make(chan struct{}, 1)
// rdr, err := NewJSONFileER(cfg, cfgIdx, nil, nil, nil, rdrExit)
// if err != nil {
// t.Errorf("\nExpected <%+v>, \nReceived <%+v>", nil, err)
// }
// rdrExit <- struct{}{}
// rdr.Config().RunDelay = time.Duration(1)
// result := rdr.Serve()
// if !reflect.DeepEqual(result, nil) {
// t.Errorf("\nExpected <%+v>, \nReceived <%+v>", nil, result)
// }
// }
// func TestFileJSONProcessFile(t *testing.T) {
// cfg := config.NewDefaultCGRConfig()
// cfgIdx := 0
// rdr, err := NewJSONFileER(cfg, cfgIdx, nil, nil, nil, nil,nil)
// if err != nil {
// t.Errorf("\nExpected <%+v>, \nReceived <%+v>", nil, err)
// }
// expected := "open : no such file or directory"
// err2 := rdr.(*JSONFileER).processFile("", "")
// if err2 == nil || err2.Error() != expected {
// t.Errorf("\nExpected <%+v>, \nReceived <%+v>", expected, err2)
// }
// }
func TestFileJSONProcessEvent(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
cfgIdx := 0
rdr, err := NewJSONFileER(cfg, cfgIdx, nil, nil, nil, nil)

View File

@@ -43,7 +43,7 @@ func TestNewJSONFileER(t *testing.T) {
}
cfg.ERsCfg().Readers[0].ConcurrentReqs = 1
cfg.ERsCfg().Readers[0].SourcePath = "/"
result, err := NewJSONFileER(cfg, cfgIdx, nil, nil, nil, nil)
result, err := NewJSONFileER(cfg, cfgIdx, nil, nil, nil, nil, nil)
if err != nil {
t.Errorf("\nExpected <%+v>, \nReceived <%+v>", nil, err)
}

View File

@@ -37,21 +37,22 @@ import (
)
func NewXMLFileER(cfg *config.CGRConfig, cfgIdx int,
rdrEvents chan *erEvent, rdrErr chan error,
rdrEvents, partialEvents chan *erEvent, rdrErr chan error,
fltrS *engine.FilterS, rdrExit chan struct{}) (er EventReader, err error) {
srcPath := cfg.ERsCfg().Readers[cfgIdx].SourcePath
if strings.HasSuffix(srcPath, utils.Slash) {
srcPath = srcPath[:len(srcPath)-1]
}
xmlER := &XMLFileER{
cgrCfg: cfg,
cfgIdx: cfgIdx,
fltrS: fltrS,
rdrDir: srcPath,
rdrEvents: rdrEvents,
rdrError: rdrErr,
rdrExit: rdrExit,
conReqs: make(chan struct{}, cfg.ERsCfg().Readers[cfgIdx].ConcurrentReqs)}
cgrCfg: cfg,
cfgIdx: cfgIdx,
fltrS: fltrS,
rdrDir: srcPath,
rdrEvents: rdrEvents,
partialEvents: partialEvents,
rdrError: rdrErr,
rdrExit: rdrExit,
conReqs: make(chan struct{}, cfg.ERsCfg().Readers[cfgIdx].ConcurrentReqs)}
var processFile struct{}
for i := 0; i < cfg.ERsCfg().Readers[cfgIdx].ConcurrentReqs; i++ {
xmlER.conReqs <- processFile // Empty initiate so we do not need to wait later when we pop
@@ -62,14 +63,15 @@ func NewXMLFileER(cfg *config.CGRConfig, cfgIdx int,
// XMLFileER implements EventReader interface for .xml files
type XMLFileER struct {
sync.RWMutex
cgrCfg *config.CGRConfig
cfgIdx int // index of config instance within ERsCfg.Readers
fltrS *engine.FilterS
rdrDir string
rdrEvents chan *erEvent // channel to dispatch the events created to
rdrError chan error
rdrExit chan struct{}
conReqs chan struct{} // limit number of opened files
cgrCfg *config.CGRConfig
cfgIdx int // index of config instance within ERsCfg.Readers
fltrS *engine.FilterS
rdrDir string
rdrEvents chan *erEvent // channel to dispatch the events created to
partialEvents chan *erEvent // channel to dispatch the partial events created to
rdrError chan error
rdrExit chan struct{}
conReqs chan struct{} // limit number of opened files
}
func (rdr *XMLFileER) Config() *config.EventReaderCfg {
@@ -166,7 +168,11 @@ func (rdr *XMLFileER) processFile(fPath, fName string) (err error) {
continue
}
cgrEv := utils.NMAsCGREvent(agReq.CGRRequest, agReq.Tenant, utils.NestingSep, agReq.Opts)
rdr.rdrEvents <- &erEvent{
rdrEv := rdr.rdrEvents
if _, isPartial := cgrEv.APIOpts[partialOpt]; isPartial {
rdrEv = rdr.partialEvents
}
rdrEv <- &erEvent{
cgrEvent: cgrEv,
rdrCfg: rdr.Config(),
}

View File

@@ -20,9 +20,11 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
package ers
import (
"fmt"
"net/rpc"
"os"
"path"
"reflect"
"testing"
"time"
@@ -284,3 +286,412 @@ func testXMLITKillEngine(t *testing.T) {
t.Error(err)
}
}
// TestNewXMLFileER checks that the constructor strips the trailing slash from
// the source path and pre-fills the concurrent-requests semaphore.
func TestNewXMLFileER(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	cfg.ERsCfg().Readers[0].SourcePath = "/tmp/xmlErs/out/"
	cfg.ERsCfg().Readers[0].ConcurrentReqs = 1
	fltrs := &engine.FilterS{}
	expEr := &XMLFileER{
		cgrCfg:    cfg,
		cfgIdx:    0,
		fltrS:     fltrs,
		rdrDir:    "/tmp/xmlErs/out", // trailing slash must be removed by the constructor
		rdrEvents: nil,
		rdrError:  nil,
		rdrExit:   nil,
		conReqs:   make(chan struct{}, 1),
	}
	var value struct{}
	expEr.conReqs <- value
	eR, err := NewXMLFileER(cfg, 0, nil, nil, nil, fltrs, nil)
	expConReq := make(chan struct{}, 1)
	expConReq <- struct{}{}
	// NOTE(review): comparing two struct{} values can never differ, so this
	// branch is unreachable; it mainly proves both channels hold one token.
	if <-expConReq != <-eR.(*XMLFileER).conReqs {
		t.Errorf("Expected %v but received %v", <-expConReq, <-eR.(*XMLFileER).conReqs)
	}
	// nil-out the channels so reflect.DeepEqual can compare the rest of the fields
	expEr.conReqs = nil
	eR.(*XMLFileER).conReqs = nil
	if err != nil {
		t.Error(err)
	} else if !reflect.DeepEqual(eR.(*XMLFileER), expEr) {
		t.Errorf("Expected %v but received %v", expEr.conReqs, eR.(*XMLFileER).conReqs)
	}
}
// TestFileXMLProcessEvent writes a minimal BroadWorks CDR file to disk, runs
// processFile over it and expects one CGREvent with the constant OriginID to
// arrive on the rdrEvents channel.
func TestFileXMLProcessEvent(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	cfg.ERsCfg().Readers[0].ProcessedPath = "" // keep the file in place after processing
	fltrs := &engine.FilterS{}
	filePath := "/tmp/TestFileXMLProcessEvent/"
	if err := os.MkdirAll(filePath, 0777); err != nil {
		t.Error(err)
	}
	file, err := os.Create(path.Join(filePath, "file1.xml"))
	if err != nil {
		t.Error(err)
	}
	xmlData := `<?xml version="1.0" encoding="ISO-8859-1"?>
<!DOCTYPE broadWorksCDR>
<broadWorksCDR version="19.0">
	<cdrData>
		<basicModule>
			<localCallId>
				<localCallId>25160047719:0</localCallId>
			</localCallId>
		</basicModule>
	</cdrData>
</broadWorksCDR>
`
	file.Write([]byte(xmlData))
	file.Close()
	eR := &XMLFileER{
		cgrCfg:    cfg,
		cfgIdx:    0,
		fltrS:     fltrs,
		rdrDir:    "/tmp/xmlErs/out",
		rdrEvents: make(chan *erEvent, 1),
		rdrError:  make(chan error, 1),
		rdrExit:   make(chan struct{}),
		conReqs:   make(chan struct{}, 1),
	}
	//or set the default Fields of cfg.ERsCfg().Readers[0].Fields
	eR.Config().Fields = []*config.FCTemplate{
		{
			Tag:       "OriginID",
			Type:      utils.MetaConstant,
			Path:      "*cgreq.OriginID",
			Value:     config.NewRSRParsersMustCompile("25160047719:0", utils.InfieldSep),
			Mandatory: true,
		},
	}
	eR.Config().Fields[0].ComputePath()
	eR.conReqs <- struct{}{} // seed the semaphore so processFile does not block
	fileName := "file1.xml"
	if err := eR.processFile(filePath, fileName); err != nil {
		t.Error(err)
	}
	expEvent := &utils.CGREvent{
		Tenant: "cgrates.org",
		Event: map[string]interface{}{
			"OriginID": "25160047719:0",
		},
		APIOpts: make(map[string]interface{}),
	}
	select {
	case data := <-eR.rdrEvents:
		// ID and Time are generated at runtime; copy them before comparing
		expEvent.ID = data.cgrEvent.ID
		expEvent.Time = data.cgrEvent.Time
		if !reflect.DeepEqual(data.cgrEvent, expEvent) {
			t.Errorf("Expected %v but received %v", utils.ToJSON(expEvent), utils.ToJSON(data.cgrEvent))
		}
	case <-time.After(50 * time.Millisecond):
		t.Error("Time limit exceeded")
	}
	if err := os.RemoveAll(filePath); err != nil {
		t.Error(err)
	}
}
// TestFileXMLProcessEventError1 verifies that processFile reports the
// underlying os.Open error when the target file does not exist.
func TestFileXMLProcessEventError1(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	eR := &XMLFileER{
		cgrCfg:    cfg,
		cfgIdx:    0,
		fltrS:     &engine.FilterS{},
		rdrDir:    "/tmp/xmlErs/out/",
		rdrEvents: make(chan *erEvent, 1),
		rdrError:  make(chan error, 1),
		rdrExit:   make(chan struct{}),
		conReqs:   make(chan struct{}, 1),
	}
	eR.conReqs <- struct{}{}
	errExpect := "open /tmp/TestFileXMLProcessEvent/file1.xml: no such file or directory"
	err := eR.processFile("/tmp/TestFileXMLProcessEvent/", "file1.xml")
	if err == nil || err.Error() != errExpect {
		t.Errorf("Expected %v but received %v", errExpect, err)
	}
}
// TestFileXMLProcessEVentError2 exercises two failure paths of processFile:
// a non-existing filter referenced by the reader and a rename failure when
// the processed path does not exist.
func TestFileXMLProcessEVentError2(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	cfg.ERsCfg().Readers[0].Fields = []*config.FCTemplate{}
	data := engine.NewInternalDB(nil, nil, true)
	dm := engine.NewDataManager(data, cfg.CacheCfg(), nil)
	fltrs := engine.NewFilterS(cfg, nil, dm)
	filePath := "/tmp/TestFileXMLProcessEvent/"
	fname := "file1.xml"
	if err := os.MkdirAll(filePath, 0777); err != nil {
		t.Error(err)
	}
	file, err := os.Create(path.Join(filePath, "file1.xml"))
	if err != nil {
		t.Error(err)
	}
	xmlData := `<?xml version="1.0" encoding="ISO-8859-1"?>
<!DOCTYPE broadWorksCDR>
<broadWorksCDR version="19.0">
	<cdrData>
		<basicModule>
			<localCallId>
				<localCallId>25160047719:0</localCallId>
			</localCallId>
		</basicModule>
	</cdrData>
</broadWorksCDR>
`
	file.Write([]byte(xmlData))
	file.Close()
	eR := &XMLFileER{
		cgrCfg:    cfg,
		cfgIdx:    0,
		fltrS:     fltrs,
		rdrDir:    "/tmp/xmlErs/out/",
		rdrEvents: make(chan *erEvent, 1),
		rdrError:  make(chan error, 1),
		rdrExit:   make(chan struct{}),
		conReqs:   make(chan struct{}, 1),
	}
	eR.conReqs <- struct{}{}
	eR.Config().Tenant = config.RSRParsers{
		{
			Rules: "test",
		},
	}
	// first pass: a filter that is not defined anywhere must surface NOT_FOUND
	eR.Config().Filters = []string{"Filter1"}
	errExpect := "NOT_FOUND:Filter1"
	if err := eR.processFile(filePath, fname); err == nil || err.Error() != errExpect {
		t.Errorf("Expected %v but received %v", errExpect, err)
	}
	// second pass: filter matches, but moving the file to the (missing)
	// processed folder fails with a rename error
	eR.Config().Filters = []string{"*exists:~*req..Account:"}
	errExpect = "rename /tmp/TestFileXMLProcessEvent/file1.xml /var/spool/cgrates/ers/out/file1.xml: no such file or directory"
	if err := eR.processFile(filePath, fname); err == nil || err.Error() != errExpect {
		t.Errorf("Expected %v but received %v", errExpect, err)
	}
	if err := os.RemoveAll(filePath); err != nil {
		t.Error(err)
	}
}
// TestFileXMLProcessEVentError3 checks that a mandatory template field with a
// nil value makes processFile fail with an "Empty source value" error.
func TestFileXMLProcessEVentError3(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	cfg.ERsCfg().Readers[0].ProcessedPath = ""
	fltrs := &engine.FilterS{}
	filePath := "/tmp/TestFileXMLProcessEvent/"
	fname := "file1.xml"
	if err := os.MkdirAll(filePath, 0777); err != nil {
		t.Error(err)
	}
	file, err := os.Create(path.Join(filePath, "file1.xml"))
	if err != nil {
		t.Error(err)
	}
	xmlData := `<?xml version="1.0" encoding="ISO-8859-1"?>
<!DOCTYPE broadWorksCDR>
<broadWorksCDR version="19.0">
	<cdrData>
		<basicModule>
			<localCallId>
				<localCallId>25160047719:0</localCallId>
			</localCallId>
		</basicModule>
	</cdrData>
</broadWorksCDR>
`
	// check the write error instead of silently ignoring it
	if _, err := file.Write([]byte(xmlData)); err != nil {
		t.Error(err)
	}
	file.Close()
	eR := &XMLFileER{
		cgrCfg:    cfg,
		cfgIdx:    0,
		fltrS:     fltrs,
		rdrDir:    "/tmp/xmlErs/out/",
		rdrEvents: make(chan *erEvent, 1),
		rdrError:  make(chan error, 1),
		rdrExit:   make(chan struct{}),
		conReqs:   make(chan struct{}, 1),
	}
	eR.conReqs <- struct{}{}
	// a mandatory field with no Value must abort processing
	eR.Config().Fields = []*config.FCTemplate{
		{
			Tag:       "OriginID",
			Type:      utils.MetaConstant,
			Path:      "*cgreq.OriginID",
			Value:     nil,
			Mandatory: true,
		},
	}
	eR.Config().Fields[0].ComputePath()
	errExpect := "Empty source value for fieldID: <OriginID>"
	if err := eR.processFile(filePath, fname); err == nil || err.Error() != errExpect {
		t.Errorf("Expected %v but received %v", errExpect, err)
	}
	if err := os.RemoveAll(filePath); err != nil {
		t.Error(err)
	}
}
// TestFileXMLProcessEventParseError feeds processFile a malformed XML file
// (unclosed element) and expects the underlying XML syntax error back.
func TestFileXMLProcessEventParseError(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	cfg.ERsCfg().Readers[0].ProcessedPath = ""
	fltrs := &engine.FilterS{}
	filePath := "/tmp/TestFileXMLProcessEvent/"
	if err := os.MkdirAll(filePath, 0777); err != nil {
		t.Error(err)
	}
	file, err := os.Create(path.Join(filePath, "file1.xml"))
	if err != nil {
		t.Error(err)
	}
	// deliberately broken markup: missing '<' on the closing tag
	file.Write([]byte(`
	<XMLField>test/XMLField>`))
	file.Close()
	eR := &XMLFileER{
		cgrCfg:    cfg,
		cfgIdx:    0,
		fltrS:     fltrs,
		rdrDir:    "/tmp/xmlErs/out",
		rdrEvents: make(chan *erEvent, 1),
		rdrError:  make(chan error, 1),
		rdrExit:   make(chan struct{}),
		conReqs:   make(chan struct{}, 1),
	}
	eR.conReqs <- struct{}{}
	fileName := "file1.xml"
	errExpect := "XML syntax error on line 2: unexpected EOF"
	if err := eR.processFile(filePath, fileName); err == nil || err.Error() != errExpect {
		t.Errorf("Expected %v but received %v", errExpect, err)
	}
	if err := os.RemoveAll(filePath); err != nil {
		t.Error(err)
	}
}
// TestFileXML drives Serve through its three RunDelay modes (-1 watcher,
// periodic ticker, and again with a non-XML file present in the directory).
func TestFileXML(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	fltrs := &engine.FilterS{}
	eR := &XMLFileER{
		cgrCfg:    cfg,
		cfgIdx:    0,
		fltrS:     fltrs,
		rdrDir:    "/tmp/xmlErs/out",
		rdrEvents: make(chan *erEvent, 1),
		rdrError:  make(chan error, 1),
		rdrExit:   make(chan struct{}),
		conReqs:   make(chan struct{}, 1),
	}
	eR.conReqs <- struct{}{}
	err := os.MkdirAll(eR.rdrDir, 0777)
	if err != nil {
		t.Error(err)
	}
	eR.Config().Fields = []*config.FCTemplate{
		{
			Tag:       "OriginID",
			Type:      utils.MetaConstant,
			Path:      "*cgreq.OriginID",
			Value:     config.NewRSRParsersMustCompile("25160047719:0", utils.InfieldSep),
			Mandatory: true,
		},
	}
	eR.Config().Fields[0].ComputePath()
	for i := 1; i < 4; i++ {
		// close the created files so the test does not leak descriptors
		if f, err := os.Create(path.Join(eR.rdrDir, fmt.Sprintf("file%d.xml", i))); err != nil {
			t.Error(err)
		} else {
			f.Close()
		}
	}
	eR.Config().RunDelay = time.Duration(-1) // -1 switches Serve to directory watching
	if err := eR.Serve(); err != nil {
		t.Error(err)
	}
	eR.Config().RunDelay = 1 * time.Millisecond // positive delay switches to periodic scanning
	if err := eR.Serve(); err != nil {
		t.Error(err)
	}
	// a non-.xml file must be skipped by the scanner without error
	if f, err := os.Create(path.Join(eR.rdrDir, "file1.txt")); err != nil {
		t.Error(err)
	} else {
		f.Close()
	}
	eR.Config().RunDelay = 1 * time.Millisecond
	if err := eR.Serve(); err != nil {
		t.Error(err)
	}
}
// TestFileXMLError runs the periodic Serve mode over a directory holding both
// .xml and non-.xml files, expecting no error from Serve itself.
func TestFileXMLError(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	fltrs := &engine.FilterS{}
	eR := &XMLFileER{
		cgrCfg:    cfg,
		cfgIdx:    0,
		fltrS:     fltrs,
		rdrDir:    "/tmp/xmlErsError/out",
		rdrEvents: make(chan *erEvent, 1),
		rdrError:  make(chan error, 1),
		rdrExit:   make(chan struct{}),
		conReqs:   make(chan struct{}, 1),
	}
	eR.conReqs <- struct{}{}
	err := os.MkdirAll(eR.rdrDir, 0777)
	if err != nil {
		t.Error(err)
	}
	eR.Config().Fields = []*config.FCTemplate{
		{
			Tag:       "OriginID",
			Type:      utils.MetaConstant,
			Path:      "*cgreq.OriginID",
			Value:     config.NewRSRParsersMustCompile("25160047719:0", utils.InfieldSep),
			Mandatory: true,
		},
	}
	eR.Config().Fields[0].ComputePath()
	for i := 1; i < 4; i++ {
		// close the created files so the test does not leak descriptors
		if f, err := os.Create(path.Join(eR.rdrDir, fmt.Sprintf("file%d.xml", i))); err != nil {
			t.Error(err)
		} else {
			f.Close()
		}
	}
	if f, err := os.Create(path.Join(eR.rdrDir, "file1.txt")); err != nil {
		t.Error(err)
	} else {
		f.Close()
	}
	eR.Config().RunDelay = 1 * time.Millisecond
	if err := eR.Serve(); err != nil {
		t.Error(err)
	}
}
// TestFileXMLExit starts the reader in periodic mode and then signals it to
// stop through the exit channel.
func TestFileXMLExit(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	eR := &XMLFileER{
		cgrCfg:    cfg,
		cfgIdx:    0,
		fltrS:     &engine.FilterS{},
		rdrDir:    "/tmp/xmlErs/out",
		rdrEvents: make(chan *erEvent, 1),
		rdrError:  make(chan error, 1),
		rdrExit:   make(chan struct{}),
		conReqs:   make(chan struct{}, 1),
	}
	eR.conReqs <- struct{}{}
	eR.Config().RunDelay = time.Millisecond
	if err := eR.Serve(); err != nil {
		t.Error(err)
	}
	eR.rdrExit <- struct{}{} // ask the serving goroutine to shut down
}

View File

@@ -38,7 +38,7 @@ func TestERSNewXMLFileER(t *testing.T) {
rdrExit: nil,
conReqs: nil,
}
result, err := NewXMLFileER(cfg, 0, nil, nil, nil, nil)
result, err := NewXMLFileER(cfg, 0, nil, nil, nil, nil, nil)
if err != nil {
t.Errorf("\nExpected: <%+v>, \nreceived: <%+v>", nil, err)
}
@@ -60,7 +60,7 @@ func TestERSXMLFileERConfig(t *testing.T) {
Filters: []string{},
Opts: make(map[string]interface{}),
}
result1, err := NewXMLFileER(cfg, 0, nil, nil, nil, nil)
result1, err := NewXMLFileER(cfg, 0, nil, nil, nil, nil, nil)
if err != nil {
t.Errorf("\nExpected: <%+v>, \nreceived: <%+v>", nil, err)
}
@@ -82,7 +82,7 @@ func TestERSXMLFileERServeNil(t *testing.T) {
Filters: []string{},
Opts: make(map[string]interface{}),
}
result1, err := NewXMLFileER(cfg, 0, nil, nil, nil, nil)
result1, err := NewXMLFileER(cfg, 0, nil, nil, nil, nil, nil)
if err != nil {
t.Errorf("\nExpected: <%+v>, \nreceived: <%+v>", nil, err)
}

View File

@@ -1,288 +0,0 @@
/*
Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
Copyright (C) ITsysCOM GmbH
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
*/
package ers
import (
"encoding/csv"
"fmt"
"io"
"os"
"path"
"strings"
"sync"
"time"
"github.com/cgrates/birpc/context"
"github.com/cgrates/ltcache"
"github.com/cgrates/cgrates/agents"
"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
)
// fstRecord holds one Flatstore CSV row cached while it waits for its pairing
// records (e.g. the matching INVITE/BYE/ACK) to arrive.
type fstRecord struct {
	method   string   // request method parsed from the row
	values   []string // raw CSV fields of the row
	fileName string   // source file name, reused when dumping unpaired records
}
// NewFlatstoreER constructs a Flatstore EventReader for the reader profile at
// cfgIdx, pre-filling its concurrency semaphore and wiring the partial-record
// cache whose TTL comes from the reader options.
func NewFlatstoreER(cfg *config.CGRConfig, cfgIdx int,
	rdrEvents chan *erEvent, rdrErr chan error,
	fltrS *engine.FilterS, rdrExit chan struct{}) (er EventReader, err error) {
	rdrCfg := cfg.ERsCfg().Readers[cfgIdx]
	rdr := &FlatstoreER{
		cgrCfg:    cfg,
		cfgIdx:    cfgIdx,
		fltrS:     fltrS,
		rdrDir:    strings.TrimSuffix(rdrCfg.SourcePath, utils.Slash),
		rdrEvents: rdrEvents,
		rdrError:  rdrErr,
		rdrExit:   rdrExit,
		conReqs:   make(chan struct{}, rdrCfg.ConcurrentReqs),
	}
	// Fill the semaphore up-front so processFile never blocks on its first pops.
	for i := 0; i < rdrCfg.ConcurrentReqs; i++ {
		rdr.conReqs <- struct{}{}
	}
	var ttl time.Duration
	if ttlOpt, has := rdr.Config().Opts[utils.FstPartialRecordCacheOpt]; has {
		if ttl, err = utils.IfaceAsDuration(ttlOpt); err != nil {
			return
		}
	}
	// Expired (unpaired) records are flushed to disk via dumpToFile.
	rdr.cache = ltcache.NewCache(ltcache.UnlimitedCaching, ttl, false, rdr.dumpToFile)
	return rdr, nil
}
// FlatstoreER implements EventReader interface for Flatstore CDR
type FlatstoreER struct {
	sync.RWMutex
	cgrCfg *config.CGRConfig
	cfgIdx int // index of config instance within ERsCfg.Readers
	fltrS  *engine.FilterS
	cache  *ltcache.Cache // holds unpaired records until their counterpart arrives or the TTL expires
	rdrDir string         // directory monitored for incoming CSV files
	rdrEvents chan *erEvent // channel to dispatch the events created to
	rdrError  chan error
	rdrExit   chan struct{}
	conReqs   chan struct{} // limit number of opened files
}
// Config returns the reader profile this instance was built from.
func (rdr *FlatstoreER) Config() *config.EventReaderCfg {
	return rdr.cgrCfg.ERsCfg().Readers[rdr.cfgIdx]
}
// Serve starts reading files according to RunDelay: 0 disables automatic
// reading, -1 watches the directory for new files, and any positive value
// periodically rescans the directory from a background goroutine.
func (rdr *FlatstoreER) Serve() (err error) {
	switch rdr.Config().RunDelay {
	case time.Duration(0): // 0 disables the automatic read, maybe done per API
		return
	case time.Duration(-1):
		// blocking directory watcher; returns when rdrExit fires
		return utils.WatchDir(rdr.rdrDir, rdr.processFile,
			utils.ERs, rdr.rdrExit)
	default:
		go func() {
			tm := time.NewTimer(0) // fire immediately on the first iteration
			for {
				// Not automated, process and sleep approach
				select {
				case <-rdr.rdrExit:
					tm.Stop()
					utils.Logger.Info(
						fmt.Sprintf("<%s> stop monitoring path <%s>",
							utils.ERs, rdr.rdrDir))
					return
				case <-tm.C:
				}
				// NOTE(review): the ReadDir error is ignored; a vanished
				// directory silently yields an empty scan.
				filesInDir, _ := os.ReadDir(rdr.rdrDir)
				for _, file := range filesInDir {
					if !strings.HasSuffix(file.Name(), utils.CSVSuffix) { // hardcoded file extension for csv event reader
						continue // used in order to filter the files from directory
					}
					// each file is processed concurrently; errors are only logged
					go func(fileName string) {
						if err := rdr.processFile(rdr.rdrDir, fileName); err != nil {
							utils.Logger.Warning(
								fmt.Sprintf("<%s> processing file %s, error: %s",
									utils.ERs, fileName, err.Error()))
						}
					}(file.Name())
				}
				tm.Reset(rdr.Config().RunDelay)
			}
		}()
	}
	return
}
// processFile is called for each file in a directory and dispatches erEvents
// from it. Records are paired by originID through the cache: a record is only
// turned into an event once its counterpart(s) arrived (or immediately for
// failed-calls files); unpaired records stay cached until the TTL dumps them.
func (rdr *FlatstoreER) processFile(fPath, fName string) (err error) {
	if cap(rdr.conReqs) != 0 { // 0 goes for no limit
		processFile := <-rdr.conReqs // Queue here for maxOpenFiles
		defer func() { rdr.conReqs <- processFile }()
	}
	absPath := path.Join(fPath, fName)
	utils.Logger.Info(
		fmt.Sprintf("<%s> parsing <%s>", utils.ERs, absPath))
	var file *os.File
	if file, err = os.Open(absPath); err != nil {
		return
	}
	defer file.Close()
	var csvReader *csv.Reader
	if csvReader, err = newCSVReader(file, rdr.Config().Opts, utils.FlatstorePrfx); err != nil {
		utils.Logger.Err(
			fmt.Sprintf("<%s> failed creating flatStore reader for <%s>, due to option parsing error: <%s>",
				utils.ERs, rdr.Config().ID, err.Error()))
		return
	}
	rowNr := 0 // This counts the rows in the file, not really number of CDRs
	evsPosted := 0
	timeStart := time.Now()
	reqVars := &utils.DataNode{Type: utils.NMMapType, Map: map[string]*utils.DataNode{utils.FileName: utils.NewLeafNode(fName)}}
	// Files whose name carries the failed-calls prefix hold only failed calls,
	// so their records never wait in the cache for pairing.
	failedCallPrfx := utils.IfaceAsString(rdr.Config().Opts[utils.FstFailedCallsPrefixOpt])
	failedCallsFile := len(failedCallPrfx) != 0 && strings.HasPrefix(fName, failedCallPrfx)
	var methodTmp config.RSRParsers
	if methodTmp, err = config.NewRSRParsers(utils.IfaceAsString(rdr.Config().Opts[utils.FstMethodOpt]), rdr.cgrCfg.GeneralCfg().RSRSep); err != nil {
		return
	}
	var originTmp config.RSRParsers
	if originTmp, err = config.NewRSRParsers(utils.IfaceAsString(rdr.Config().Opts[utils.FstOriginIDOpt]), rdr.cgrCfg.GeneralCfg().RSRSep); err != nil {
		return
	}
	var mandatoryAcK bool
	if mandatoryAcK, err = utils.IfaceAsBool(rdr.Config().Opts[utils.FstMadatoryACKOpt]); err != nil {
		return
	}
	for {
		var record []string
		if record, err = csvReader.Read(); err != nil {
			if err == io.EOF {
				break
			}
			return
		}
		req := config.NewSliceDP(record, nil)
		tmpReq := utils.MapStorage{utils.MetaReq: req}
		// extract and validate the method of the current row
		var method string
		if method, err = methodTmp.ParseDataProvider(tmpReq); err != nil {
			return
		} else if method != utils.FstInvite &&
			method != utils.FstBye &&
			method != utils.FstAck {
			return fmt.Errorf("unsupported method: <%q>", method)
		}
		var originID string
		if originID, err = originTmp.ParseDataProvider(tmpReq); err != nil {
			return
		}
		cacheKey := utils.ConcatenatedKey(originID, method)
		if rdr.cache.HasItem(cacheKey) {
			// duplicate method for the same call: keep the latest occurrence
			utils.Logger.Warning(fmt.Sprintf("<%s> Overwriting the %s method for record <%s>", utils.ERs, method, originID))
			rdr.cache.Set(cacheKey, &fstRecord{method: method, values: record, fileName: fName}, []string{originID})
			continue
		}
		pairedRecords := rdr.cache.GetGroupItems(originID)
		if lrecords := len(pairedRecords); !failedCallsFile && // do not set in cache if we know that the calls are failed
			(lrecords == 0 ||
				(mandatoryAcK && lrecords != 2) ||
				(!mandatoryAcK && lrecords != 1)) {
			// not all counterparts arrived yet; park this row in the cache
			rdr.cache.Set(cacheKey, &fstRecord{method: method, values: record, fileName: fName}, []string{originID})
			continue
		}
		// call is complete: expose every cached counterpart as an extra DataProvider
		extraDP := map[string]utils.DataProvider{utils.FstMethodToPrfx[method]: req}
		for _, itm := range pairedRecords {
			cached := itm.(*fstRecord)
			rdr.cache.Set(utils.ConcatenatedKey(originID, cached.method), nil, []string{originID})
			extraDP[utils.FstMethodToPrfx[cached.method]] = config.NewSliceDP(cached.values, nil)
		}
		rdr.cache.RemoveGroup(originID)
		rowNr++ // increment the rowNr after checking if it's not the end of file
		agReq := agents.NewAgentRequest(
			req, reqVars,
			nil, nil, nil, rdr.Config().Tenant,
			rdr.cgrCfg.GeneralCfg().DefaultTenant,
			utils.FirstNonEmpty(rdr.Config().Timezone,
				rdr.cgrCfg.GeneralCfg().DefaultTimezone),
			rdr.fltrS, nil) // create an AgentRequest
		if pass, err := rdr.fltrS.Pass(context.TODO(), agReq.Tenant, rdr.Config().Filters,
			agReq); err != nil {
			utils.Logger.Warning(
				fmt.Sprintf("<%s> reading file: <%s> row <%d>, ignoring due to filter error: <%s>",
					utils.ERs, absPath, rowNr, err.Error()))
			return err
		} else if !pass {
			continue
		}
		if err = agReq.SetFields(rdr.Config().Fields); err != nil {
			utils.Logger.Warning(
				fmt.Sprintf("<%s> reading file: <%s> row <%d>, ignoring due to error: <%s>",
					utils.ERs, absPath, rowNr, err.Error()))
			return
		}
		cgrEv := utils.NMAsCGREvent(agReq.CGRRequest, agReq.Tenant, utils.NestingSep, agReq.Opts)
		rdr.rdrEvents <- &erEvent{
			cgrEvent: cgrEv,
			rdrCfg:   rdr.Config(),
		}
		evsPosted++
	}
	if rdr.Config().ProcessedPath != "" {
		// Finished with file, move it to processed folder
		outPath := path.Join(rdr.Config().ProcessedPath, fName)
		if err = os.Rename(absPath, outPath); err != nil {
			return
		}
	}
	utils.Logger.Info(
		fmt.Sprintf("%s finished processing file <%s>. Total records processed: %d, events posted: %d, run duration: %s",
			utils.ERs, absPath, rowNr, evsPosted, time.Since(timeStart)))
	return
}
// dumpToFile is the cache eviction callback for records that expired before
// being paired: it writes the raw row into a temporary file inside the
// processed folder so the partial data is not silently lost.
func (rdr *FlatstoreER) dumpToFile(itmID string, value interface{}) {
	if value == nil { // record was consumed by a successful pairing
		return
	}
	unpRcd := value.(*fstRecord)
	dumpFilePath := path.Join(rdr.Config().ProcessedPath, unpRcd.fileName+utils.TmpSuffix)
	fileOut, err := os.Create(dumpFilePath)
	if err != nil {
		utils.Logger.Err(fmt.Sprintf("<%s> Failed creating %s, error: %s",
			utils.ERs, dumpFilePath, err.Error()))
		return
	}
	defer fileOut.Close()
	csvWriter := csv.NewWriter(fileOut)
	// Guard against a missing/empty field-separator option: indexing [0] of an
	// empty string would panic here; keep csv's default comma in that case.
	if fieldSep := utils.IfaceAsString(rdr.Config().Opts[utils.FlatstorePrfx+utils.FieldSepOpt]); len(fieldSep) != 0 {
		csvWriter.Comma = rune(fieldSep[0])
	}
	if err = csvWriter.Write(unpRcd.values); err != nil {
		utils.Logger.Err(fmt.Sprintf("<%s> Failed writing partial record %v to file: %s, error: %s",
			utils.ERs, unpRcd.values, dumpFilePath, err.Error()))
		// fall through so the writer is flushed and the file closed
	}
	csvWriter.Flush()
}

View File

@@ -21,21 +21,16 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
package ers
import (
"bytes"
"fmt"
"log"
"net/rpc"
"os"
"path"
"reflect"
"strings"
"path/filepath"
"testing"
"time"
"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
"github.com/cgrates/ltcache"
)
var (
@@ -51,16 +46,16 @@ BYE|f9d3d5c3|c863a6e3|214d8f52b566e33a9349b184e72a4cca@0:0:0:0:0:0:0:0|200|OK|14
INVITE|36e39a5|42d996f9|3a63321dd3b325eec688dc2aefb6ac2d@0:0:0:0:0:0:0:0|200|OK|1436454657|*prepaid|1001|1002||2407:1884881533
BYE|36e39a5|42d996f9|3a63321dd3b325eec688dc2aefb6ac2d@0:0:0:0:0:0:0:0|200|OK|1436454661|||||2407:1884881533
INVITE|3111f3c9|49ca4c42|a58ebaae40d08d6757d8424fb09c4c54@0:0:0:0:0:0:0:0|200|OK|1436454690|*prepaid|1001|1002||3099:1909036290
BYE|3111f3c9|49ca4c42|a58ebaae40d08d6757d8424fb09c4c54@0:0:0:0:0:0:0:0|200|OK|1436454692|||||3099:1909036290`
BYE|3111f3c9|49ca4c42|a58ebaae40d08d6757d8424fb09c4c54@0:0:0:0:0:0:0:0|200|OK|1436454692|||||3099:1909036290` // 4
fullMissed = `INVITE|ef6c6256|da501581|0bfdd176d1b93e7df3de5c6f4873ee04@0:0:0:0:0:0:0:0|487|Request Terminated|1436454643|*prepaid|1001|1002||1224:339382783
INVITE|7905e511||81880da80a94bda81b425b09009e055c@0:0:0:0:0:0:0:0|404|Not Found|1436454668|*prepaid|1001|1002||1980:1216490844
INVITE|324cb497|d4af7023|8deaadf2ae9a17809a391f05af31afb0@0:0:0:0:0:0:0:0|486|Busy here|1436454687|*postpaid|1002|1001||474:130115066`
INVITE|324cb497|d4af7023|8deaadf2ae9a17809a391f05af31afb0@0:0:0:0:0:0:0:0|486|Busy here|1436454687|*postpaid|1002|1001||474:130115066` // 3
part1 = `BYE|f9d3d5c3|c863a6e3|214d8f52b566e33a9349b184e72a4ccb@0:0:0:0:0:0:0:0|200|OK|1436454651|||||1877:893549742`
part1 = `BYE|f9d3d5c3|c863a6e3|214d8f52b566e33a9349b184e72a4ccb@0:0:0:0:0:0:0:0|200|OK|1436454651|||||1877:893549742` //1
part2 = `INVITE|f9d3d5c3|c863a6e3|214d8f52b566e33a9349b184e72a4ccb@0:0:0:0:0:0:0:0|200|OK|1436454647|*postpaid|1002|1003||1877:893549742
INVITE|2daec40c|548625ac|dd0c4c617a9919d29a6175cdff223a9p@0:0:0:0:0:0:0:0|200|OK|1436454408|*prepaid|1001|1002||3401:2069362475`
INVITE|2daec40c|548625ac|dd0c4c617a9919d29a6175cdff223a9p@0:0:0:0:0:0:0:0|200|OK|1436454408|*prepaid|1001|1002||3401:2069362475` //1
flatstoreTests = []func(t *testing.T){
testCreateDirs,
@@ -182,18 +177,25 @@ func testFlatstoreITHandleCdr1File(t *testing.T) {
t.Errorf("Files in ersInDir: %+v", fls)
}
filesOutDir, _ := os.ReadDir("/tmp/flatstoreErs/out")
ids := []string{}
for _, fD := range filesOutDir {
ids = append(ids, fD.Name())
}
if len(filesOutDir) != 5 {
ids := []string{}
for _, fD := range filesOutDir {
ids = append(ids, fD.Name())
}
t.Errorf("Unexpected number of files in output directory: %+v, %q", len(filesOutDir), ids)
}
ePartContent := "INVITE|2daec40c|548625ac|dd0c4c617a9919d29a6175cdff223a9p@0:0:0:0:0:0:0:0|200|OK|1436454408|*prepaid|1001|1002||3401:2069362475\n"
if partContent, err := os.ReadFile(path.Join("/tmp/flatstoreErs/out", "acc_3.log.tmp")); err != nil {
tmpl := path.Join("/tmp/flatstoreErs/out", "f7aed15c98b31fea0e3b02b52fc947879a3c5bbc.*.tmp")
if match, err := filepath.Glob(tmpl); err != nil {
t.Error(err)
} else if (ePartContent) != (string(partContent)) {
t.Errorf("Expecting:\n%s\nReceived:\n%s", ePartContent, string(partContent))
} else if len(match) != 1 {
t.Errorf("Wrong number of files matches the template: %q", match)
t.Errorf("template: %q", tmpl)
t.Errorf("files: %q", ids)
} else if partContent, err := os.ReadFile(match[0]); err != nil {
t.Error(err)
} else if ePartContent != string(partContent) {
t.Errorf("Expecting:\n%q\nReceived:\n%q", ePartContent, string(partContent))
}
}
@@ -203,6 +205,7 @@ func testFlatstoreITAnalyseCDRs(t *testing.T) {
t.Error("Unexpected error: ", err.Error())
} else if len(reply) != 8 {
t.Error("Unexpected number of CDRs returned: ", len(reply))
t.Error(utils.ToJSON(reply))
}
if err := flatstoreRPC.Call(utils.APIerSv2GetCDRs, &utils.RPCCDRsFilter{MinUsage: "1"}, &reply); err != nil {
t.Error("Unexpected error: ", err.Error())
@@ -217,6 +220,7 @@ func testFlatstoreITKillEngine(t *testing.T) {
}
}
/*
func TestFlatstoreProcessEvent(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
cfg.ERsCfg().Readers[0].ProcessedPath = ""
@@ -657,3 +661,4 @@ func TestFlatstoreServeErrTimeDurationNeg1(t *testing.T) {
t.Errorf("\nExpected <%+v>, \nReceived <%+v>", expected, err)
}
}
*/

View File

@@ -1,114 +0,0 @@
/*
Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
Copyright (C) ITsysCOM GmbH
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
*/
package ers
import (
"reflect"
"testing"
"time"
"github.com/cgrates/cgrates/utils"
"github.com/cgrates/cgrates/config"
)
// TestNewFlatstoreER verifies the constructor wires the configuration into
// the returned reader. cache and conReqs are freshly allocated inside the
// constructor, so they are zeroed out before the DeepEqual comparison.
func TestNewFlatstoreER(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	cfg.ERsCfg().Readers[0].SourcePath = "/"
	rdr, err := NewFlatstoreER(cfg, 0, nil, nil, nil, nil)
	if err != nil {
		t.Errorf("\nExpected <%+v>, \nReceived <%+v>", nil, err)
	}
	rdr.(*FlatstoreER).cache = nil
	rdr.(*FlatstoreER).conReqs = nil
	want := &FlatstoreER{cgrCfg: cfg}
	if !reflect.DeepEqual(rdr, want) {
		t.Errorf("\nExpected <%+v>, \nReceived <%+v>", want, rdr)
	}
}
// TestFlatstoreConfig checks that Config() returns the reader configuration
// at the reader's own index (here index 0, i.e. "file_reader1") from the
// ERs readers list.
func TestFlatstoreConfig(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	// replace the default readers with two explicit profiles so that the
	// index-based lookup is meaningful
	cfg.ERsCfg().Readers = []*config.EventReaderCfg{
		{
			ID:             "file_reader1",
			Type:           utils.MetaFileCSV,
			RunDelay:       -1,
			ConcurrentReqs: 1024,
			SourcePath:     "/tmp/ers/in",
			ProcessedPath:  "/tmp/ers/out",
			Tenant:         nil,
			Timezone:       utils.EmptyString,
			Filters:        []string{},
			Flags:          utils.FlagsWithParams{},
			Opts:           map[string]interface{}{utils.FlatstorePrfx + utils.RowLengthOpt: 5},
		},
		{
			ID:             "file_reader2",
			Type:           utils.MetaFileCSV,
			RunDelay:       -1,
			ConcurrentReqs: 1024,
			SourcePath:     "/tmp/ers/in",
			ProcessedPath:  "/tmp/ers/out",
			Tenant:         nil,
			Timezone:       utils.EmptyString,
			Filters:        []string{},
			Flags:          utils.FlagsWithParams{},
			Opts:           map[string]interface{}{utils.FlatstorePrfx + utils.RowLengthOpt: 5},
		},
	}
	// the reader is built with cfgIdx 0, so Config() must return the first profile
	expected := cfg.ERsCfg().Readers[0]
	rdr, err := NewFlatstoreER(cfg, 0, nil, nil, nil, nil)
	if err != nil {
		t.Errorf("\nExpected <%+v>, \nReceived <%+v>", nil, err)
	}
	result := rdr.Config()
	if !reflect.DeepEqual(result, expected) {
		t.Errorf("\nExpected <%+v>, \nReceived <%+v>", expected, result)
	}
}
// TestFlatstoreServeNil builds a reader with nil channels/filters, compares
// it field-by-field against the expected struct (borrowing the internally
// allocated cache and conReqs from the result itself) and then verifies that
// Serve with RunDelay 0 is a no-op returning nil.
func TestFlatstoreServeNil(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	result, err := NewFlatstoreER(cfg, 0, nil, nil, nil, nil)
	if err != nil {
		t.Errorf("\nExpected: <%+v>, \nreceived: <%+v>", nil, err)
	}
	expected := &FlatstoreER{
		cgrCfg:    cfg,
		cfgIdx:    0,
		fltrS:     nil,
		cache:     result.(*FlatstoreER).cache, // copied from result: allocated inside the constructor
		rdrDir:    "/var/spool/cgrates/ers/in", // default source path from the default config
		rdrEvents: nil,
		rdrError:  nil,
		rdrExit:   nil,
		conReqs:   result.(*FlatstoreER).conReqs, // copied from result for the same reason as cache
	}
	if !reflect.DeepEqual(expected, result) {
		t.Errorf("\nExpected: <%+v>, \nreceived: <%+v>", expected, result)
	}
	// RunDelay 0 disables automatic reading, so Serve must return immediately
	result.Config().RunDelay = time.Duration(0)
	err = result.Serve()
	if err != nil {
		t.Errorf("\nExpected: <%+v>, \nreceived: <%+v>", nil, err)
	}
}

View File

@@ -35,16 +35,17 @@ import (
// NewKafkaER return a new kafka event reader
func NewKafkaER(cfg *config.CGRConfig, cfgIdx int,
rdrEvents chan *erEvent, rdrErr chan error,
rdrEvents, partialEvents chan *erEvent, rdrErr chan error,
fltrS *engine.FilterS, rdrExit chan struct{}) (er EventReader, err error) {
rdr := &KafkaER{
cgrCfg: cfg,
cfgIdx: cfgIdx,
fltrS: fltrS,
rdrEvents: rdrEvents,
rdrExit: rdrExit,
rdrErr: rdrErr,
cgrCfg: cfg,
cfgIdx: cfgIdx,
fltrS: fltrS,
rdrEvents: rdrEvents,
partialEvents: partialEvents,
rdrExit: rdrExit,
rdrErr: rdrErr,
}
if concReq := rdr.Config().ConcurrentReqs; concReq != -1 {
rdr.cap = make(chan struct{}, concReq)
@@ -72,10 +73,11 @@ type KafkaER struct {
groupID string
maxWait time.Duration
rdrEvents chan *erEvent // channel to dispatch the events created to
rdrExit chan struct{}
rdrErr chan error
cap chan struct{}
rdrEvents chan *erEvent // channel to dispatch the events created to
partialEvents chan *erEvent // channel to dispatch the partial events created to
rdrExit chan struct{}
rdrErr chan error
cap chan struct{}
poster engine.Poster
}
@@ -173,7 +175,11 @@ func (rdr *KafkaER) processMessage(msg []byte) (err error) {
return
}
cgrEv := utils.NMAsCGREvent(agReq.CGRRequest, agReq.Tenant, utils.NestingSep, agReq.Opts)
rdr.rdrEvents <- &erEvent{
rdrEv := rdr.rdrEvents
if _, isPartial := cgrEv.APIOpts[partialOpt]; isPartial {
rdrEv = rdr.partialEvents
}
rdrEv <- &erEvent{
cgrEvent: cgrEv,
rdrCfg: rdr.Config(),
}

View File

@@ -20,6 +20,19 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
package ers
import (
"context"
"fmt"
"reflect"
"testing"
"time"
"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
kafka "github.com/segmentio/kafka-go"
)
var (
rdrEvents chan *erEvent
rdrErr chan error
@@ -27,7 +40,6 @@ var (
rdr EventReader
)
/*
func TestKafkaER(t *testing.T) {
cfg, err := config.NewCGRConfigFromJSONStringWithDefaults(`{
"ers": { // EventReaderService
@@ -58,7 +70,7 @@ func TestKafkaER(t *testing.T) {
rdrErr = make(chan error, 1)
rdrExit = make(chan struct{}, 1)
if rdr, err = NewKafkaER(cfg, 1, rdrEvents,
if rdr, err = NewKafkaER(cfg, 1, rdrEvents, make(chan *erEvent, 1),
rdrErr, new(engine.FilterS), rdrExit); err != nil {
t.Fatal(err)
}
@@ -93,7 +105,7 @@ func TestKafkaER(t *testing.T) {
Event: map[string]interface{}{
"CGRID": randomCGRID,
},
Opts: map[string]interface{}{},
APIOpts: map[string]interface{}{},
}
if !reflect.DeepEqual(ev.cgrEvent, expected) {
t.Errorf("Expected %s ,received %s", utils.ToJSON(expected), utils.ToJSON(ev.cgrEvent))
@@ -103,4 +115,3 @@ func TestKafkaER(t *testing.T) {
}
close(rdrExit)
}
*/

View File

@@ -19,9 +19,12 @@ along with this program. If not, see <http://www.gnu.org/licenses/>
package ers
import (
"reflect"
"testing"
"time"
"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
)
@@ -93,3 +96,169 @@ func TestKafkasetOpts(t *testing.T) {
t.Errorf("Expected: %s ,received: %s", expKafka.maxWait, k.maxWait)
}
}
// TestKafkaERServe exercises Serve with the default RunDelay and with a
// positive RunDelay, then calls createPoster with empty opts and an empty
// processed path to cover the poster-creation branch.
func TestKafkaERServe(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	evCh := make(chan *erEvent, 1)
	exitCh := make(chan struct{}, 1)
	errCh := make(chan error, 1)
	rdr, err := NewKafkaER(cfg, 0, evCh, make(chan *erEvent, 1), errCh, new(engine.FilterS), exitCh)
	if err != nil {
		t.Error(err)
	}
	if err := rdr.Serve(); err != nil {
		t.Error(err)
	}
	rdr.Config().RunDelay = time.Millisecond
	if err := rdr.Serve(); err != nil {
		t.Error(err)
	}
	rdr.Config().Opts = map[string]interface{}{}
	rdr.Config().ProcessedPath = ""
	rdr.(*KafkaER).createPoster()
}
// TestKafkaERServe2 starts Serve on a hand-built KafkaER whose exit channel
// is pre-loaded, so the reading goroutine stops immediately after start;
// Serve itself must still return nil.
func TestKafkaERServe2(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	rdr := &KafkaER{
		cgrCfg:    cfg,
		cfgIdx:    0,
		fltrS:     new(engine.FilterS),
		rdrEvents: make(chan *erEvent, 1),
		rdrExit:   make(chan struct{}, 1),
		rdrErr:    make(chan error, 1),
		dialURL:   "testURL",
		groupID:   "testGroupID",
		topic:     "testTopic",
		maxWait:   time.Duration(1),
		cap:       make(chan struct{}, 1),
	}
	// pre-load the exit signal so the background goroutine terminates at once
	rdr.rdrExit <- struct{}{}
	rdr.Config().RunDelay = 1 * time.Millisecond
	if err := rdr.Serve(); err != nil {
		t.Error(err)
	}
}
// TestKafkaERProcessMessage feeds a JSON message through processMessage with
// a single *constant field template and asserts the CGREvent dispatched on
// rdrEvents carries the templated ToR value. ID and Time are generated at
// processing time, so they are copied from the received event before comparing.
func TestKafkaERProcessMessage(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	rdr := &KafkaER{
		cgrCfg:    cfg,
		cfgIdx:    0,
		fltrS:     new(engine.FilterS),
		rdrEvents: make(chan *erEvent, 1),
		rdrExit:   make(chan struct{}, 1),
		rdrErr:    make(chan error, 1),
		dialURL:   "testURL",
		groupID:   "testGroupID",
		topic:     "testTopic",
		maxWait:   time.Duration(1),
		cap:       make(chan struct{}, 1),
	}
	expEvent := &utils.CGREvent{
		Tenant: "cgrates.org",
		Event: map[string]interface{}{
			utils.ToR: "*voice",
		},
		APIOpts: map[string]interface{}{},
	}
	// one constant template writing "*voice" into *cgreq.ToR
	rdr.Config().Fields = []*config.FCTemplate{
		{
			Tag:   "Tor",
			Type:  utils.MetaConstant,
			Value: config.NewRSRParsersMustCompile("*voice", utils.InfieldSep),
			Path:  "*cgreq.ToR",
		},
	}
	rdr.Config().Fields[0].ComputePath()
	msg := []byte(`{"test":"input"}`)
	if err := rdr.processMessage(msg); err != nil {
		t.Error(err)
	}
	select {
	case data := <-rdr.rdrEvents:
		// ID and Time are generated per event; align before DeepEqual
		expEvent.ID = data.cgrEvent.ID
		expEvent.Time = data.cgrEvent.Time
		if !reflect.DeepEqual(data.cgrEvent, expEvent) {
			t.Errorf("Expected %v but received %v", utils.ToJSON(expEvent), utils.ToJSON(data.cgrEvent))
		}
	case <-time.After(50 * time.Millisecond):
		t.Error("Time limit exceeded")
	}
}
// TestKafkaERProcessMessageError1 configures an empty field template so
// processMessage fails while applying the fields, and asserts the exact
// parsing error.
func TestKafkaERProcessMessageError1(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	kRdr := &KafkaER{
		cgrCfg:    cfg,
		cfgIdx:    0,
		fltrS:     new(engine.FilterS),
		rdrEvents: make(chan *erEvent, 1),
		rdrExit:   make(chan struct{}, 1),
		rdrErr:    make(chan error, 1),
		dialURL:   "testURL",
		groupID:   "testGroupID",
		topic:     "testTopic",
		maxWait:   time.Duration(1),
		cap:       make(chan struct{}, 1),
	}
	kRdr.Config().Fields = []*config.FCTemplate{{}}
	wantErr := "unsupported type: <>"
	if err := kRdr.processMessage([]byte(`{"test":"input"}`)); err == nil || err.Error() != wantErr {
		t.Errorf("Expected %v but received %v", wantErr, err)
	}
}
// TestKafkaERProcessMessageError2 runs processMessage with a real FilterS
// backed by an internal DB and a reference to a non-existing filter profile,
// expecting the NOT_FOUND filter error.
func TestKafkaERProcessMessageError2(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	data := engine.NewInternalDB(nil, nil, true)
	dm := engine.NewDataManager(data, cfg.CacheCfg(), nil)
	fltrs := engine.NewFilterS(cfg, nil, dm)
	rdr := &KafkaER{
		cgrCfg:    cfg,
		cfgIdx:    0,
		fltrS:     fltrs,
		rdrEvents: make(chan *erEvent, 1),
		rdrExit:   make(chan struct{}, 1),
		rdrErr:    make(chan error, 1),
		dialURL:   "testURL",
		groupID:   "testGroupID",
		topic:     "testTopic",
		maxWait:   time.Duration(1),
		cap:       make(chan struct{}, 1),
	}
	// "Filter1" is not defined anywhere, so Pass must fail with NOT_FOUND
	rdr.Config().Filters = []string{"Filter1"}
	msg := []byte(`{"test":"input"}`)
	errExpect := "NOT_FOUND:Filter1"
	if err := rdr.processMessage(msg); err == nil || err.Error() != errExpect {
		t.Errorf("Expected %v but received %v", errExpect, err)
	}
}
// TestKafkaERProcessMessageError3 passes a truncated JSON payload to
// processMessage and asserts the unmarshalling error is surfaced.
func TestKafkaERProcessMessageError3(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	kRdr := &KafkaER{
		cgrCfg:    cfg,
		cfgIdx:    0,
		fltrS:     new(engine.FilterS),
		rdrEvents: make(chan *erEvent, 1),
		rdrExit:   make(chan struct{}, 1),
		rdrErr:    make(chan error, 1),
		dialURL:   "testURL",
		groupID:   "testGroupID",
		topic:     "testTopic",
		maxWait:   time.Duration(1),
		cap:       make(chan struct{}, 1),
	}
	wantErr := "unexpected end of JSON input"
	if err := kRdr.processMessage([]byte(`{"invalid":"input"`)); err == nil || err.Error() != wantErr {
		t.Errorf("Expected %v but received %v", wantErr, err)
	}
}

View File

@@ -1,411 +0,0 @@
/*
Real-time Online/Offline Charging System (OerS) for Telecom & ISP environments
Copyright (C) ITsysCOM GmbH
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
*/
package ers
import (
"encoding/csv"
"fmt"
"io"
"os"
"path"
"sort"
"strings"
"sync"
"time"
"github.com/cgrates/birpc/context"
"github.com/cgrates/cgrates/agents"
"github.com/cgrates/ltcache"
"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
)
// NewPartialCSVFileER builds an EventReader for CSV files whose CDRs may be
// split over multiple rows (partial records). Incomplete rows are kept in an
// expiring cache keyed by CGRID; on expiry they are either posted as CDRs
// (default) or dumped to file, depending on the cache-expiry-action option.
func NewPartialCSVFileER(cfg *config.CGRConfig, cfgIdx int,
	rdrEvents chan *erEvent, rdrErr chan error,
	fltrS *engine.FilterS, rdrExit chan struct{}) (er EventReader, err error) {
	srcPath := cfg.ERsCfg().Readers[cfgIdx].SourcePath
	// normalize the source path: drop a single trailing slash
	if strings.HasSuffix(srcPath, utils.Slash) {
		srcPath = srcPath[:len(srcPath)-1]
	}
	pCSVFileER := &PartialCSVFileER{
		cgrCfg:    cfg,
		cfgIdx:    cfgIdx,
		fltrS:     fltrS,
		rdrDir:    srcPath,
		rdrEvents: rdrEvents,
		rdrError:  rdrErr,
		rdrExit:   rdrExit,
		conReqs:   make(chan struct{}, cfg.ERsCfg().Readers[cfgIdx].ConcurrentReqs)}
	// select the cache eviction callback: post the merged CDR by default,
	// dump the raw records to file when *dump_to_file is configured
	function := pCSVFileER.postCDR
	if utils.IfaceAsString(pCSVFileER.Config().Opts[utils.PartialCSVCacheExpiryActionOpt]) == utils.MetaDumpToFile {
		function = pCSVFileER.dumpToFile
	}
	var processFile struct{}
	for i := 0; i < cfg.ERsCfg().Readers[cfgIdx].ConcurrentReqs; i++ {
		pCSVFileER.conReqs <- processFile // Empty initiate so we do not need to wait later when we pop
	}
	// optional TTL for cached partial records; zero means no expiry
	var ttl time.Duration
	if ttlOpt, has := pCSVFileER.Config().Opts[utils.PartialCSVRecordCacheOpt]; has {
		if ttl, err = utils.IfaceAsDuration(ttlOpt); err != nil {
			return
		}
	}
	pCSVFileER.cache = ltcache.NewCache(ltcache.UnlimitedCaching, ttl, false, function)
	return pCSVFileER, nil
}
// PartialCSVFileER implements EventReader interface for .csv files whose
// CDRs may arrive split over multiple rows (partial records).
type PartialCSVFileER struct {
	sync.RWMutex
	cgrCfg    *config.CGRConfig
	cfgIdx    int // index of config instance within ERsCfg.Readers
	fltrS     *engine.FilterS
	cache     *ltcache.Cache // partial events keyed by CGRID, awaiting their completing row
	rdrDir    string         // directory monitored for incoming CSV files
	rdrEvents chan *erEvent  // channel to dispatch the events created to
	rdrError  chan error
	rdrExit   chan struct{}
	conReqs   chan struct{} // limit number of opened files
}
// Config returns this reader's configuration, looked up by its index within
// the ERs readers list.
func (rdr *PartialCSVFileER) Config() *config.EventReaderCfg {
	readers := rdr.cgrCfg.ERsCfg().Readers
	return readers[rdr.cfgIdx]
}
// Serve starts monitoring rdrDir according to the configured RunDelay:
//   0  disables automatic reads (processing may be triggered per API),
//   -1 watches the directory for filesystem notifications,
//   >0 polls the directory at that interval in a background goroutine.
func (rdr *PartialCSVFileER) Serve() (err error) {
	switch rdr.Config().RunDelay {
	case time.Duration(0): // 0 disables the automatic read, maybe done per API
		return
	case time.Duration(-1):
		return utils.WatchDir(rdr.rdrDir, rdr.processFile,
			utils.ERs, rdr.rdrExit)
	default:
		go func() {
			tm := time.NewTimer(0)
			for {
				// Not automated, process and sleep approach
				select {
				case <-rdr.rdrExit:
					tm.Stop()
					utils.Logger.Info(
						fmt.Sprintf("<%s> stop monitoring path <%s>",
							utils.ERs, rdr.rdrDir))
					return
				case <-tm.C:
				}
				// each matching file is processed in its own goroutine;
				// conReqs inside processFile caps the concurrency
				filesInDir, _ := os.ReadDir(rdr.rdrDir)
				for _, file := range filesInDir {
					if !strings.HasSuffix(file.Name(), utils.CSVSuffix) { // hardcoded file extension for csv event reader
						continue // used in order to filter the files from directory
					}
					go func(fileName string) {
						if err := rdr.processFile(rdr.rdrDir, fileName); err != nil {
							utils.Logger.Warning(
								fmt.Sprintf("<%s> processing file %s, error: %s",
									utils.ERs, fileName, err.Error()))
						}
					}(file.Name())
				}
				tm.Reset(rdr.Config().RunDelay)
			}
		}()
	}
	return
}
// processFile is called for each file in a directory and dispatches erEvents from it.
// Each CSV row is mapped through the reader's field templates into a CGREvent;
// rows flagged as partial are accumulated in the cache under their CGRID until
// the completing row arrives, at which point the merged event is dispatched on
// rdrEvents. On success the file is moved to ProcessedPath (if configured).
func (rdr *PartialCSVFileER) processFile(fPath, fName string) (err error) {
	if cap(rdr.conReqs) != 0 { // 0 goes for no limit
		processFile := <-rdr.conReqs // Queue here for maxOpenFiles
		defer func() { rdr.conReqs <- processFile }()
	}
	absPath := path.Join(fPath, fName)
	utils.Logger.Info(
		fmt.Sprintf("<%s> parsing <%s>", utils.ERs, absPath))
	var file *os.File
	if file, err = os.Open(absPath); err != nil {
		return
	}
	defer file.Close()
	var csvReader *csv.Reader
	if csvReader, err = newCSVReader(file, rdr.Config().Opts, utils.CSV); err != nil {
		utils.Logger.Err(
			fmt.Sprintf("<%s> failed creating CSV reader for <%s>, due to option parsing error: <%s>",
				utils.ERs, rdr.Config().ID, err.Error()))
		return
	}
	var indxAls map[string]int // header-name -> column-index aliases, filled from an optional header row
	rowNr := 0                 // This counts the rows in the file, not really number of CDRs
	evsPosted := 0
	timeStart := time.Now()
	reqVars := &utils.DataNode{Type: utils.NMMapType, Map: map[string]*utils.DataNode{utils.FileName: utils.NewLeafNode(fName)}}
	hdrDefChar := utils.IfaceAsString(rdr.cgrCfg.ERsCfg().Readers[rdr.cfgIdx].Opts[utils.HeaderDefineCharOpt])
	for {
		var record []string
		if record, err = csvReader.Read(); err != nil {
			if err == io.EOF {
				break
			}
			return
		}
		// a first row starting with the header-define character declares
		// column aliases instead of data
		if rowNr == 0 && len(record) > 0 &&
			strings.HasPrefix(record[0], hdrDefChar) {
			record[0] = strings.TrimPrefix(record[0], hdrDefChar)
			// map the templates
			indxAls = make(map[string]int)
			for i, hdr := range record {
				indxAls[hdr] = i
			}
			continue
		}
		rowNr++ // increment the rowNr after checking if it's not the end of file
		agReq := agents.NewAgentRequest(
			config.NewSliceDP(record, indxAls), reqVars,
			nil, nil, nil, rdr.Config().Tenant,
			rdr.cgrCfg.GeneralCfg().DefaultTenant,
			utils.FirstNonEmpty(rdr.Config().Timezone,
				rdr.cgrCfg.GeneralCfg().DefaultTimezone),
			rdr.fltrS, nil) // create an AgentRequest
		if pass, err := rdr.fltrS.Pass(context.TODO(), agReq.Tenant, rdr.Config().Filters,
			agReq); err != nil {
			utils.Logger.Warning(
				fmt.Sprintf("<%s> reading file: <%s> row <%d>, ignoring due to filter error: <%s>",
					utils.ERs, absPath, rowNr, err.Error()))
			return err
		} else if !pass {
			continue
		}
		if err = agReq.SetFields(rdr.Config().Fields); err != nil {
			utils.Logger.Warning(
				fmt.Sprintf("<%s> reading file: <%s> row <%d>, ignoring due to error: <%s>",
					utils.ERs, absPath, rowNr, err.Error()))
			return
		}
		// take OriginID and OriginHost to compose CGRID
		orgID, err := agReq.CGRRequest.FieldAsString([]string{utils.OriginID})
		if err == utils.ErrNotFound {
			utils.Logger.Warning(
				fmt.Sprintf("<%s> Missing <OriginID> field for row <%d> , <%s>",
					utils.ERs, rowNr, record))
			continue
		}
		orgHost, err := agReq.CGRRequest.FieldAsString([]string{utils.OriginHost})
		if err == utils.ErrNotFound {
			utils.Logger.Warning(
				fmt.Sprintf("<%s> Missing <OriginHost> field for row <%d> , <%s>",
					utils.ERs, rowNr, record))
			continue
		}
		cgrID := utils.Sha1(orgID, orgHost)
		// take Partial field from NavigableMap
		partial, _ := agReq.CGRRequest.FieldAsString([]string{utils.Partial})
		cgrEv := utils.NMAsCGREvent(agReq.CGRRequest, agReq.Tenant, utils.NestingSep, agReq.Opts)
		if val, has := rdr.cache.Get(cgrID); !has {
			// first row seen for this CGRID
			if utils.IsSliceMember([]string{"false", utils.EmptyString}, partial) { // complete CDR
				rdr.rdrEvents <- &erEvent{
					cgrEvent: cgrEv,
					rdrCfg:   rdr.Config(),
				}
				evsPosted++
			} else {
				// incomplete: park it in the cache until its pair arrives
				rdr.cache.Set(cgrID,
					[]*utils.CGREvent{cgrEv}, nil)
			}
		} else {
			origCgrEvs := val.([]*utils.CGREvent)
			origCgrEvs = append(origCgrEvs, cgrEv)
			if utils.IsSliceMember([]string{"false", utils.EmptyString}, partial) { // complete CDR
				//sort CGREvents based on AnswerTime and SetupTime
				sort.Slice(origCgrEvs, func(i, j int) bool {
					aTime, err := origCgrEvs[i].FieldAsTime(utils.AnswerTime, agReq.Timezone)
					if err != nil && err == utils.ErrNotFound {
						// fall back to SetupTime when AnswerTime is absent
						sTime, _ := origCgrEvs[i].FieldAsTime(utils.SetupTime, agReq.Timezone)
						sTime2, _ := origCgrEvs[j].FieldAsTime(utils.SetupTime, agReq.Timezone)
						return sTime.Before(sTime2)
					}
					aTime2, _ := origCgrEvs[j].FieldAsTime(utils.AnswerTime, agReq.Timezone)
					return aTime.Before(aTime2)
				})
				// compose the CGREvent from slice
				// NOTE(review): later events overwrite earlier ones key-by-key
				cgrEv := new(utils.CGREvent)
				cgrEv.ID = utils.UUIDSha1Prefix()
				cgrEv.Time = utils.TimePointer(time.Now())
				cgrEv.APIOpts = map[string]interface{}{}
				for i, origCgrEv := range origCgrEvs {
					if i == 0 {
						cgrEv.Tenant = origCgrEv.Tenant
					}
					for key, value := range origCgrEv.Event {
						cgrEv.Event[key] = value
					}
					for key, val := range origCgrEv.APIOpts {
						cgrEv.APIOpts[key] = val
					}
				}
				rdr.rdrEvents <- &erEvent{
					cgrEvent: cgrEv,
					rdrCfg:   rdr.Config(),
				}
				evsPosted++
				// clear the nil value first so the eviction callback is not
				// triggered with the already-posted events
				rdr.cache.Set(cgrID, nil, nil)
				rdr.cache.Remove(cgrID)
			} else {
				// overwrite the cache value with merged NavigableMap
				rdr.cache.Set(cgrID, origCgrEvs, nil)
			}
		}
	}
	if rdr.Config().ProcessedPath != "" {
		// Finished with file, move it to processed folder
		outPath := path.Join(rdr.Config().ProcessedPath, fName)
		if err = os.Rename(absPath, outPath); err != nil {
			return
		}
	}
	utils.Logger.Info(
		fmt.Sprintf("%s finished processing file <%s>. Total records processed: %d, events posted: %d, run duration: %s",
			utils.ERs, absPath, rowNr, evsPosted, time.Now().Sub(timeStart)))
	return
}
// dumpToFile is the cache expiry callback used when the reader is configured
// with the *dump_to_file expiry action: it converts the cached partial events
// for one CGRID (value holds a []*utils.CGREvent) into CDR export records and
// writes them as CSV rows into a temporary file inside ProcessedPath.
// itmID is the cache key (CGRID), required by the callback signature.
func (rdr *PartialCSVFileER) dumpToFile(itmID string, value interface{}) {
	tmz := utils.FirstNonEmpty(rdr.Config().Timezone,
		rdr.cgrCfg.GeneralCfg().DefaultTimezone)
	origCgrEvs := value.([]*utils.CGREvent)
	if len(origCgrEvs) == 0 { // guard: indexing [0] below would panic on an empty slice
		return
	}
	for _, origCgrEv := range origCgrEvs {
		// complete CDRs are handled in the processFile function
		if partial, _ := origCgrEv.FieldAsString(utils.Partial); utils.IsSliceMember([]string{"false", utils.EmptyString}, partial) {
			return
		}
	}
	// Process the first event separately: its OriginID names the dump file
	cdr, err := engine.NewMapEvent(origCgrEvs[0].Event).AsCDR(rdr.cgrCfg, origCgrEvs[0].Tenant, tmz)
	if err != nil {
		utils.Logger.Warning(
			fmt.Sprintf("<%s> Converting Event : <%s> to cdr , ignoring due to error: <%s>",
				utils.ERs, utils.ToJSON(origCgrEvs[0].Event), err.Error()))
		return
	}
	record, err := cdr.AsExportRecord(rdr.Config().CacheDumpFields, nil, rdr.fltrS)
	if err != nil {
		utils.Logger.Warning(
			fmt.Sprintf("<%s> Converting CDR with CGRID: <%s> to record , ignoring due to error: <%s>",
				utils.ERs, cdr.CGRID, err.Error()))
		return
	}
	dumpFilePath := path.Join(rdr.Config().ProcessedPath, fmt.Sprintf("%s%s.%d",
		cdr.OriginID, utils.TmpSuffix, time.Now().Unix()))
	fileOut, err := os.Create(dumpFilePath)
	if err != nil {
		utils.Logger.Err(fmt.Sprintf("<%s> Failed creating %s, error: %s",
			utils.ERs, dumpFilePath, err.Error()))
		return
	}
	defer fileOut.Close() // fix: the file handle was previously never closed
	csvWriter := csv.NewWriter(fileOut)
	// fix: guard the separator option; indexing [0] on an empty string paniced.
	// When absent, csv.Writer keeps its default comma separator.
	if sep := utils.IfaceAsString(rdr.Config().Opts[utils.CSV+utils.FieldSepOpt]); len(sep) != 0 {
		csvWriter.Comma = rune(sep[0])
	}
	if err = csvWriter.Write(record); err != nil {
		utils.Logger.Err(fmt.Sprintf("<%s> Failed writing partial record %v to file: %s, error: %s",
			utils.ERs, record, dumpFilePath, err.Error()))
		return
	}
	// remaining events are appended as additional rows to the same file
	for _, origCgrEv := range origCgrEvs[1:] {
		cdr, err = engine.NewMapEvent(origCgrEv.Event).AsCDR(rdr.cgrCfg, origCgrEv.Tenant, tmz)
		if err != nil {
			utils.Logger.Warning(
				fmt.Sprintf("<%s> Converting Event : <%s> to cdr , ignoring due to error: <%s>",
					utils.ERs, utils.ToJSON(origCgrEv.Event), err.Error()))
			return
		}
		record, err = cdr.AsExportRecord(rdr.Config().CacheDumpFields, nil, rdr.fltrS)
		if err != nil {
			utils.Logger.Warning(
				fmt.Sprintf("<%s> Converting CDR with CGRID: <%s> to record , ignoring due to error: <%s>",
					utils.ERs, cdr.CGRID, err.Error()))
			return
		}
		if err = csvWriter.Write(record); err != nil {
			utils.Logger.Err(fmt.Sprintf("<%s> Failed writing partial record %v to file: %s, error: %s",
				utils.ERs, record, dumpFilePath, err.Error()))
			return
		}
	}
	csvWriter.Flush()
	// fix: Flush reports errors only through Error(); previously they were dropped
	if err = csvWriter.Error(); err != nil {
		utils.Logger.Err(fmt.Sprintf("<%s> Failed flushing partial records to file: %s, error: %s",
			utils.ERs, dumpFilePath, err.Error()))
	}
}
// postCDR is the default cache expiry callback: it merges the cached partial
// events for one CGRID (value holds a []*utils.CGREvent), ordered by
// AnswerTime (falling back to SetupTime), into a single CGREvent and
// dispatches it on rdrEvents. itmID is the cache key (CGRID).
func (rdr *PartialCSVFileER) postCDR(itmID string, value interface{}) {
	if value == nil {
		return
	}
	tmz := utils.FirstNonEmpty(rdr.Config().Timezone,
		rdr.cgrCfg.GeneralCfg().DefaultTimezone)
	origCgrEvs := value.([]*utils.CGREvent)
	for _, origCgrEv := range origCgrEvs {
		// complete CDRs are handled in the processFile function
		if partial, _ := origCgrEv.FieldAsString(utils.Partial); utils.IsSliceMember([]string{"false", utils.EmptyString}, partial) {
			return
		}
	}
	// how to post incomplete CDR
	//sort CGREvents based on AnswerTime and SetupTime
	sort.Slice(origCgrEvs, func(i, j int) bool {
		aTime, err := origCgrEvs[i].FieldAsTime(utils.AnswerTime, tmz)
		if err != nil && err == utils.ErrNotFound {
			// fall back to SetupTime when AnswerTime is absent
			sTime, _ := origCgrEvs[i].FieldAsTime(utils.SetupTime, tmz)
			sTime2, _ := origCgrEvs[j].FieldAsTime(utils.SetupTime, tmz)
			return sTime.Before(sTime2)
		}
		aTime2, _ := origCgrEvs[j].FieldAsTime(utils.AnswerTime, tmz)
		return aTime.Before(aTime2)
	})
	// compose the CGREvent from slice; later events overwrite earlier ones
	// key-by-key, the tenant comes from the first (oldest) event
	cgrEv := &utils.CGREvent{
		ID:      utils.UUIDSha1Prefix(),
		Time:    utils.TimePointer(time.Now()),
		Event:   make(map[string]interface{}),
		APIOpts: make(map[string]interface{}),
	}
	for i, origCgrEv := range origCgrEvs {
		if i == 0 {
			cgrEv.Tenant = origCgrEv.Tenant
		}
		for key, value := range origCgrEv.Event {
			cgrEv.Event[key] = value
		}
		for key, val := range origCgrEv.APIOpts {
			cgrEv.APIOpts[key] = val
		}
	}
	rdr.rdrEvents <- &erEvent{
		cgrEvent: cgrEv,
		rdrCfg:   rdr.Config(),
	}
}

View File

@@ -24,7 +24,6 @@ import (
"net/rpc"
"os"
"path"
"reflect"
"strings"
"testing"
"time"
@@ -224,6 +223,7 @@ func testPartITKillEngine(t *testing.T) {
}
}
/*
func TestNewPartialCSVFileER(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
fltr := &engine.FilterS{}
@@ -540,3 +540,392 @@ func TestPartialCSVServe2(t *testing.T) {
t.Errorf("\nExpected <%+v>, \nReceived <%+v>", "no such file or directory", err)
}
}
func TestPartialCSVServe5(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
fltrs := &engine.FilterS{}
eR := &PartialCSVFileER{
cgrCfg: cfg,
cfgIdx: 0,
fltrS: fltrs,
rdrDir: "/tmp/partErs1/out",
rdrEvents: make(chan *erEvent, 1),
rdrError: make(chan error, 1),
rdrExit: make(chan struct{}),
conReqs: make(chan struct{}, 1),
}
eR.conReqs <- struct{}{}
filePath := "/tmp/partErs1/out"
err := os.MkdirAll(filePath, 0777)
if err != nil {
t.Error(err)
}
for i := 1; i < 4; i++ {
if _, err := os.Create(path.Join(filePath, fmt.Sprintf("file%d.csv", i))); err != nil {
t.Error(err)
}
}
eR.Config().RunDelay = 1 * time.Millisecond
if err := eR.Serve(); err != nil {
t.Error(err)
}
os.Create(path.Join(filePath, "file1.txt"))
eR.Config().RunDelay = 1 * time.Millisecond
if err := eR.Serve(); err != nil {
t.Error(err)
}
}
func TestPartialCSVProcessEvent(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
cfg.ERsCfg().Readers[0].ProcessedPath = ""
fltrs := &engine.FilterS{}
filePath := "/tmp/TestPartialCSVProcessEvent/"
if err := os.MkdirAll(filePath, 0777); err != nil {
t.Error(err)
}
file, err := os.Create(path.Join(filePath, "file1.csv"))
if err != nil {
t.Error(err)
}
file.Write([]byte(",a,ToR,b,c,d,e,f,g,h,i,j,k,l"))
file.Close()
eR := &PartialCSVFileER{
cgrCfg: cfg,
cfgIdx: 0,
fltrS: fltrs,
rdrDir: "/tmp/partErs1/out",
rdrEvents: make(chan *erEvent, 1),
rdrError: make(chan error, 1),
rdrExit: make(chan struct{}),
conReqs: make(chan struct{}, 1),
}
eR.conReqs <- struct{}{}
fname := "file1.csv"
if err := eR.processFile(filePath, fname); err != nil {
t.Error(err)
}
if err := os.RemoveAll(filePath); err != nil {
t.Error(err)
}
value := []*utils.CGREvent{
{
Tenant: "cgrates.org",
Event: map[string]interface{}{
"Partial": true,
},
},
{
Tenant: "cgrates2.org",
Event: map[string]interface{}{
"Partial": true,
},
},
}
eR.Config().ProcessedPath = "/tmp"
eR.dumpToFile("ID1", value)
}
func TestPartialCSVProcessEventPrefix(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
cfg.ERsCfg().Readers[0].ProcessedPath = ""
cfg.ERsCfg().Readers[0].Opts[utils.HeaderDefineCharOpt] = ":"
fltrs := &engine.FilterS{}
filePath := "/tmp/TestPartialCSVProcessEvent/"
if err := os.MkdirAll(filePath, 0777); err != nil {
t.Error(err)
}
file, err := os.Create(path.Join(filePath, "file1.csv"))
if err != nil {
t.Error(err)
}
file.Write([]byte(`:Test,,*voice,OriginCDR1,*prepaid,,cgrates.org,*call,1001,SUBJECT_TEST_1001,1002,2021-01-07 17:00:02 +0000 UTC,2021-01-07 17:00:04 +0000 UTC,1h2m
:Test2,,*voice,OriginCDR1,*prepaid,,cgrates.org,*call,1001,SUBJECT_TEST_1001,1002,2021-01-07 17:00:02 +0000 UTC,2021-01-07 17:00:04 +0000 UTC,1h2m`))
file.Close()
eR := &PartialCSVFileER{
cgrCfg: cfg,
cfgIdx: 0,
fltrS: fltrs,
rdrDir: "/tmp/ers/out/",
rdrEvents: make(chan *erEvent, 1),
rdrError: make(chan error, 1),
rdrExit: make(chan struct{}),
conReqs: make(chan struct{}, 1),
}
eR.conReqs <- struct{}{}
fname := "file1.csv"
if err := eR.processFile(filePath, fname); err != nil {
t.Error(err)
}
if err := os.RemoveAll(filePath); err != nil {
t.Error(err)
}
}
func TestPartialCSVProcessEventError1(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
cfg.ERsCfg().Readers[0].ProcessedPath = ""
fltrs := &engine.FilterS{}
filePath := "/tmp/TestPartialCSVProcessEvent/"
fname := "file1.csv"
if err := os.MkdirAll(filePath, 0777); err != nil {
t.Error(err)
}
file, err := os.Create(path.Join(filePath, fname))
if err != nil {
t.Error(err)
}
file.Write([]byte(`#ToR,OriginID,RequestType,Tenant,Category,Account,Subject,Destination,SetupTime,AnswerTime,Usage
,,*voice,OriginCDR1,*prepaid,,cgrates.org,*call,1001,SUBJECT_TEST_1001,1002,2021-01-07 17:00:02 +0000 UTC,2021-01-07 17:00:04 +0000 UTC,1h2m`))
file.Close()
eR := &PartialCSVFileER{
cgrCfg: cfg,
cfgIdx: 0,
fltrS: fltrs,
rdrDir: "/tmp/ers/out/",
rdrEvents: make(chan *erEvent, 1),
rdrError: make(chan error, 1),
rdrExit: make(chan struct{}),
conReqs: make(chan struct{}, 1),
}
eR.conReqs <- struct{}{}
eR.Config().Fields = []*config.FCTemplate{
{},
}
errExpect := "unsupported type: <>"
if err := eR.processFile(filePath, fname); err == nil || err.Error() != errExpect {
t.Errorf("Expected %v but received %v", errExpect, err)
}
if err := os.RemoveAll(filePath); err != nil {
t.Error(err)
}
}
func TestPartialCSVProcessEventError2(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
cfg.ERsCfg().Readers[0].Fields = []*config.FCTemplate{}
data := engine.NewInternalDB(nil, nil, true)
dm := engine.NewDataManager(data, cfg.CacheCfg(), nil)
cfg.ERsCfg().Readers[0].ProcessedPath = ""
fltrs := engine.NewFilterS(cfg, nil, dm)
filePath := "/tmp/TestPartialCSVProcessEvent/"
fname := "file1.csv"
if err := os.MkdirAll(filePath, 0777); err != nil {
t.Error(err)
}
file, err := os.Create(path.Join(filePath, "file1.csv"))
if err != nil {
t.Error(err)
}
file.Write([]byte(`#ToR,OriginID,RequestType,Tenant,Category,Account,Subject,Destination,SetupTime,AnswerTime,Usage
,,*voice,OriginCDR1,*prepaid,,cgrates.org,*call,1001,SUBJECT_TEST_1001,1002,2021-01-07 17:00:02 +0000 UTC,2021-01-07 17:00:04 +0000 UTC,1h2m`))
file.Close()
eR := &PartialCSVFileER{
cgrCfg: cfg,
cfgIdx: 0,
fltrS: fltrs,
rdrDir: "/tmp/ers/out/",
rdrEvents: make(chan *erEvent, 1),
rdrError: make(chan error, 1),
rdrExit: make(chan struct{}),
conReqs: make(chan struct{}, 1),
}
eR.conReqs <- struct{}{}
//
eR.Config().Filters = []string{"Filter1"}
errExpect := "NOT_FOUND:Filter1"
if err := eR.processFile(filePath, fname); err == nil || err.Error() != errExpect {
t.Errorf("Expected %v but received %v", errExpect, err)
}
//
eR.Config().Filters = []string{"*exists:~*req..Account:"}
errExpect = "Invalid fieldPath [ Account]"
if err := eR.processFile(filePath, fname); err == nil || err.Error() != errExpect {
t.Errorf("Expected %v but received %v", errExpect, err)
}
if err := os.RemoveAll(filePath); err != nil {
t.Error(err)
}
}
func TestPartialCSVDumpToFileErr1(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
fltrs := &engine.FilterS{}
var err error
utils.Logger, err = utils.Newlogger(utils.MetaStdLog, utils.EmptyString)
if err != nil {
t.Error(err)
}
utils.Logger.SetLogLevel(7)
buf := new(bytes.Buffer)
log.SetOutput(buf)
eR := &PartialCSVFileER{
cgrCfg: cfg,
cfgIdx: 0,
fltrS: fltrs,
rdrDir: "/tmp/partErs1/out",
rdrEvents: make(chan *erEvent, 1),
rdrError: make(chan error, 1),
rdrExit: make(chan struct{}),
conReqs: make(chan struct{}, 1),
}
eR.conReqs <- struct{}{}
value := []*utils.CGREvent{
{
Event: map[string]interface{}{
"Partial": true,
},
},
}
//ProcessedPath is not declared in order to trigger the
//file creation error
eR.dumpToFile("ID1", value)
errExpect := "[ERROR] <ERs> Failed creating /var/spool/cgrates/ers/out/.tmp."
if rcv := buf.String(); !strings.Contains(rcv, errExpect) {
t.Errorf("\nExpected %v but \nreceived %v", errExpect, rcv)
}
value = []*utils.CGREvent{
{
Event: map[string]interface{}{
//Value is false in order to stop
//further execution
"Partial": false,
},
},
}
eR.dumpToFile("ID1", value)
eR.postCDR("ID1", value)
}
func TestPartialCSVDumpToFileErr2(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
fltrs := &engine.FilterS{}
var err error
utils.Logger, err = utils.Newlogger(utils.MetaStdLog, utils.EmptyString)
if err != nil {
t.Error(err)
}
utils.Logger.SetLogLevel(7)
buf := new(bytes.Buffer)
log.SetOutput(buf)
eR := &PartialCSVFileER{
cgrCfg: cfg,
cfgIdx: 0,
fltrS: fltrs,
rdrDir: "/tmp/partErs1/out",
rdrEvents: make(chan *erEvent, 1),
rdrError: make(chan error, 1),
rdrExit: make(chan struct{}),
conReqs: make(chan struct{}, 1),
}
eR.conReqs <- struct{}{}
value := []*utils.CGREvent{
{
Event: map[string]interface{}{
//Value of field is string in order to trigger
//the converting error
"Partial": "notBool",
},
},
}
eR.dumpToFile("ID1", value)
errExpect := `[WARNING] <ERs> Converting Event : <{"Partial":"notBool"}> to cdr , ignoring due to error: <strconv.ParseBool: parsing "notBool": invalid syntax>`
if rcv := buf.String(); !strings.Contains(rcv, errExpect) {
t.Errorf("\nExpected %v but \nreceived %v", errExpect, rcv)
}
}
func TestPartialCSVDumpToFileErr3(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
fltrs := &engine.FilterS{}
var err error
utils.Logger, err = utils.Newlogger(utils.MetaStdLog, utils.EmptyString)
if err != nil {
t.Error(err)
}
utils.Logger.SetLogLevel(7)
buf := new(bytes.Buffer)
log.SetOutput(buf)
eR := &PartialCSVFileER{
cgrCfg: cfg,
cfgIdx: 0,
fltrS: fltrs,
rdrDir: "/tmp/partErs1/out",
rdrEvents: make(chan *erEvent, 1),
rdrError: make(chan error, 1),
rdrExit: make(chan struct{}),
conReqs: make(chan struct{}, 1),
}
eR.conReqs <- struct{}{}
value := []*utils.CGREvent{
{
Tenant: "cgrates.org",
Event: map[string]interface{}{
"Partial": true,
},
},
//Added a second event in order to pass the length check
{
Tenant: "cgrates2.org",
Event: map[string]interface{}{
"Partial": "notBool",
},
},
}
eR.Config().ProcessedPath = "/tmp"
eR.dumpToFile("ID1", value)
errExpect := `[WARNING] <ERs> Converting Event : <{"Partial":"notBool"}> to cdr , ignoring due to error: <strconv.ParseBool: parsing "notBool": invalid syntax>`
if rcv := buf.String(); !strings.Contains(rcv, errExpect) {
t.Errorf("\nExpected %v but \nreceived %v", errExpect, rcv)
}
}
func TestPartialCSVPostCDR(t *testing.T) {
cfg := config.NewDefaultCGRConfig()
fltrs := &engine.FilterS{}
eR := &PartialCSVFileER{
cgrCfg: cfg,
cfgIdx: 0,
fltrS: fltrs,
rdrDir: "/tmp/partErs1/out",
rdrEvents: make(chan *erEvent, 1),
rdrError: make(chan error, 1),
rdrExit: make(chan struct{}),
conReqs: make(chan struct{}, 1),
}
eR.conReqs <- struct{}{}
value := []*utils.CGREvent{
{
Tenant: "cgrates.org",
Event: map[string]interface{}{
"Partial": true,
},
APIOpts: map[string]interface{}{
"Opt1": "testOpt",
},
},
}
expEvent := &utils.CGREvent{
Tenant: "cgrates.org",
Event: value[0].Event,
APIOpts: value[0].APIOpts,
}
eR.postCDR("ID1", nil)
eR.postCDR("ID1", value)
select {
case data := <-eR.rdrEvents:
expEvent.ID = data.cgrEvent.ID
expEvent.Time = data.cgrEvent.Time
if !reflect.DeepEqual(expEvent, data.cgrEvent) {
t.Errorf("\nExpected %v but \nreceived %v", expEvent, data.cgrEvent)
}
case <-time.After(50 * time.Millisecond):
t.Error("Time limit exceeded")
}
}
*/

View File

@@ -34,35 +34,31 @@ type EventReader interface {
// NewEventReader instantiates the event reader based on configuration at index
func NewEventReader(cfg *config.CGRConfig, cfgIdx int,
rdrEvents chan *erEvent, rdrErr chan error,
rdrEvents, partialEvents chan *erEvent, rdrErr chan error,
fltrS *engine.FilterS, rdrExit chan struct{}) (er EventReader, err error) {
switch cfg.ERsCfg().Readers[cfgIdx].Type {
default:
err = fmt.Errorf("unsupported reader type: <%s>", cfg.ERsCfg().Readers[cfgIdx].Type)
case utils.MetaFileCSV:
return NewCSVFileER(cfg, cfgIdx, rdrEvents, rdrErr, fltrS, rdrExit)
case utils.MetaPartialCSV:
return NewPartialCSVFileER(cfg, cfgIdx, rdrEvents, rdrErr, fltrS, rdrExit)
return NewCSVFileER(cfg, cfgIdx, rdrEvents, partialEvents, rdrErr, fltrS, rdrExit)
case utils.MetaFileXML:
return NewXMLFileER(cfg, cfgIdx, rdrEvents, rdrErr, fltrS, rdrExit)
return NewXMLFileER(cfg, cfgIdx, rdrEvents, partialEvents, rdrErr, fltrS, rdrExit)
case utils.MetaFileFWV:
return NewFWVFileER(cfg, cfgIdx, rdrEvents, rdrErr, fltrS, rdrExit)
return NewFWVFileER(cfg, cfgIdx, rdrEvents, partialEvents, rdrErr, fltrS, rdrExit)
case utils.MetaKafkajsonMap:
return NewKafkaER(cfg, cfgIdx, rdrEvents, rdrErr, fltrS, rdrExit)
return NewKafkaER(cfg, cfgIdx, rdrEvents, partialEvents, rdrErr, fltrS, rdrExit)
case utils.MetaSQL:
return NewSQLEventReader(cfg, cfgIdx, rdrEvents, rdrErr, fltrS, rdrExit)
case utils.MetaFlatstore:
return NewFlatstoreER(cfg, cfgIdx, rdrEvents, rdrErr, fltrS, rdrExit)
return NewSQLEventReader(cfg, cfgIdx, rdrEvents, partialEvents, rdrErr, fltrS, rdrExit)
case utils.MetaFileJSON:
return NewJSONFileER(cfg, cfgIdx, rdrEvents, rdrErr, fltrS, rdrExit)
return NewJSONFileER(cfg, cfgIdx, rdrEvents, partialEvents, rdrErr, fltrS, rdrExit)
case utils.MetaAMQPjsonMap:
return NewAMQPER(cfg, cfgIdx, rdrEvents, rdrErr, fltrS, rdrExit)
return NewAMQPER(cfg, cfgIdx, rdrEvents, partialEvents, rdrErr, fltrS, rdrExit)
case utils.MetaS3jsonMap:
return NewS3ER(cfg, cfgIdx, rdrEvents, rdrErr, fltrS, rdrExit)
return NewS3ER(cfg, cfgIdx, rdrEvents, partialEvents, rdrErr, fltrS, rdrExit)
case utils.MetaSQSjsonMap:
return NewSQSER(cfg, cfgIdx, rdrEvents, rdrErr, fltrS, rdrExit)
return NewSQSER(cfg, cfgIdx, rdrEvents, partialEvents, rdrErr, fltrS, rdrExit)
case utils.MetaAMQPV1jsonMap:
return NewAMQPv1ER(cfg, cfgIdx, rdrEvents, rdrErr, fltrS, rdrExit)
return NewAMQPv1ER(cfg, cfgIdx, rdrEvents, partialEvents, rdrErr, fltrS, rdrExit)
}
return
}

View File

@@ -36,7 +36,7 @@ func TestNewInvalidReader(t *testing.T) {
if len(cfg.ERsCfg().Readers) != 2 {
t.Errorf("Expecting: <2>, received: <%+v>", len(cfg.ERsCfg().Readers))
}
if _, err := NewEventReader(cfg, 1, nil, nil, &engine.FilterS{}, nil); err == nil || err.Error() != "unsupported reader type: <Invalid>" {
if _, err := NewEventReader(cfg, 1, nil, nil, nil, &engine.FilterS{}, nil); err == nil || err.Error() != "unsupported reader type: <Invalid>" {
t.Errorf("Expecting: <unsupported reader type: <Invalid>>, received: <%+v>", err)
}
}
@@ -61,7 +61,7 @@ func TestNewCsvReader(t *testing.T) {
rdrExit: nil,
conReqs: nil}
var expected EventReader = exp
if rcv, err := NewEventReader(cfg, 1, nil, nil, fltr, nil); err != nil {
if rcv, err := NewEventReader(cfg, 1, nil, nil, nil, fltr, nil); err != nil {
t.Errorf("Expecting: <nil>, received: <%+v>", err)
} else {
// because we use function make to init the channel when we create the EventReader reflect.DeepEqual
@@ -84,11 +84,11 @@ func TestNewKafkaReader(t *testing.T) {
if len(cfg.ERsCfg().Readers) != 2 {
t.Errorf("Expecting: <2>, received: <%+v>", len(cfg.ERsCfg().Readers))
}
expected, err := NewKafkaER(cfg, 1, nil, nil, fltr, nil)
expected, err := NewKafkaER(cfg, 1, nil, nil, nil, fltr, nil)
if err != nil {
t.Errorf("Expecting: <nil>, received: <%+v>", err)
}
if rcv, err := NewEventReader(cfg, 1, nil, nil, fltr, nil); err != nil {
if rcv, err := NewEventReader(cfg, 1, nil, nil, nil, fltr, nil); err != nil {
t.Errorf("Expecting: <nil>, received: <%+v>", err)
} else if !reflect.DeepEqual(expected, rcv) {
t.Errorf("Expecting: <%+v>, received: <%+v>", expected, rcv)
@@ -109,11 +109,11 @@ func TestNewSQLReader(t *testing.T) {
if len(cfg.ERsCfg().Readers) != 2 {
t.Errorf("Expecting: <2>, received: <%+v>", len(cfg.ERsCfg().Readers))
}
expected, err := NewSQLEventReader(cfg, 1, nil, nil, fltr, nil)
expected, err := NewSQLEventReader(cfg, 1, nil, nil, nil, fltr, nil)
if err != nil {
t.Errorf("Expecting: <nil>, received: <%+v>", err)
}
if rcv, err := NewEventReader(cfg, 1, nil, nil, fltr, nil); err != nil {
if rcv, err := NewEventReader(cfg, 1, nil, nil, nil, fltr, nil); err != nil {
t.Errorf("Expecting: <nil>, received: <%+v>", err)
} else if !reflect.DeepEqual(expected, rcv) {
t.Errorf("Expecting: <%+v>, received: <%+v>", expected, rcv)
@@ -131,8 +131,183 @@ func TestNewSQLReaderError(t *testing.T) {
reader.SourcePath = "#"
reader.ProcessedPath = ""
expected := "unknown db_type "
_, err := NewSQLEventReader(cfg, 0, nil, nil, fltr, nil)
_, err := NewSQLEventReader(cfg, 0, nil, nil, nil, fltr, nil)
if err == nil || err.Error() != expected {
t.Errorf("Expecting: <%+v>, received: <%+v>", expected, err)
}
}
// TestNewFileXMLReader checks that NewEventReader dispatches the
// *file_xml reader type to NewXMLFileER and that both constructors
// produce equivalent readers.
func TestNewFileXMLReader(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	fltr := &engine.FilterS{}
	cfg.ERsCfg().Readers[0].Type = utils.MetaFileXML
	want, err := NewXMLFileER(cfg, 0, nil, nil, nil, fltr, nil)
	if err != nil {
		t.Error(err)
	}
	got, err := NewEventReader(cfg, 0, nil, nil, nil, fltr, nil)
	if err != nil {
		t.Error(err)
		return
	}
	// conReqs channels are freshly allocated by each constructor, so
	// blank them out before the deep comparison.
	got.(*XMLFileER).conReqs = nil
	want.(*XMLFileER).conReqs = nil
	if !reflect.DeepEqual(want, got) {
		t.Errorf("Expecting %v but received %v", want, got)
	}
}
// TestNewFileFWVReader checks that NewEventReader dispatches the
// *file_fwv reader type to NewFWVFileER and that both constructors
// produce equivalent readers.
func TestNewFileFWVReader(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	fltr := &engine.FilterS{}
	cfg.ERsCfg().Readers[0].Type = utils.MetaFileFWV
	expected, err := NewFWVFileER(cfg, 0, nil, nil, nil, fltr, nil)
	if err != nil {
		t.Error(err)
	}
	rcv, err := NewEventReader(cfg, 0, nil, nil, nil, fltr, nil)
	if err != nil {
		// was t.Error(nil), which swallowed the real error and
		// reported a useless nil failure
		t.Error(err)
	} else {
		// conReqs channels are freshly allocated by each constructor,
		// so blank them out before the deep comparison
		rcv.(*FWVFileER).conReqs = nil
		expected.(*FWVFileER).conReqs = nil
		if !reflect.DeepEqual(expected, rcv) {
			t.Errorf("Expecting %v but received %v", expected, rcv)
		}
	}
}
// TestNewJSONReader checks that NewEventReader dispatches the
// *file_json reader type to NewJSONFileER and that both constructors
// produce equivalent readers.
func TestNewJSONReader(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	fltr := &engine.FilterS{}
	cfg.ERsCfg().Readers[0].Type = utils.MetaFileJSON
	want, err := NewJSONFileER(cfg, 0, nil, nil, nil, fltr, nil)
	if err != nil {
		t.Error(err)
	}
	got, err := NewEventReader(cfg, 0, nil, nil, nil, fltr, nil)
	if err != nil {
		t.Error(err)
		return
	}
	// conReqs channels are freshly allocated by each constructor, so
	// blank them out before the deep comparison.
	got.(*JSONFileER).conReqs = nil
	want.(*JSONFileER).conReqs = nil
	if !reflect.DeepEqual(want, got) {
		t.Errorf("Expecting %v but received %v", want, got)
	}
}
// TestNewAMQPReader checks that NewEventReader dispatches the
// *amqp_json_map reader type and builds a reader equal to a manually
// assembled AMQPER.
func TestNewAMQPReader(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	fltr := &engine.FilterS{}
	cfg.ERsCfg().Readers[0].Type = utils.MetaAMQPjsonMap
	cfg.ERsCfg().Readers[0].ConcurrentReqs = -1
	amqpRdr := &AMQPER{
		cgrCfg:    cfg,
		cfgIdx:    0,
		fltrS:     fltr,
		rdrEvents: nil,
		rdrExit:   nil,
		rdrErr:    nil,
	}
	amqpRdr.dialURL = amqpRdr.Config().SourcePath
	amqpRdr.Config().ProcessedPath = ""
	amqpRdr.setOpts(map[string]interface{}{})
	amqpRdr.createPoster()
	var expected EventReader = amqpRdr
	rcv, err := NewEventReader(cfg, 0, nil, nil, nil, fltr, nil)
	if err != nil {
		t.Error(err)
		return
	}
	if !reflect.DeepEqual(expected, rcv) {
		t.Errorf("Expected %v but received %v", expected, rcv)
	}
}
// TestNewAMQPv1Reader checks that NewEventReader dispatches the
// *amqpv1_json_map reader type and builds a reader equal to a manually
// assembled AMQPv1ER.
func TestNewAMQPv1Reader(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	fltr := &engine.FilterS{}
	cfg.ERsCfg().Readers[0].Type = utils.MetaAMQPV1jsonMap
	cfg.ERsCfg().Readers[0].ConcurrentReqs = -1
	v1Rdr := &AMQPv1ER{
		cgrCfg:    cfg,
		cfgIdx:    0,
		fltrS:     fltr,
		rdrEvents: nil,
		rdrExit:   nil,
		rdrErr:    nil,
	}
	v1Rdr.Config().ProcessedPath = ""
	v1Rdr.Config().Opts = map[string]interface{}{}
	v1Rdr.createPoster()
	var expected EventReader = v1Rdr
	rcv, err := NewEventReader(cfg, 0, nil, nil, nil, fltr, nil)
	if err != nil {
		t.Error(err)
		return
	}
	if !reflect.DeepEqual(expected, rcv) {
		t.Errorf("Expected \n%v but received \n%v", expected, rcv)
	}
}
// TestNewS3Reader checks that NewEventReader dispatches the
// *s3_json_map reader type and builds a reader equal to a manually
// assembled S3ER.
func TestNewS3Reader(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	fltr := &engine.FilterS{}
	cfg.ERsCfg().Readers[0].Type = utils.MetaS3jsonMap
	cfg.ERsCfg().Readers[0].ConcurrentReqs = -1
	s3Rdr := &S3ER{
		cgrCfg:    cfg,
		cfgIdx:    0,
		fltrS:     fltr,
		rdrEvents: nil,
		rdrExit:   nil,
		rdrErr:    nil,
		bucket:    "cgrates_cdrs",
	}
	s3Rdr.Config().ProcessedPath = ""
	s3Rdr.Config().Opts = map[string]interface{}{}
	s3Rdr.createPoster()
	var expected EventReader = s3Rdr
	rcv, err := NewEventReader(cfg, 0, nil, nil, nil, fltr, nil)
	if err != nil {
		t.Error(err)
		return
	}
	if !reflect.DeepEqual(expected, rcv) {
		t.Errorf("Expected \n%v but received \n%v", expected, rcv)
	}
}
// TestNewSQSReader checks that NewEventReader dispatches the
// *sqs_json_map reader type and builds a reader equal to a manually
// assembled SQSER.
func TestNewSQSReader(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	fltr := &engine.FilterS{}
	cfg.ERsCfg().Readers[0].Type = utils.MetaSQSjsonMap
	cfg.ERsCfg().Readers[0].ConcurrentReqs = -1
	exp := &SQSER{
		cgrCfg:    cfg,
		cfgIdx:    0,
		fltrS:     fltr,
		rdrEvents: nil,
		rdrExit:   nil,
		rdrErr:    nil,
		queueID:   "cgrates_cdrs",
	}
	exp.Config().SourcePath = "string"
	exp.Config().ProcessedPath = ""
	exp.Config().Opts = map[string]interface{}{}
	exp.createPoster()
	var expected EventReader = exp
	rcv, err := NewEventReader(cfg, 0, nil, nil, nil, fltr, nil)
	if err != nil {
		// check the error before touching rcv: previously the
		// rcv.(*SQSER) assertion below ran first, which panics on a
		// nil reader when the constructor fails
		t.Error(err)
		return
	}
	// the AWS session is created inside the constructor and cannot be
	// rebuilt identically here, so copy it before the deep comparison
	exp.session = rcv.(*SQSER).session
	if !reflect.DeepEqual(expected, rcv) {
		t.Errorf("Expected \n%v but received \n%v", expected, rcv)
	}
}

View File

@@ -37,16 +37,17 @@ import (
// NewS3ER return a new s3 event reader
func NewS3ER(cfg *config.CGRConfig, cfgIdx int,
rdrEvents chan *erEvent, rdrErr chan error,
rdrEvents, partialEvents chan *erEvent, rdrErr chan error,
fltrS *engine.FilterS, rdrExit chan struct{}) (er EventReader, err error) {
rdr := &S3ER{
cgrCfg: cfg,
cfgIdx: cfgIdx,
fltrS: fltrS,
rdrEvents: rdrEvents,
rdrExit: rdrExit,
rdrErr: rdrErr,
cgrCfg: cfg,
cfgIdx: cfgIdx,
fltrS: fltrS,
rdrEvents: rdrEvents,
partialEvents: partialEvents,
rdrExit: rdrExit,
rdrErr: rdrErr,
}
if concReq := rdr.Config().ConcurrentReqs; concReq != -1 {
rdr.cap = make(chan struct{}, concReq)
@@ -65,10 +66,11 @@ type S3ER struct {
cfgIdx int // index of config instance within ERsCfg.Readers
fltrS *engine.FilterS
rdrEvents chan *erEvent // channel to dispatch the events created to
rdrExit chan struct{}
rdrErr chan error
cap chan struct{}
rdrEvents chan *erEvent // channel to dispatch the events created to
partialEvents chan *erEvent // channel to dispatch the partial events created to
rdrExit chan struct{}
rdrErr chan error
cap chan struct{}
awsRegion string
awsID string
@@ -131,7 +133,11 @@ func (rdr *S3ER) processMessage(body []byte) (err error) {
return
}
cgrEv := utils.NMAsCGREvent(agReq.CGRRequest, agReq.Tenant, utils.NestingSep, agReq.Opts)
rdr.rdrEvents <- &erEvent{
rdrEv := rdr.rdrEvents
if _, isPartial := cgrEv.APIOpts[partialOpt]; isPartial {
rdrEv = rdr.partialEvents
}
rdrEv <- &erEvent{
cgrEvent: cgrEv,
rdrCfg: rdr.Config(),
}

View File

@@ -81,7 +81,7 @@ func TestS3ER(t *testing.T) {
rdrErr = make(chan error, 1)
rdrExit = make(chan struct{}, 1)
if rdr, err = NewS3ER(cfg, 1, rdrEvents,
if rdr, err = NewS3ER(cfg, 1, rdrEvents, make(chan *erEvent, 1),
rdrErr, new(engine.FilterS), rdrExit); err != nil {
t.Fatal(err)
}
@@ -172,7 +172,7 @@ func TestNewS3ER(t *testing.T) {
},
}
rdr, err := NewS3ER(cfg, 1, nil,
rdr, err := NewS3ER(cfg, 1, nil, nil,
nil, nil, nil)
if err != nil {
t.Fatal(err)
@@ -213,7 +213,7 @@ func TestNewS3ERCase2(t *testing.T) {
},
}
rdr, err := NewS3ER(cfg, 0, nil,
rdr, err := NewS3ER(cfg, 0, nil, nil,
nil, nil, nil)
if err != nil {
t.Fatal(err)

299
ers/s3_test.go Normal file
View File

@@ -0,0 +1,299 @@
/*
Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
Copyright (C) ITsysCOM GmbH
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
*/
package ers
import (
"reflect"
"testing"
"time"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/cgrates/cgrates/config"
"github.com/cgrates/cgrates/engine"
"github.com/cgrates/cgrates/utils"
)
// TestS3ERServe builds an S3 reader through the constructor and checks
// that Serve starts without error when a run delay is configured.
func TestS3ERServe(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	reader, err := NewS3ER(cfg, 0, nil, nil, nil, nil, nil)
	if err != nil {
		t.Error(err)
	}
	reader.Config().RunDelay = time.Millisecond
	if srvErr := reader.Serve(); srvErr != nil {
		t.Error(srvErr)
	}
}
// TestS3ERServe2 checks that Serve returns no error on a manually
// assembled reader (zero RunDelay, no channels wired).
func TestS3ERServe2(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	// fields not listed keep their zero value (nil channels, nil poster)
	reader := &S3ER{
		cgrCfg:    cfg,
		awsRegion: "us-east-2",
		awsID:     "AWSId",
		awsKey:    "AWSAccessKeyId",
		awsToken:  "",
		bucket:    "cgrates_cdrs",
	}
	if err := reader.Serve(); err != nil {
		t.Error(err)
	}
}
// TestS3ERProcessMessage feeds a JSON body through processMessage with a
// constant-valued template and checks the produced CGREvent.
func TestS3ERProcessMessage(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	reader := &S3ER{
		cgrCfg:    cfg,
		fltrS:     new(engine.FilterS),
		rdrEvents: make(chan *erEvent, 1),
		rdrExit:   make(chan struct{}),
		rdrErr:    make(chan error, 1),
		awsRegion: "us-east-2",
		awsID:     "AWSId",
		awsKey:    "AWSAccessKeyId",
		awsToken:  "",
		bucket:    "cgrates_cdrs",
	}
	reader.Config().Fields = []*config.FCTemplate{
		{
			Tag:   "CGRID",
			Type:  utils.MetaConstant,
			Value: config.NewRSRParsersMustCompile("testCgrId", utils.InfieldSep),
			Path:  "*cgreq.CGRID",
		},
	}
	reader.Config().Fields[0].ComputePath()
	if err := reader.processMessage([]byte(`{"CGRID":"testCgrId"}`)); err != nil {
		t.Error(err)
	}
	wantEvent := &utils.CGREvent{
		Tenant: "cgrates.org",
		Event: map[string]interface{}{
			utils.CGRID: "testCgrId",
		},
		APIOpts: map[string]interface{}{},
	}
	select {
	case data := <-reader.rdrEvents:
		// ID and Time are generated per event; copy them over so the
		// deep comparison only checks the fields under test
		wantEvent.ID = data.cgrEvent.ID
		wantEvent.Time = data.cgrEvent.Time
		if !reflect.DeepEqual(data.cgrEvent, wantEvent) {
			t.Errorf("Expected %v but received %v", utils.ToJSON(wantEvent), utils.ToJSON(data.cgrEvent))
		}
	case <-time.After(50 * time.Millisecond):
		t.Error("Time limit exceeded")
	}
}
// TestS3ERProcessMessageError1 checks that a field template without a
// type makes processMessage fail with "unsupported type".
func TestS3ERProcessMessageError1(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	reader := &S3ER{
		cgrCfg:    cfg,
		fltrS:     new(engine.FilterS),
		rdrEvents: make(chan *erEvent, 1),
		rdrExit:   make(chan struct{}),
		rdrErr:    make(chan error, 1),
		awsRegion: "us-east-2",
		awsID:     "AWSId",
		awsKey:    "AWSAccessKeyId",
		awsToken:  "",
		bucket:    "cgrates_cdrs",
	}
	// an empty template has no Type, which processMessage rejects
	reader.Config().Fields = []*config.FCTemplate{{}}
	errExpect := "unsupported type: <>"
	if err := reader.processMessage([]byte(`{"CGRID":"testCgrId"}`)); err == nil || err.Error() != errExpect {
		t.Errorf("Expected %v but received %v", errExpect, err)
	}
}
// TestS3ERProcessMessageError2 checks filter handling in processMessage:
// an unknown filter ID surfaces as NOT_FOUND, while a passing inline
// filter lets the message through.
func TestS3ERProcessMessageError2(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	data := engine.NewInternalDB(nil, nil, true)
	dm := engine.NewDataManager(data, cfg.CacheCfg(), nil)
	cfg.ERsCfg().Readers[0].ProcessedPath = ""
	fltrs := engine.NewFilterS(cfg, nil, dm)
	reader := &S3ER{
		cgrCfg:    cfg,
		fltrS:     fltrs,
		rdrEvents: make(chan *erEvent, 1),
		rdrExit:   make(chan struct{}),
		rdrErr:    make(chan error, 1),
		awsRegion: "us-east-2",
		awsID:     "AWSId",
		awsKey:    "AWSAccessKeyId",
		awsToken:  "",
		bucket:    "cgrates_cdrs",
	}
	body := []byte(`{"CGRID":"testCgrId"}`)
	// referencing a filter profile that does not exist must error out
	reader.Config().Filters = []string{"Filter1"}
	errExpect := "NOT_FOUND:Filter1"
	if err := reader.processMessage(body); err == nil || err.Error() != errExpect {
		t.Errorf("Expected %v but received %v", errExpect, err)
	}
	// an inline *exists filter that matches lets processing succeed
	reader.Config().Filters = []string{"*exists:~*req..Account:"}
	if err := reader.processMessage(body); err != nil {
		t.Error(err)
	}
}
// TestS3ERProcessMessageError3 checks that a body which is not valid
// JSON makes processMessage return the decoder error.
func TestS3ERProcessMessageError3(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	reader := &S3ER{
		cgrCfg:    cfg,
		fltrS:     new(engine.FilterS),
		rdrEvents: make(chan *erEvent, 1),
		rdrExit:   make(chan struct{}),
		rdrErr:    make(chan error, 1),
		awsRegion: "us-east-2",
		awsID:     "AWSId",
		awsKey:    "AWSAccessKeyId",
		awsToken:  "",
		bucket:    "cgrates_cdrs",
	}
	errExpect := "invalid character 'i' looking for beginning of value"
	if err := reader.processMessage([]byte("invalid_format")); err == nil || err.Error() != errExpect {
		t.Errorf("Expected %v but received %v", errExpect, err)
	}
}
// TestS3ERParseOpts checks that parseOpts copies every supported AWS/S3
// option from the opts map onto the matching reader fields, then
// exercises createPoster with an empty processed path.
func TestS3ERParseOpts(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	rdr := &S3ER{
		cgrCfg:    cfg,
		cfgIdx:    0,
		fltrS:     new(engine.FilterS),
		rdrEvents: make(chan *erEvent, 1),
		rdrExit:   make(chan struct{}),
		rdrErr:    make(chan error, 1),
		cap:       nil,
		awsRegion: "us-east-2",
		awsID:     "AWSId",
		awsKey:    "AWSAccessKeyId",
		awsToken:  "",
		bucket:    "cgrates_cdrs",
		session:   nil,
		poster:    nil,
	}
	opts := map[string]interface{}{
		utils.S3Bucket:  "QueueID",
		utils.AWSRegion: "AWSRegion",
		utils.AWSKey:    "AWSKey",
		utils.AWSSecret: "AWSSecret",
		utils.AWSToken:  "AWSToken",
	}
	rdr.parseOpts(opts)
	if rdr.bucket != opts[utils.S3Bucket] ||
		rdr.awsRegion != opts[utils.AWSRegion] ||
		rdr.awsID != opts[utils.AWSKey] ||
		rdr.awsKey != opts[utils.AWSSecret] ||
		rdr.awsToken != opts[utils.AWSToken] {
		// fixed typo in the failure message ("corespond")
		t.Error("Fields do not correspond")
	}
	rdr.Config().Opts = map[string]interface{}{}
	rdr.Config().ProcessedPath = utils.EmptyString
	rdr.createPoster()
}
// TestS3ERIsClosed checks that isClosed reports false while rdrExit is
// empty and true once an exit signal has been queued.
func TestS3ERIsClosed(t *testing.T) {
	cfg := config.NewDefaultCGRConfig()
	reader := &S3ER{
		cgrCfg:    cfg,
		fltrS:     new(engine.FilterS),
		rdrEvents: make(chan *erEvent, 1),
		rdrExit:   make(chan struct{}, 1),
		rdrErr:    make(chan error, 1),
		awsRegion: "us-east-2",
		awsID:     "AWSId",
		awsKey:    "AWSAccessKeyId",
		awsToken:  "",
		bucket:    "cgrates_cdrs",
	}
	// nothing sent on rdrExit yet: the reader must still be open
	if reader.isClosed() {
		t.Errorf("Expected %v but received %v", false, true)
	}
	reader.rdrExit <- struct{}{}
	// the buffered exit signal flips the reader to closed
	if !reader.isClosed() {
		t.Errorf("Expected %v but received %v", true, false)
	}
}
// s3ClientMock stubs the subset of the AWS S3 client API used by the
// reader. Each call is routed to the corresponding function field so a
// test can inject per-call behavior; methods fall back to a sentinel
// error when the field is left nil (see the method implementations).
type s3ClientMock struct {
	ListObjectsV2PagesF func(input *s3.ListObjectsV2Input, fn func(*s3.ListObjectsV2Output, bool) bool) error
	GetObjectF func(input *s3.GetObjectInput) (*s3.GetObjectOutput, error)
	DeleteObjectF func(input *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
}
// ListObjectsV2Pages delegates to ListObjectsV2PagesF when set;
// otherwise it reports utils.ErrNotFound.
func (s *s3ClientMock) ListObjectsV2Pages(input *s3.ListObjectsV2Input, fn func(*s3.ListObjectsV2Output, bool) bool) error {
	if s.ListObjectsV2PagesF == nil {
		return utils.ErrNotFound
	}
	return s.ListObjectsV2PagesF(input, fn)
}
// GetObject delegates to GetObjectF when set; otherwise it reports
// utils.ErrNotImplemented.
func (s *s3ClientMock) GetObject(input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
	if s.GetObjectF == nil {
		return nil, utils.ErrNotImplemented
	}
	return s.GetObjectF(input)
}
// DeleteObject delegates to DeleteObjectF when set; otherwise it
// reports utils.ErrInvalidPath.
func (s *s3ClientMock) DeleteObject(input *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) {
	if s.DeleteObjectF == nil {
		return nil, utils.ErrInvalidPath
	}
	return s.DeleteObjectF(input)
}

View File

@@ -46,16 +46,17 @@ const (
// NewSQLEventReader return a new sql event reader
func NewSQLEventReader(cfg *config.CGRConfig, cfgIdx int,
rdrEvents chan *erEvent, rdrErr chan error,
rdrEvents, partialEvents chan *erEvent, rdrErr chan error,
fltrS *engine.FilterS, rdrExit chan struct{}) (er EventReader, err error) {
rdr := &SQLEventReader{
cgrCfg: cfg,
cfgIdx: cfgIdx,
fltrS: fltrS,
rdrEvents: rdrEvents,
rdrExit: rdrExit,
rdrErr: rdrErr,
cgrCfg: cfg,
cfgIdx: cfgIdx,
fltrS: fltrS,
rdrEvents: rdrEvents,
partialEvents: partialEvents,
rdrExit: rdrExit,
rdrErr: rdrErr,
}
if concReq := rdr.Config().ConcurrentReqs; concReq != -1 {
rdr.cap = make(chan struct{}, concReq)
@@ -86,10 +87,11 @@ type SQLEventReader struct {
expConnType string
expTableName string
rdrEvents chan *erEvent // channel to dispatch the events created to
rdrExit chan struct{}
rdrErr chan error
cap chan struct{}
rdrEvents chan *erEvent // channel to dispatch the events created to
partialEvents chan *erEvent // channel to dispatch the partial events created to
rdrExit chan struct{}
rdrErr chan error
cap chan struct{}
}
// Config returns the curent configuration
@@ -251,7 +253,11 @@ func (rdr *SQLEventReader) processMessage(msg map[string]interface{}) (err error
return
}
cgrEv := utils.NMAsCGREvent(agReq.CGRRequest, agReq.Tenant, utils.NestingSep, agReq.Opts)
rdr.rdrEvents <- &erEvent{
rdrEv := rdr.rdrEvents
if _, isPartial := cgrEv.APIOpts[partialOpt]; isPartial {
rdrEv = rdr.partialEvents
}
rdrEv <- &erEvent{
cgrEvent: cgrEv,
rdrCfg: rdr.Config(),
}

View File

@@ -228,7 +228,7 @@ func testSQLReader(t *testing.T) {
rdrEvents = make(chan *erEvent, 1)
rdrErr = make(chan error, 1)
rdrExit = make(chan struct{}, 1)
sqlER, err := NewEventReader(sqlCfg, 1, rdrEvents, rdrErr, new(engine.FilterS), rdrExit)
sqlER, err := NewEventReader(sqlCfg, 1, rdrEvents, make(chan *erEvent, 1), rdrErr, new(engine.FilterS), rdrExit)
if err != nil {
t.Fatal(err)
}
@@ -474,7 +474,7 @@ func testSQLReader3(t *testing.T) {
rdrEvents = make(chan *erEvent, 1)
rdrErr = make(chan error, 1)
rdrExit = make(chan struct{}, 1)
sqlER, err := NewEventReader(sqlCfg, 1, rdrEvents, rdrErr, new(engine.FilterS), rdrExit)
sqlER, err := NewEventReader(sqlCfg, 1, rdrEvents, make(chan *erEvent, 1), rdrErr, new(engine.FilterS), rdrExit)
if err != nil {
t.Fatal(err)
}
@@ -676,7 +676,7 @@ func TestErsSqlPostCDRS(t *testing.T) {
if len(cfg.ERsCfg().Readers) != 2 {
t.Errorf("Expecting: <2>, received: <%+v>", len(cfg.ERsCfg().Readers))
}
sqlEvReader, err := NewSQLEventReader(cfg, 1, nil, nil, fltr, nil)
sqlEvReader, err := NewSQLEventReader(cfg, 1, nil, nil, nil, fltr, nil)
if err != nil {
t.Errorf("Expecting: <nil>, received: <%+v>", err)
}

View File

@@ -37,16 +37,17 @@ import (
// NewSQSER return a new sqs event reader
func NewSQSER(cfg *config.CGRConfig, cfgIdx int,
rdrEvents chan *erEvent, rdrErr chan error,
rdrEvents, partialEvents chan *erEvent, rdrErr chan error,
fltrS *engine.FilterS, rdrExit chan struct{}) (er EventReader, err error) {
rdr := &SQSER{
cgrCfg: cfg,
cfgIdx: cfgIdx,
fltrS: fltrS,
rdrEvents: rdrEvents,
rdrExit: rdrExit,
rdrErr: rdrErr,
cgrCfg: cfg,
cfgIdx: cfgIdx,
fltrS: fltrS,
rdrEvents: rdrEvents,
partialEvents: partialEvents,
rdrExit: rdrExit,
rdrErr: rdrErr,
}
if concReq := rdr.Config().ConcurrentReqs; concReq != -1 {
rdr.cap = make(chan struct{}, concReq)
@@ -65,10 +66,11 @@ type SQSER struct {
cfgIdx int // index of config instance within ERsCfg.Readers
fltrS *engine.FilterS
rdrEvents chan *erEvent // channel to dispatch the events created to
rdrExit chan struct{}
rdrErr chan error
cap chan struct{}
rdrEvents chan *erEvent // channel to dispatch the events created to
partialEvents chan *erEvent // channel to dispatch the partial events created to
rdrExit chan struct{}
rdrErr chan error
cap chan struct{}
queueURL *string
awsRegion string
@@ -124,7 +126,11 @@ func (rdr *SQSER) processMessage(body []byte) (err error) {
return
}
cgrEv := utils.NMAsCGREvent(agReq.CGRRequest, agReq.Tenant, utils.NestingSep, agReq.Opts)
rdr.rdrEvents <- &erEvent{
rdrEv := rdr.rdrEvents
if _, isPartial := cgrEv.APIOpts[partialOpt]; isPartial {
rdrEv = rdr.partialEvents
}
rdrEv <- &erEvent{
cgrEvent: cgrEv,
rdrCfg: rdr.Config(),
}

View File

@@ -80,7 +80,7 @@ func TestSQSER(t *testing.T) {
rdrErr = make(chan error, 1)
rdrExit = make(chan struct{}, 1)
if rdr, err = NewSQSER(cfg, 1, rdrEvents,
if rdr, err = NewSQSER(cfg, 1, rdrEvents, make(chan *erEvent, 1),
rdrErr, new(engine.FilterS), rdrExit); err != nil {
t.Fatal(err)
}

View File

@@ -51,7 +51,7 @@ func TestNewSQSER(t *testing.T) {
Opts: make(map[string]interface{}),
},
}
rdr, err := NewSQSER(cfg, 0, nil,
rdr, err := NewSQSER(cfg, 0, nil, nil,
nil, nil, nil)
if err != nil {
t.Fatal(err)
@@ -79,7 +79,7 @@ func TestSQSERServeRunDelay0(t *testing.T) {
Opts: make(map[string]interface{}),
},
}
rdr, err := NewSQSER(cfg, 0, nil,
rdr, err := NewSQSER(cfg, 0, nil, nil,
nil, nil, nil)
if err != nil {
t.Fatal(err)
@@ -105,7 +105,7 @@ func TestSQSERServe(t *testing.T) {
Opts: make(map[string]interface{}),
},
}
rdr, err := NewSQSER(cfg, 0, nil,
rdr, err := NewSQSER(cfg, 0, nil, nil,
nil, nil, nil)
if err != nil {
t.Fatal(err)

View File

@@ -303,7 +303,6 @@ const (
StaticValuePrefix = "^"
CSV = "csv"
FWV = "fwv"
MetaPartialCSV = "*partial_csv"
MetaCombimed = "*combimed"
MetaMongo = "*mongo"
MetaPostgres = "*postgres"
@@ -369,7 +368,6 @@ const (
MetaS3jsonMap = "*s3_json_map"
ConfigPath = "/etc/cgrates/"
DisconnectCause = "DisconnectCause"
MetaFlatstore = "*flatstore"
MetaRating = "*rating"
NotAvailable = "N/A"
Call = "call"
@@ -2077,16 +2075,17 @@ const (
// EventReaderCfg
const (
IDCfg = "id"
CacheCfg = "cache"
FieldSepCfg = "field_separator"
RunDelayCfg = "run_delay"
SourcePathCfg = "source_path"
ProcessedPathCfg = "processed_path"
TenantCfg = "tenant"
FlagsCfg = "flags"
FieldsCfg = "fields"
CacheDumpFieldsCfg = "cache_dump_fields"
IDCfg = "id"
CacheCfg = "cache"
FieldSepCfg = "field_separator"
RunDelayCfg = "run_delay"
SourcePathCfg = "source_path"
ProcessedPathCfg = "processed_path"
TenantCfg = "tenant"
FlagsCfg = "flags"
FieldsCfg = "fields"
CacheDumpFieldsCfg = "cache_dump_fields"
PartialCommitFieldsCfg = "partial_commit_fields"
)
// RegistrarCCfg
@@ -2331,6 +2330,12 @@ const (
KafkaTopic = "kafkaTopic"
KafkaGroupID = "kafkaGroupID"
KafkaMaxWait = "kafkaMaxWait"
// partial
PartialOrderFieldOpt = "partialOrderField"
PartialCacheAction = "partialCacheAction"
PartialPathOpt = "partialPath"
PartialCSVFieldSepartor = "partialcsvFieldSeparator"
)
var (

View File

@@ -123,7 +123,7 @@ func (n *DataNode) Field(path []string) (*DataLeaf, error) {
if err != nil {
return nil, err
}
if idx < 0 { // in case the index is negative add the slice lenght
if idx < 0 { // in case the index is negative add the slice length
idx += len(n.Slice)
}
if idx < 0 || idx >= len(n.Slice) { // check if the index is in range [0,len(slice))
@@ -171,7 +171,7 @@ func (n *DataNode) fieldAsInterface(path []string) (interface{}, error) {
if err != nil {
return nil, err
}
if idx < 0 { // in case the index is negative add the slice lenght
if idx < 0 { // in case the index is negative add the slice length
idx += len(n.Slice)
}
if idx < 0 || idx >= len(n.Slice) { // check if the index is in range [0,len(slice))
@@ -233,7 +233,7 @@ func (n *DataNode) Set(path []string, val interface{}) (addedNew bool, err error
return true, err
}
// try dynamic path instead
// if idx < 0 { // in case the index is negative add the slice lenght
// if idx < 0 { // in case the index is negative add the slice length
// idx += len(n.Slice)
// path[0] = strconv.Itoa(idx) // update the slice to reflect on orderNavMap
// }
@@ -281,7 +281,7 @@ func (n *DataNode) Remove(path []string) error {
if err != nil {
return err // the only error is when we expect an index but is not int
}
if idx < 0 { // in case the index is negative add the slice lenght
if idx < 0 { // in case the index is negative add the slice length
idx += len(n.Slice)
path[0] = strconv.Itoa(idx) // update the path for OrdNavMap
}
@@ -341,7 +341,7 @@ func (n *DataNode) Append(path []string, val *DataLeaf) (idx int, err error) {
return node.Append(path[1:], val)
}
// try dynamic path instead
// if idx < 0 { // in case the index is negative add the slice lenght
// if idx < 0 { // in case the index is negative add the slice length
// idx += len(n.Slice)
// path[0] = strconv.Itoa(idx) // update the slice to reflect on orderNavMap
// }
@@ -401,7 +401,7 @@ func (n *DataNode) Compose(path []string, val *DataLeaf) (err error) {
return node.Compose(path[1:], val)
}
// try dynamic path instead
// if idx < 0 { // in case the index is negative add the slice lenght
// if idx < 0 { // in case the index is negative add the slice length
// idx += len(n.Slice)
// path[0] = strconv.Itoa(idx) // update the slice to reflect on orderNavMap
// }