diff --git a/ers/ers.go b/ers/ers.go
index caba5ab68..55111061a 100644
--- a/ers/ers.go
+++ b/ers/ers.go
@@ -81,11 +81,13 @@ func (erS *ERService) ListenAndServe(cfgRldChan chan struct{}) (err error) {
for {
select {
case err = <-erS.rdrErr: // got application error
+ erS.closeAllRdrs()
utils.Logger.Crit(
fmt.Sprintf("<%s> running reader got error: <%s>",
utils.ERs, err.Error()))
return
case <-erS.stopChan:
+ erS.closeAllRdrs()
return
case erEv := <-erS.rdrEvents:
if err := erS.processEvent(erEv.cgrEvent, erEv.rdrCfg, erEv.opts); err != nil {
@@ -302,3 +304,9 @@ func (erS *ERService) processEvent(cgrEv *utils.CGREvent,
return
}
+
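+// closeAllRdrs stops all active readers by closing their individual stop channels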
+func (erS *ERService) closeAllRdrs() {
+ for _, stopL := range erS.stopLsn {
+ close(stopL)
+ }
+}
diff --git a/ers/reader.go b/ers/reader.go
index 62d6b702d..11e936e87 100644
--- a/ers/reader.go
+++ b/ers/reader.go
@@ -57,6 +57,8 @@ func NewEventReader(cfg *config.CGRConfig, cfgIdx int,
return NewJSONFileER(cfg, cfgIdx, rdrEvents, rdrErr, fltrS, rdrExit)
case utils.MetaAMQPjsonMap:
return NewAMQPER(cfg, cfgIdx, rdrEvents, rdrErr, fltrS, rdrExit)
+ case utils.MetaS3jsonMap:
+ return NewS3ER(cfg, cfgIdx, rdrEvents, rdrErr, fltrS, rdrExit)
}
return
}
diff --git a/ers/s3.go b/ers/s3.go
new file mode 100644
index 000000000..a8e11b2fb
--- /dev/null
+++ b/ers/s3.go
@@ -0,0 +1,244 @@
+/*
+Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
+Copyright (C) ITsysCOM GmbH
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>
+*/
+
+package ers
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/cgrates/cgrates/agents"
+ "github.com/cgrates/cgrates/config"
+ "github.com/cgrates/cgrates/engine"
+ "github.com/cgrates/cgrates/utils"
+)
+
+// NewS3ER returns a new S3 event reader
+func NewS3ER(cfg *config.CGRConfig, cfgIdx int,
+ rdrEvents chan *erEvent, rdrErr chan error,
+ fltrS *engine.FilterS, rdrExit chan struct{}) (er EventReader, err error) {
+
+ rdr := &S3ER{
+ cgrCfg: cfg,
+ cfgIdx: cfgIdx,
+ fltrS: fltrS,
+ rdrEvents: rdrEvents,
+ rdrExit: rdrExit,
+ rdrErr: rdrErr,
+ }
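+ // use cap as a semaphore: pre-fill it with one token per allowed concurrent request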
+ if concReq := rdr.Config().ConcurrentReqs; concReq != -1 {
+ rdr.cap = make(chan struct{}, concReq)
+ for i := 0; i < concReq; i++ {
+ rdr.cap <- struct{}{}
+ }
+ }
+ rdr.parseOpts(rdr.Config().Opts)
+ return rdr, nil
+}
+
+// S3ER implements EventReader interface for messages read from an AWS S3 bucket
+type S3ER struct {
+ // sync.RWMutex
+ cgrCfg *config.CGRConfig
+ cfgIdx int // index of config instance within ERsCfg.Readers
+ fltrS *engine.FilterS
+
+ rdrEvents chan *erEvent // channel to dispatch the events created to
+ rdrExit chan struct{}
+ rdrErr chan error
+ cap chan struct{}
+
+ awsRegion string
+ awsID string
+ awsKey string
+ awsToken string
+ queueID string
+ session *session.Session
+
+ poster engine.Poster
+}
+
+// Config returns the current configuration
+func (rdr *S3ER) Config() *config.EventReaderCfg {
+ return rdr.cgrCfg.ERsCfg().Readers[rdr.cfgIdx]
+}
+
+// Serve will start the goroutines needed to watch the S3 bucket
+func (rdr *S3ER) Serve() (err error) {
+ var sess *session.Session
+ cfg := aws.Config{Endpoint: aws.String(rdr.Config().SourcePath)}
+ if len(rdr.awsRegion) != 0 {
+ cfg.Region = aws.String(rdr.awsRegion)
+ }
+ if len(rdr.awsID) != 0 &&
+ len(rdr.awsKey) != 0 {
+ cfg.Credentials = credentials.NewStaticCredentials(rdr.awsID, rdr.awsKey, rdr.awsToken)
+ }
+ if sess, err = session.NewSessionWithOptions(session.Options{Config: cfg}); err != nil {
+ return
+ }
+ rdr.session = sess
+
+ if rdr.Config().RunDelay == time.Duration(0) { // 0 disables the automatic read, it may still be triggered via API
+ return
+ }
+
+ go rdr.readLoop() // list and process the bucket contents asynchronously
+ return
+}
+
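+// processMessage unmarshals the JSON body, runs it through the reader filters and field templates and, on success, pushes the resulting event on rdrEvents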
+func (rdr *S3ER) processMessage(body []byte) (err error) {
+ var decodedMessage map[string]interface{}
+ if err = json.Unmarshal(body, &decodedMessage); err != nil {
+ return
+ }
+
+ agReq := agents.NewAgentRequest(
+ utils.MapStorage(decodedMessage), nil,
+ nil, nil, nil, rdr.Config().Tenant,
+ rdr.cgrCfg.GeneralCfg().DefaultTenant,
+ utils.FirstNonEmpty(rdr.Config().Timezone,
+ rdr.cgrCfg.GeneralCfg().DefaultTimezone),
+ rdr.fltrS, nil, nil) // create an AgentRequest
+ var pass bool
+ if pass, err = rdr.fltrS.Pass(agReq.Tenant, rdr.Config().Filters,
+ agReq); err != nil || !pass {
+ return
+ }
+ if err = agReq.SetFields(rdr.Config().Fields); err != nil {
+ return
+ }
+ rdr.rdrEvents <- &erEvent{
+ cgrEvent: config.NMAsCGREvent(agReq.CGRRequest, agReq.Tenant, utils.NestingSep),
+ rdrCfg: rdr.Config(),
+ opts: config.NMAsMapInterface(agReq.Opts, utils.NestingSep),
+ }
+ return
+}
+
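+// parseOpts extracts the S3 specific options from the reader configuration: bucket name (queueID), AWS region, access key, secret and session token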
+func (rdr *S3ER) parseOpts(opts map[string]interface{}) {
+ rdr.queueID = utils.DefaultQueueID
+ if val, has := opts[utils.QueueID]; has {
+ rdr.queueID = utils.IfaceAsString(val)
+ }
+ if val, has := opts[utils.AWSRegion]; has {
+ rdr.awsRegion = utils.IfaceAsString(val)
+ }
+ if val, has := opts[utils.AWSKey]; has {
+ rdr.awsID = utils.IfaceAsString(val)
+ }
+ if val, has := opts[utils.AWSSecret]; has {
+ rdr.awsKey = utils.IfaceAsString(val)
+ }
+ if val, has := opts[utils.AWSToken]; has {
+ rdr.awsToken = utils.IfaceAsString(val)
+ }
+}
+
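+// readLoop lists the keys of all objects in the configured bucket, then spawns one goroutine per key to download and process the object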
+func (rdr *S3ER) readLoop() (err error) {
+ scv := s3.New(rdr.session)
+ var keys []string
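+ // page through the bucket listing; returning false from the callback stops pagination early when the reader is shutting down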
+ if err = scv.ListObjectsV2Pages(&s3.ListObjectsV2Input{Bucket: aws.String(rdr.queueID)},
+ func(lovo *s3.ListObjectsV2Output, b bool) bool {
+ for _, objMeta := range lovo.Contents {
+ if objMeta.Key != nil {
+ keys = append(keys, *objMeta.Key)
+ }
+ }
+ return !rdr.isClosed()
+ }); err != nil {
+ rdr.rdrErr <- err
+ return
+ }
+ if rdr.isClosed() {
+ return
+ }
+ for _, key := range keys {
+ go rdr.readMsg(scv, key)
+ }
+ return
+}
+
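+// createPoster sets up the poster used to forward processed messages when a processed_path or poster options are configured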
+func (rdr *S3ER) createPoster() {
+ processedOpt := getProcessOptions(rdr.Config().Opts)
+ if len(processedOpt) == 0 &&
+ len(rdr.Config().ProcessedPath) == 0 {
+ return
+ }
+ rdr.poster = engine.NewKafkaPoster(utils.FirstNonEmpty(rdr.Config().ProcessedPath, rdr.Config().SourcePath),
+ rdr.cgrCfg.GeneralCfg().PosterAttempts, processedOpt)
+}
+
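+// isClosed reports, without blocking, whether the reader exit channel was closed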
+func (rdr *S3ER) isClosed() bool {
+ select {
+ case <-rdr.rdrExit:
+ return true
+ default:
+ return false
+ }
+}
+
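+// readMsg downloads a single object, processes it as a JSON event, deletes it from the bucket and, if a poster is configured, re-posts the raw message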
+func (rdr *S3ER) readMsg(scv *s3.S3, key string) (err error) {
+ if rdr.Config().ConcurrentReqs != -1 {
+ <-rdr.cap // do not try to read if the limit is reached
+ defer func() { rdr.cap <- struct{}{} }()
+ }
+ if rdr.isClosed() {
+ return
+ }
+
+ obj, err := scv.GetObject(&s3.GetObjectInput{Bucket: &rdr.queueID, Key: &key})
+ if err != nil {
+ rdr.rdrErr <- err
+ return
+ }
+ var msg []byte
+ if msg, err = ioutil.ReadAll(obj.Body); err != nil {
+ utils.Logger.Warning(
+ fmt.Sprintf("<%s> decoding message %s error: %s",
+ utils.ERs, key, err.Error()))
+ return
+ }
+ obj.Body.Close()
+ if err = rdr.processMessage(msg); err != nil {
+ utils.Logger.Warning(
+ fmt.Sprintf("<%s> processing message %s error: %s",
+ utils.ERs, key, err.Error()))
+ return
+ }
+ if _, err = scv.DeleteObject(&s3.DeleteObjectInput{Bucket: &rdr.queueID, Key: &key}); err != nil {
+ rdr.rdrErr <- err
+ return
+ }
+
+ if rdr.poster != nil { // post it
+ if err = rdr.poster.Post(msg, key); err != nil {
+ utils.Logger.Warning(
+ fmt.Sprintf("<%s> writing message %s error: %s",
+ utils.ERs, key, err.Error()))
+ return
+ }
+ }
+ return
+}
diff --git a/ers/s3_it_test.go b/ers/s3_it_test.go
new file mode 100644
index 000000000..bf8af074f
--- /dev/null
+++ b/ers/s3_it_test.go
@@ -0,0 +1,132 @@
+// +build integration
+
+/*
+Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
+Copyright (C) ITsysCOM GmbH
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>
+*/
+
+package ers
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/s3/s3manager"
+ "github.com/cgrates/cgrates/config"
+ "github.com/cgrates/cgrates/engine"
+ "github.com/cgrates/cgrates/utils"
+)
+
+var (
+ itTestS3 = flag.Bool("s3", false, "Run the test for S3Reader")
+)
+
+func TestS3ER(t *testing.T) {
+ if !*itTestS3 {
+ t.SkipNow()
+ }
+ cfg, err := config.NewCGRConfigFromJSONStringWithDefaults(`{
+"ers": { // EventReaderService
+ "enabled": true, // starts the EventReader service:
+ "readers": [
+ {
+ "id": "s3", // identifier of the EventReader profile
+ "type": "*s3_json_map", // reader type <*file_csv>
+ "run_delay": "-1", // sleep interval in seconds between consecutive runs, -1 to use automation via inotify or 0 to disable running all together
+ "concurrent_requests": 1024, // maximum simultaneous requests/files to process, 0 for unlimited
+ "source_path": "s3.us-east-2.amazonaws.com", // read data from this path
+ // "processed_path": "/var/spool/cgrates/ers/out", // move processed data here
+ "tenant": "cgrates.org", // tenant used by import
+ "filters": [], // limit parsing based on the filters
+ "flags": [], // flags to influence the event processing
+ "opts": {
+ "queueID": "cgrates-cdrs",
+ "awsRegion": "us-east-2",
+ "awsKey": "AWSAccessKeyId",
+ "awsSecret": "AWSSecretKey",
+ // "awsToken": "".
+ },
+ "fields":[ // import fields template, tag will match internally CDR field, in case of .csv value will be represented by index of the field value
+ {"tag": "CGRID", "type": "*composed", "value": "~*req.CGRID", "path": "*cgreq.CGRID"},
+ ],
+ },
+ ],
+},
+}`)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ rdrEvents = make(chan *erEvent, 1)
+ rdrErr = make(chan error, 1)
+ rdrExit = make(chan struct{}, 1)
+
+ if rdr, err = NewS3ER(cfg, 1, rdrEvents,
+ rdrErr, new(engine.FilterS), rdrExit); err != nil {
+ t.Fatal(err)
+ }
+ s3Rdr := rdr.(*S3ER)
+ var sess *session.Session
+ awsCfg := aws.Config{Endpoint: aws.String(rdr.Config().SourcePath)}
+ awsCfg.Region = aws.String(s3Rdr.awsRegion)
+ awsCfg.Credentials = credentials.NewStaticCredentials(s3Rdr.awsID, s3Rdr.awsKey, s3Rdr.awsToken)
+
+ if sess, err = session.NewSessionWithOptions(session.Options{Config: awsCfg}); err != nil {
+ t.Fatal(err)
+ }
+ scv := s3manager.NewUploader(sess)
+
+ randomCGRID := utils.UUIDSha1Prefix()
+ if _, err = scv.Upload(&s3manager.UploadInput{
+ Bucket: aws.String(s3Rdr.queueID),
+ Key: aws.String("home/test.json"),
+ Body: bytes.NewReader([]byte(fmt.Sprintf(`{"CGRID": "%s"}`, randomCGRID))),
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ if err = rdr.Serve(); err != nil {
+ t.Fatal(err)
+ }
+
+ select {
+ case err = <-rdrErr:
+ t.Error(err)
+ case ev := <-rdrEvents:
+ if ev.rdrCfg.ID != "s3" {
+ t.Errorf("Expected 's3' received `%s`", ev.rdrCfg.ID)
+ }
+ expected := &utils.CGREvent{
+ Tenant: "cgrates.org",
+ ID: ev.cgrEvent.ID,
+ Time: ev.cgrEvent.Time,
+ Event: map[string]interface{}{
+ "CGRID": randomCGRID,
+ },
+ }
+ if !reflect.DeepEqual(ev.cgrEvent, expected) {
+ t.Errorf("Expected %s ,received %s", utils.ToJSON(expected), utils.ToJSON(ev.cgrEvent))
+ }
+ case <-time.After(10 * time.Second):
+ t.Fatal("Timeout")
+ }
+ close(rdrExit)
+}
diff --git a/packages/debian/changelog b/packages/debian/changelog
index 88cc9599f..7df8e94aa 100644
--- a/packages/debian/changelog
+++ b/packages/debian/changelog
@@ -112,6 +112,7 @@ cgrates (0.11.0~dev) UNRELEASED; urgency=medium
* [AttributeS] Add support for *prefix and *suffix type
* [ConfigS] Add "redis_" prefix to "dataDB" option for redis
* [DataDB] Add support for redis with TLS connection ( + integration test )
+ * [ERs] Add support for *s3_json_map reader type
-- DanB Wed, 19 Feb 2020 13:25:52 +0200