add lcr strategy

Radu Ioan Fericean
2014-03-25 21:38:18 +02:00
parent d95d842ede
commit ee6b1cd74f
5 changed files with 159 additions and 20 deletions

View File

@@ -180,7 +180,7 @@ func (self *ApierV1) LoadRatingPlan(attrs AttrLoadRatingPlan, reply *string) err
}
//Automatic cache of the newly inserted rating plan
didNotChange := []string{}
if err := self.RatingDb.CacheRating(nil, nil, didNotChange, didNotChange); err != nil {
if err := self.RatingDb.CacheRating(nil, nil, didNotChange, didNotChange, didNotChange); err != nil {
return err
}
*reply = OK
@@ -198,7 +198,7 @@ func (self *ApierV1) LoadRatingProfile(attrs utils.TPRatingProfile, reply *strin
}
//Automatic cache of the newly inserted rating profile
didNotChange := []string{}
if err := self.RatingDb.CacheRating(didNotChange, didNotChange, []string{engine.RATING_PROFILE_PREFIX + attrs.KeyId()}, didNotChange); err != nil {
if err := self.RatingDb.CacheRating(didNotChange, didNotChange, []string{engine.RATING_PROFILE_PREFIX + attrs.KeyId()}, didNotChange, didNotChange); err != nil {
return err
}
*reply = OK
@@ -252,7 +252,7 @@ func (self *ApierV1) SetRatingProfile(attrs AttrSetRatingProfile, reply *string)
}
//Automatic cache of the newly inserted rating profile
didNotChange := []string{}
if err := self.RatingDb.CacheRating(didNotChange, didNotChange, []string{engine.RATING_PROFILE_PREFIX + keyId}, didNotChange); err != nil {
if err := self.RatingDb.CacheRating(didNotChange, didNotChange, []string{engine.RATING_PROFILE_PREFIX + keyId}, didNotChange, didNotChange); err != nil {
return err
}
*reply = OK
@@ -473,7 +473,7 @@ func (self *ApierV1) ReloadScheduler(input string, reply *string) error {
}
func (self *ApierV1) ReloadCache(attrs utils.ApiReloadCache, reply *string) error {
var dstKeys, rpKeys, rpfKeys, actKeys, shgKeys, rpAlsKeys, accAlsKeys []string
var dstKeys, rpKeys, rpfKeys, actKeys, shgKeys, rpAlsKeys, accAlsKeys, lcrKeys []string
if len(attrs.DestinationIds) > 0 {
dstKeys = make([]string, len(attrs.DestinationIds))
for idx, dId := range attrs.DestinationIds {
@@ -516,7 +516,13 @@ func (self *ApierV1) ReloadCache(attrs utils.ApiReloadCache, reply *string) erro
accAlsKeys[idx] = engine.ACC_ALIAS_PREFIX + alias
}
}
if err := self.RatingDb.CacheRating(dstKeys, rpKeys, rpfKeys, rpAlsKeys); err != nil {
if len(attrs.LCRIds) > 0 {
lcrKeys = make([]string, len(attrs.LCRIds))
for idx, lcrId := range attrs.LCRIds {
lcrKeys[idx] = engine.LCR_PREFIX + lcrId
}
}
if err := self.RatingDb.CacheRating(dstKeys, rpKeys, rpfKeys, rpAlsKeys, lcrKeys); err != nil {
return err
}
if err := self.AccountDb.CacheAccounting(actKeys, shgKeys, accAlsKeys); err != nil {
@@ -633,7 +639,12 @@ func (self *ApierV1) LoadTariffPlanFromFolder(attrs utils.AttrLoadTpFromFolder,
for idx, alias := range accAliases {
accAlsKeys[idx] = engine.ACC_ALIAS_PREFIX + alias
}
if err := self.RatingDb.CacheRating(dstKeys, rpKeys, rpfKeys, rpAlsKeys); err != nil {
lcrIds, _ := loader.GetLoadedIds(engine.LCR_PREFIX)
lcrKeys := make([]string, len(lcrIds))
for idx, lcrId := range lcrIds {
lcrKeys[idx] = engine.LCR_PREFIX + lcrId
}
if err := self.RatingDb.CacheRating(dstKeys, rpKeys, rpfKeys, rpAlsKeys, lcrKeys); err != nil {
return err
}
if err := self.AccountDb.CacheAccounting(actKeys, shgKeys, accAlsKeys); err != nil {
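
Editor's note: for orientation, a minimal sketch of how the extended ReloadCache call could be driven from a client once this change is in. It assumes the engine exposes ApierV1 over JSON-RPC on localhost:2012 and that an LCR with id *out:cgrates.org:rif exists; both the address and the id are assumptions, not part of this commit.

package main

import (
	"log"
	"net/rpc/jsonrpc"

	"github.com/cgrates/cgrates/utils"
)

func main() {
	// Assumed JSON-RPC address; adjust to the engine's configured listener.
	client, err := jsonrpc.Dial("tcp", "localhost:2012")
	if err != nil {
		log.Fatal(err)
	}
	var reply string
	attrs := utils.ApiReloadCache{
		// Hypothetical LCR id in the direction:tenant:customer form used by LCR.GetId().
		LCRIds: []string{"*out:cgrates.org:rif"},
	}
	if err := client.Call("ApierV1.ReloadCache", attrs, &reply); err != nil {
		log.Fatal(err)
	}
	log.Print(reply)
}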

View File

@@ -23,6 +23,7 @@ import (
"fmt"
"log"
"log/syslog"
"strings"
"time"
//"encoding/json"
"github.com/cgrates/cgrates/cache2go"
@@ -711,6 +712,54 @@ func (cd *CallDescriptor) Clone() *CallDescriptor {
}
}
func (cd *CallDescriptor) GetLCR() []*LCRCost {
return nil
func (cd *CallDescriptor) GetLCR() (*LCRCost, error) {
	lcr, err := dataStorage.GetLCR(cd.GetLCRKey(), false)
	if err != nil || lcr == nil {
		return nil, err
	}
	lcr.Sort()
	lcrCost := &LCRCost{
		TimeSpans: []*LCRTimeSpan{&LCRTimeSpan{StartTime: cd.TimeStart}},
	}
	for _, lcrActivation := range lcr.Activations {
		// TODO: filter entry by destination
		lcrEntry := lcrActivation.GetLCREntryForPrefix(cd.Destination)
		if lcrActivation.ActivationTime.Before(cd.TimeStart) ||
			lcrActivation.ActivationTime.Equal(cd.TimeStart) {
			lcrCost.TimeSpans[0].Entry = lcrEntry
		} else {
			if lcrActivation.ActivationTime.Before(cd.TimeEnd) {
				// add lcr timespan
				lcrCost.TimeSpans = append(lcrCost.TimeSpans, &LCRTimeSpan{
					StartTime: lcrActivation.ActivationTime,
					Entry:     lcrEntry,
				})
			}
		}
	}
	for _, ts := range lcrCost.TimeSpans {
		if ts.Entry.Strategy == LCR_STRATEGY_STATIC {
			for _, supplier := range strings.Split(ts.Entry.Suppliers, ";") {
				supplier = strings.TrimSpace(supplier)
				lcrCD := cd.Clone()
				lcrCD.Subject = supplier
				if cc, err := lcrCD.GetCost(); err != nil || cc == nil {
					ts.SupplierCosts = append(ts.SupplierCosts, &LCRSupplierCost{
						Supplier: supplier,
						Error:    err,
					})
				} else {
					ts.SupplierCosts = append(ts.SupplierCosts, &LCRSupplierCost{
						Supplier: supplier,
						Cost:     cc.Cost,
					})
				}
			}
		} else {
			// find rating profiles
			// sort according to strategy
			ts.Sort()
		}
	}
	return lcrCost, nil
}
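
Editor's note: a rough usage sketch of the new GetLCR, written as if inside the engine package with rating data already loaded and the time, log and fmt imports available. Every concrete value below (tenant, subject, destination, times) is invented.

// Sketch only: dataStorage must already hold an LCR matching cd.GetLCRKey().
cd := &CallDescriptor{
	Direction:   "*out",
	Tenant:      "cgrates.org",
	Subject:     "rif",
	Destination: "0723045326",
	TimeStart:   time.Date(2014, 3, 25, 12, 0, 0, 0, time.UTC),
	TimeEnd:     time.Date(2014, 3, 25, 12, 1, 0, 0, time.UTC),
}
lcrCost, err := cd.GetLCR()
if err != nil || lcrCost == nil {
	log.Fatal(err)
}
for _, ts := range lcrCost.TimeSpans {
	if ts.Entry == nil { // GetLCREntryForPrefix is still a stub in this commit
		continue
	}
	fmt.Printf("span from %v, strategy %s\n", ts.StartTime, ts.Entry.Strategy)
	for _, sc := range ts.SupplierCosts {
		fmt.Printf("  supplier %s cost %v err %v\n", sc.Supplier, sc.Cost, sc.Error)
	}
}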

View File

@@ -20,30 +20,106 @@ package engine
import (
"fmt"
"sort"
"time"
)
type LCR struct {
Tenant string
Customer string
Direction string
LCRs []*LCREntry
}
const (
LCR_STRATEGY_STATIC = "*static"
LCR_STRATEGY_LOWEST = "*lowest"
LCR_STRATEGY_HIGHEST = "*highest"
)
type LCREntry struct {
Destination string
TOR string
Strategy string
Suppliers string
type LCR struct {
Tenant string
Customer string
Direction string
Activations []*LCRActivation
}
type LCRActivation struct {
ActivationTime time.Time
Weight float64
Entries []*LCREntry
}
type LCREntry struct {
Destination string
TOR string
Strategy string
Suppliers string
}
type LCRCost struct {
TimeSpans []*LCRTimeSpan
}
type LCRTimeSpan struct {
StartTime time.Time
SupplierCosts []*LCRSupplierCost
Entry *LCREntry
}
type LCRSupplierCost struct {
Supplier string
Cost float64
Error error
}
func (lcr *LCR) GetId() string {
return fmt.Sprintf("%s:%s:%s", lcr.Direction, lcr.Tenant, lcr.Customer)
}
func (lcr *LCR) Len() int {
return len(lcr.Activations)
}
func (lcr *LCR) Swap(i, j int) {
lcr.Activations[i], lcr.Activations[j] = lcr.Activations[j], lcr.Activations[i]
}
func (lcr *LCR) Less(i, j int) bool {
return lcr.Activations[i].ActivationTime.Before(lcr.Activations[j].ActivationTime)
}
func (lcr *LCR) Sort() {
sort.Sort(lcr)
}
func (lcra *LCRActivation) GetLCREntryForPrefix(prefix string) *LCREntry {
return nil
}
func (lts *LCRTimeSpan) Sort() {
switch lts.Entry.Strategy {
case LCR_STRATEGY_LOWEST:
sort.Sort(LowestSupplierCostSorter(lts.SupplierCosts))
case LCR_STRATEGY_HIGHEST:
sort.Sort(HighestSupplierCostSorter(lts.SupplierCosts))
}
}
type LowestSupplierCostSorter []*LCRSupplierCost
func (lscs LowestSupplierCostSorter) Len() int {
return len(lscs)
}
func (lscs LowestSupplierCostSorter) Swap(i, j int) {
lscs[i], lscs[j] = lscs[j], lscs[i]
}
func (lscs LowestSupplierCostSorter) Less(i, j int) bool {
return lscs[i].Cost < lscs[j].Cost
}
type HighestSupplierCostSorter []*LCRSupplierCost
func (hscs HighestSupplierCostSorter) Len() int {
return len(hscs)
}
func (hscs HighestSupplierCostSorter) Swap(i, j int) {
hscs[i], hscs[j] = hscs[j], hscs[i]
}
func (hscs HighestSupplierCostSorter) Less(i, j int) bool {
return hscs[i].Cost > hscs[j].Cost
}
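
Editor's note: GetLCREntryForPrefix is left as a stub above (and GetLCR carries a TODO about filtering entries by destination), so the following is a purely speculative sketch of one way the lookup could behave: treat each entry's Destination as a literal prefix of the dialed number, prefer the longest match, and fall back to a hypothetical "*any" entry. This is an assumption about future behaviour, not what the commit implements, and it needs the strings import.

// Speculative sketch, not part of this commit: longest-prefix match over the
// activation's entries, falling back to a catch-all "*any" entry if present.
func (lcra *LCRActivation) GetLCREntryForPrefix(destination string) *LCREntry {
	var best, catchAll *LCREntry
	for _, entry := range lcra.Entries {
		if entry.Destination == "*any" {
			catchAll = entry
			continue
		}
		if strings.HasPrefix(destination, entry.Destination) &&
			(best == nil || len(entry.Destination) > len(best.Destination)) {
			best = entry
		}
	}
	if best != nil {
		return best
	}
	return catchAll
}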

View File

@@ -50,6 +50,7 @@ type CSVReader struct {
ratingPlans map[string]*RatingPlan
ratingProfiles map[string]*RatingProfile
sharedGroups map[string]*SharedGroup
lcrs map[string]*LCR
// file names
destinationsFn, ratesFn, destinationratesFn, timingsFn, destinationratetimingsFn, ratingprofilesFn,
sharedgroupsFn, actionsFn, actiontimingsFn, actiontriggersFn, accountactionsFn string
@@ -70,6 +71,7 @@ func NewFileCSVReader(dataStorage RatingStorage, accountingStorage AccountingSto
c.ratingPlans = make(map[string]*RatingPlan)
c.ratingProfiles = make(map[string]*RatingProfile)
c.sharedGroups = make(map[string]*SharedGroup)
c.lcrs = make(map[string]*LCR)
c.readerFunc = openFileCSVReader
c.rpAliases = make(map[string]string)
c.accAliases = make(map[string]string)
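
Editor's note: the loader only gains the lcrs map in this diff; the CSV parsing that would fill it is not shown. As a purely hypothetical illustration of the bookkeeping that map enables (tag, direction, tenant, customer, atime and the other identifiers below stand in for parsed CSV fields and are invented), a load step could group rows per LCR id and append activations:

// Hypothetical loading step, not present in this commit: group parsed CSV
// rows under one *LCR per direction:tenant:customer and record activations.
lcr, found := c.lcrs[tag]
if !found {
	lcr = &LCR{Direction: direction, Tenant: tenant, Customer: customer}
	c.lcrs[tag] = lcr
}
lcr.Activations = append(lcr.Activations, &LCRActivation{
	ActivationTime: atime,
	Entries: []*LCREntry{
		{Destination: destinationId, TOR: tor, Strategy: strategy, Suppliers: suppliers},
	},
})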

View File

@@ -285,6 +285,7 @@ type ApiReloadCache struct {
SharedGroupIds []string
RpAliases []string
AccAliases []string
LCRIds []string
}
type AttrCacheStats struct { // Add in the future filters here maybe so we avoid counting complete cache