ApierV1.GetCacheKeys implementation, precaching on start fixes #614

This commit is contained in:
DanB
2016-12-28 13:06:00 +01:00
parent a9b10b7f65
commit 36bff4d663
7 changed files with 360 additions and 6 deletions

View File

@@ -1070,6 +1070,246 @@ func (self *ApierV1) GetCacheStats(attrs utils.AttrCacheStats, reply *utils.Cach
return nil
}
// GetCacheKeys returns a list of keys available in cache based on query arguments.
// If keys are provided in arguments, they will be checked for existence.
func (v1 *ApierV1) GetCacheKeys(args utils.ArgsCacheKeys, reply *utils.ArgsCache) (err error) {
	// queryIDs resolves one cache category: when specific IDs are requested it
	// keeps only those actually present in cache; otherwise it lists every
	// cached key under prefix (stripping the prefix back to the bare ID).
	// The paginated result is returned as a pointer to a slice local to this
	// call, so every reply field gets its own backing variable — the previous
	// implementation took the address of a single shared ids variable, which
	// made later categories overwrite the slices earlier reply fields pointed
	// to. nil is returned when nothing matched, leaving the reply field unset.
	queryIDs := func(queried []string, prefix string) *[]string {
		var ids []string
		if len(queried) != 0 {
			for _, id := range queried {
				if _, hasIt := cache.Get(prefix + id); hasIt {
					ids = append(ids, id)
				}
			}
		} else {
			for _, key := range cache.GetEntryKeys(prefix) {
				ids = append(ids, key[len(prefix):])
			}
		}
		ids = args.Paginator.PaginateStringSlice(ids)
		if len(ids) == 0 {
			return nil
		}
		return &ids
	}
	if args.DestinationIDs != nil {
		if ids := queryIDs(*args.DestinationIDs, utils.DESTINATION_PREFIX); ids != nil {
			reply.DestinationIDs = ids
		}
	}
	if args.ReverseDestinationIDs != nil {
		if ids := queryIDs(*args.ReverseDestinationIDs, utils.REVERSE_DESTINATION_PREFIX); ids != nil {
			reply.ReverseDestinationIDs = ids
		}
	}
	if args.RatingPlanIDs != nil {
		if ids := queryIDs(*args.RatingPlanIDs, utils.RATING_PLAN_PREFIX); ids != nil {
			reply.RatingPlanIDs = ids
		}
	}
	if args.RatingProfileIDs != nil {
		if ids := queryIDs(*args.RatingProfileIDs, utils.RATING_PROFILE_PREFIX); ids != nil {
			reply.RatingProfileIDs = ids
		}
	}
	if args.ActionIDs != nil {
		if ids := queryIDs(*args.ActionIDs, utils.ACTION_PREFIX); ids != nil {
			reply.ActionIDs = ids
		}
	}
	if args.ActionPlanIDs != nil {
		if ids := queryIDs(*args.ActionPlanIDs, utils.ACTION_PLAN_PREFIX); ids != nil {
			reply.ActionPlanIDs = ids
		}
	}
	if args.ActionTriggerIDs != nil {
		if ids := queryIDs(*args.ActionTriggerIDs, utils.ACTION_TRIGGER_PREFIX); ids != nil {
			reply.ActionTriggerIDs = ids
		}
	}
	if args.SharedGroupIDs != nil {
		if ids := queryIDs(*args.SharedGroupIDs, utils.SHARED_GROUP_PREFIX); ids != nil {
			reply.SharedGroupIDs = ids
		}
	}
	if args.LCRids != nil {
		if ids := queryIDs(*args.LCRids, utils.LCR_PREFIX); ids != nil {
			reply.LCRids = ids
		}
	}
	if args.DerivedChargerIDs != nil {
		if ids := queryIDs(*args.DerivedChargerIDs, utils.DERIVEDCHARGERS_PREFIX); ids != nil {
			reply.DerivedChargerIDs = ids
		}
	}
	if args.AliasIDs != nil {
		if ids := queryIDs(*args.AliasIDs, utils.ALIASES_PREFIX); ids != nil {
			reply.AliasIDs = ids
		}
	}
	if args.ReverseAliasIDs != nil {
		if ids := queryIDs(*args.ReverseAliasIDs, utils.REVERSE_ALIASES_PREFIX); ids != nil {
			reply.ReverseAliasIDs = ids
		}
	}
	if args.ResourceLimitIDs != nil {
		if ids := queryIDs(*args.ResourceLimitIDs, utils.ResourceLimitsPrefix); ids != nil {
			reply.ResourceLimitIDs = ids
		}
	}
	return
}
func (self *ApierV1) LoadTariffPlanFromFolder(attrs utils.AttrLoadTpFromFolder, reply *string) error {
if len(attrs.FolderPath) == 0 {
return fmt.Errorf("%s:%s", utils.ErrMandatoryIeMissing.Error(), "FolderPath")

View File

@@ -63,12 +63,13 @@ func startRater(internalRaterChan chan rpcclient.RpcClientConnection, cacheDoneC
go func() {
defer close(cacheTaskChan)
loadHist, err := accountDb.GetLoadHistory(1, true, utils.NonTransactional)
/*loadHist, err := accountDb.GetLoadHistory(1, true, utils.NonTransactional)
if err != nil || len(loadHist) == 0 {
utils.Logger.Info(fmt.Sprintf("could not get load history: %v (%v)", loadHist, err))
cacheDoneChan <- struct{}{}
return
}
*/
var dstIDs, rvDstIDs, rplIDs, rpfIDs, actIDs, aplIDs, atrgIDs, sgIDs, lcrIDs, dcIDs, alsIDs, rvAlsIDs, rlIDs []string
if cfg.CacheConfig.Destinations.Precache {
dstIDs = nil // Precache all

View File

@@ -402,7 +402,7 @@ func main() {
if *flush {
dstIds, rplIds, rpfIds, actIds, shgIds, alsIds, lcrIds, dcsIds, rlIDs, aps = nil, nil, nil, nil, nil, nil, nil, nil, nil, nil // Should reload all these on flush
}
if err = rater.Call("ApierV1.ReloadCache", utils.AttrReloadCache{
if err = rater.Call("ApierV1.ReloadCache", utils.AttrReloadCache{ArgsCache: utils.ArgsCache{
DestinationIDs: &dstIds,
RatingPlanIDs: &rplIds,
RatingProfileIDs: &rpfIds,
@@ -413,7 +413,7 @@ func main() {
LCRids: &lcrIds,
DerivedChargerIDs: &dcsIds,
ResourceLimitIDs: &rlIDs,
}, &reply); err != nil {
}}, &reply); err != nil {
log.Printf("WARNING: Got error on cache reload: %s\n", err.Error())
}

View File

@@ -14,6 +14,23 @@
"http": ":2080",
},
"cache":{
"destinations": {"limit": 10000, "ttl":"0s", "precache": true},
"reverse_destinations": {"limit": 10000, "ttl":"0s", "precache": true},
"rating_plans": {"limit": 10000, "ttl":"0s","precache": true},
"rating_profiles": {"limit": 10000, "ttl":"0s", "precache": true},
"lcr": {"limit": 10000, "ttl":"0s", "precache": true},
"cdr_stats": {"limit": 10000, "ttl":"0s", "precache": true},
"actions": {"limit": 10000, "ttl":"0s", "precache": true},
"action_plans": {"limit": 10000, "ttl":"0s", "precache": true},
"action_triggers": {"limit": 10000, "ttl":"0s", "precache": true},
"shared_groups": {"limit": 10000, "ttl":"0s", "precache": true},
"aliases": {"limit": 10000, "ttl":"0s", "precache": true},
"reverse_aliases": {"limit": 10000, "ttl":"0s", "precache": true},
"derived_chargers": {"limit": 10000, "ttl":"0s", "precache": true},
"resource_limits": {"limit": 10000, "ttl":"0s", "precache": true},
},
"rals": {
"enabled": true,
"cdrstats_conns": [

View File

@@ -110,6 +110,28 @@ func TestTutITCacheStats(t *testing.T) {
} else if !reflect.DeepEqual(expectedStats, rcvStats) {
t.Errorf("Calling ApierV2.GetCacheStats expected: %+v, received: %+v", expectedStats, rcvStats)
}
expKeys := utils.ArgsCache{DestinationIDs: &[]string{"DST_1003", "DST_1002", "DST_DE_MOBILE", "DST_1007", "DST_FS"}}
var rcvKeys utils.ArgsCache
if err := tutLocalRpc.Call("ApierV1.GetCacheKeys", utils.ArgsCacheKeys{ArgsCache: utils.ArgsCache{DestinationIDs: &[]string{}}}, &rcvKeys); err != nil {
t.Error("Got error on ApierV2.GetCacheStats: ", err.Error())
} else {
if len(*expKeys.DestinationIDs) != len(*rcvKeys.DestinationIDs) {
t.Errorf("Expected: %+v, received: %+v", expKeys.DestinationIDs, rcvKeys.DestinationIDs)
}
}
if _, err := engine.StopStartEngine(tutLocalCfgPath, 1500); err != nil {
t.Fatal(err)
}
var err error
tutLocalRpc, err = jsonrpc.Dial("tcp", tutFsLocalCfg.RPCJSONListen) // We connect over JSON so we can also troubleshoot if needed
if err != nil {
t.Fatal(err)
}
if err := tutLocalRpc.Call("ApierV2.GetCacheStats", args, &rcvStats); err != nil {
t.Error("Got error on ApierV2.GetCacheStats: ", err.Error())
} else if !reflect.DeepEqual(expectedStats, rcvStats) {
t.Errorf("Calling ApierV2.GetCacheStats expected: %+v, received: %+v", expectedStats, rcvStats)
}
}
func TestTutITGetUsers(t *testing.T) {

View File

@@ -38,6 +38,39 @@ type Paginator struct {
SearchTerm string // Global matching pattern in items returned, partially used in some APIs
}
// PaginateStringSlice returns the window of in selected by the paginator's
// Limit and Offset, copied into a fresh slice so callers never alias in:
//   - empty input returns nil;
//   - no limit and no offset returns in unchanged (no copy);
//   - an offset beyond the input returns nil;
//   - a missing limit means "everything from offset to the end".
func (pgnt *Paginator) PaginateStringSlice(in []string) (out []string) {
	if len(in) == 0 {
		return
	}
	var limit, offset int
	if pgnt.Limit != nil && *pgnt.Limit > 0 {
		limit = *pgnt.Limit
	}
	if pgnt.Offset != nil && *pgnt.Offset > 0 {
		offset = *pgnt.Offset
	}
	if limit == 0 && offset == 0 { // no pagination requested
		return in
	}
	if offset > len(in) {
		return
	}
	// Convert (offset, limit) into an end index clamped to the input length.
	// limit == 0 here means only an offset was given: take the whole tail.
	// (The previous version added offset into limit and then sliced
	// in[offset:offset] in that case, wrongly returning an empty result;
	// its limit==0 recovery branch could never fire.)
	end := offset + limit
	if limit == 0 || end > len(in) {
		end = len(in)
	}
	out = make([]string, end-offset)
	copy(out, in[offset:end])
	return
}
// Deprecated version of TPDestination
type V1TPDestination struct {
TPid string // Tariff plan id
@@ -559,8 +592,7 @@ type AttrGetAccounts struct {
Limit int // Limit number of items retrieved
}
// Data used to do remote cache reloads via api
type AttrReloadCache struct {
type ArgsCache struct {
DestinationIDs *[]string
ReverseDestinationIDs *[]string
RatingPlanIDs *[]string
@@ -574,7 +606,20 @@ type AttrReloadCache struct {
AliasIDs *[]string
ReverseAliasIDs *[]string
ResourceLimitIDs *[]string
FlushAll bool // If provided, cache flush will be executed before any action
}
// Data used to do remote cache reloads via api.
// Embeds ArgsCache (the per-category ID lists selecting what to reload)
// plus reload-only options.
type AttrReloadCache struct {
ArgsCache
FlushAll bool // If provided, cache flush will be executed before any action
}
// ArgsCacheKeys queries keys present in cache, per category via the embedded
// ArgsCache lists, with result windowing controlled by the embedded Paginator.
type ArgsCacheKeys struct {
ArgsCache
Paginator
}
// CacheKeys currently carries no fields; it appears reserved as a reply type
// for cache-key APIs — confirm intended use before extending.
type CacheKeys struct {
}
type AttrCacheStats struct { // Add in the future filters here maybe so we avoid counting complete cache

View File

@@ -30,3 +30,32 @@ func TestNewDTCSFromRPKey(t *testing.T) {
t.Error("Received: ", dtcs)
}
}
func TestPaginatorPaginateStringSlice(t *testing.T) {
eOut := []string{"1", "2", "3", "4"}
pgnt := new(Paginator)
if rcv := pgnt.PaginateStringSlice([]string{"1", "2", "3", "4"}); !reflect.DeepEqual(eOut, rcv) {
t.Errorf("Expecting: %+v, received: %+v", eOut, rcv)
}
eOut = []string{"1", "2", "3"}
pgnt.Limit = IntPointer(3)
if rcv := pgnt.PaginateStringSlice([]string{"1", "2", "3", "4"}); !reflect.DeepEqual(eOut, rcv) {
t.Errorf("Expecting: %+v, received: %+v", eOut, rcv)
}
eOut = []string{"2", "3", "4"}
pgnt.Offset = IntPointer(1)
if rcv := pgnt.PaginateStringSlice([]string{"1", "2", "3", "4"}); !reflect.DeepEqual(eOut, rcv) {
t.Errorf("Expecting: %+v, received: %+v", eOut, rcv)
}
eOut = []string{}
pgnt.Offset = IntPointer(4)
if rcv := pgnt.PaginateStringSlice([]string{"1", "2", "3", "4"}); !reflect.DeepEqual(eOut, rcv) {
t.Errorf("Expecting: %+v, received: %+v", eOut, rcv)
}
eOut = []string{"3"}
pgnt.Offset = IntPointer(2)
pgnt.Limit = IntPointer(1)
if rcv := pgnt.PaginateStringSlice([]string{"1", "2", "3", "4"}); !reflect.DeepEqual(eOut, rcv) {
t.Errorf("Expecting: %+v, received: %+v", eOut, rcv)
}
}