Merge pull request #734 from Edwardro22/master

Added Tenant field to Stats and Resources
This commit is contained in:
Dan Christian Bogos
2017-09-13 18:40:33 +02:00
committed by GitHub
25 changed files with 631 additions and 66 deletions

View File

@@ -127,6 +127,7 @@ func testTPResGetTPResourceBeforeSet(t *testing.T) {
func testTPResSetTPResource(t *testing.T) {
tpRes = &utils.TPResource{
Tenant:"Tester",
TPid: "TPR1",
ID: "Res",
Filters: []*utils.TPRequestFilter{

View File

@@ -36,6 +36,7 @@ func (self *ApierV1) SetTPStat(attr utils.TPStats, reply *string) error {
// AttrGetTPStat is the argument set for ApierV1.GetTPStat, identifying a
// single TPStats profile inside a tariff plan.
type AttrGetTPStat struct {
	TPid   string // Tariff plan id
	Tenant string // Tenant the stat profile belongs to (added with the Tenant field migration)
	ID     string // Stat profile id within the tariff plan
}
@@ -57,6 +58,7 @@ func (self *ApierV1) GetTPStat(attr AttrGetTPStat, reply *utils.TPStats) error {
// AttrGetTPStatIds is the argument set for ApierV1.GetTPStatIds, querying the
// ids of stat profiles in a tariff plan, with optional paging through the
// embedded Paginator.
type AttrGetTPStatIds struct {
	TPid   string // Tariff plan id
	Tenant string // Tenant to scope the id lookup to
	utils.Paginator
}

View File

@@ -127,6 +127,7 @@ func testTPStatsGetTPStatBeforeSet(t *testing.T) {
func testTPStatsSetTPStat(t *testing.T) {
tpStat = &utils.TPStats{
Tenant: "Tester",
TPid: "TPS1",
ID: "Stat1",
Filters: []*utils.TPRequestFilter{

View File

@@ -65,4 +65,4 @@ if len(from_keys) > 0:
print('Migration complete.')
# no keys found
else:
print('No keys with keymask %s found in source database' % keymask)
print('No keys with keymask %s found in source database' % keymask)

View File

@@ -0,0 +1,101 @@
#!/usr/bin/python
# depends:
# ^ pymongo # install via: easy_install pymongo
# behaviour:
# ^ the script will "move" the collections if source and target server are the same
# but will "copy" (dump/restore) if source and target servers are different
#
# Connection parameters are read from the cgr_from_* / cgr_to_* environment
# variables (presumably exported by the rc7_to_rc8.sh wrapper -- confirm).
# NOTE(review): this is Python 2 code ("from urllib import quote_plus").

# Do not migrate collections with 0 document count.
# Works only if from/to is on same host.
ignore_empty_cols = True
# Overwrite target collections flag.
# Works only if from/to is on same host.
# If from/to hosts are different we use mongorestore which overwrites by default.
drop_target = False
# Local folder used by mongodump/mongorestore in the different-servers case.
dump_folder = 'dump'
import os
import sys
from pymongo import MongoClient
from urllib import quote_plus
from collections import OrderedDict
# Source connection settings.
from_host = os.environ["cgr_from_host"]
from_port = os.environ["cgr_from_port"]
from_db = os.environ["cgr_from_db"]
from_auth_db = os.environ["cgr_from_auth_db"]
from_user = os.environ["cgr_from_user"]
from_pass = os.environ["cgr_from_pass"]
# Target connection settings.
to_host = os.environ["cgr_to_host"]
to_port = os.environ["cgr_to_port"]
to_db = os.environ["cgr_to_db"]
to_auth_db = os.environ["cgr_to_auth_db"]
to_user = os.environ["cgr_to_user"]
to_pass = os.environ["cgr_to_pass"]
# same server: rename every collection from the source db into the target db
if from_host == to_host and from_port == to_port:
    print('Migrating on same server...')
    # password is URL-escaped; the auth db is used as the connect database
    mongo_from_url = 'mongodb://' + from_user + ':' + quote_plus(from_pass) + '@' + from_host + ':' + from_port + '/' + from_auth_db
    if from_pass == '': # disabled auth
        mongo_from_url = 'mongodb://' + from_host + ':' + from_port + '/' + from_db
    client = MongoClient(mongo_from_url)
    db = client[from_db]
    cols = db.collection_names()
    # collections found
    if len(cols) > 0:
        print('Found %d collections on source. Moving...' % len(cols))
        i = 0
        for col in cols:
            i += 1
            if not ignore_empty_cols or (ignore_empty_cols and db[col].count() > 0):
                print('Moving collection %s (%d of %d)...' % (col, i, len(cols)))
                try:
                    # renameCollection across databases moves the collection;
                    # dropTarget decides whether an existing target is replaced.
                    client.admin.command(OrderedDict([('renameCollection', from_db + '.' + col), ('to', to_db + '.' + col), ('dropTarget', drop_target)]))
                except:
                    # best-effort: report the error and continue with the next collection
                    e = sys.exc_info()[0]
                    print(e)
            else:
                print('Skipping empty collection %s (%d of %d)...' % (col, i, len(cols)))
    # no collections found
    else:
        print('No collections in source database.')
# different servers: dump from the source, restore into the target
else:
    import subprocess
    import os
    import shutil
    print('Migrating between different servers...')
    print('Dumping...')
    out = subprocess.check_output([
        'mongodump',
        '--host', '%s' % from_host,
        '-u', '%s' % from_user,
        '-p', '%s' % from_pass,
        '--authenticationDatabase', '%s' % from_auth_db,
        '--db', '%s' % from_db,
        '--port', '%s' % from_port,
        '-o', '%s' % dump_folder,
    ], stderr=subprocess.STDOUT)
    print('Dump complete.')
    print('Restoring...')
    # '--drop' flag plus the dump directory as the positional restore path
    out = subprocess.check_output([
        'mongorestore',
        '--host', '%s' % to_host,
        '-u', '%s' % to_user,
        '-p', '%s' % to_pass,
        '--authenticationDatabase', '%s' % to_auth_db,
        '--db', '%s' % to_db,
        '--port', '%s' % to_port,
        '--drop', '%s/%s' % (dump_folder, from_db),
    ], stderr=subprocess.STDOUT)
    print('Restore complete.')
print('Migration complete.')

View File

@@ -0,0 +1,68 @@
#!/usr/bin/python
# depends:
# ^ redis # install via easy_install redis
# asserts:
# ^ destination redis is not password protected when connected from source redis server
# (https://github.com/antirez/redis/pull/2507)
# behaviour:
# ^ the script will not overwrite keys on the destination server/database
#
# Connection parameters come from the cgr_from_* / cgr_to_* environment
# variables (presumably exported by the rc7_to_rc8.sh wrapper -- confirm).
# NOTE(review): this is Python 2 code ("except ..., e" syntax below).
keymask = '*'
timeout = 2000  # MIGRATE timeout in milliseconds
import time
import redis
import os
from_host = str(os.environ["cgr_from_host"])
from_port = int(os.environ["cgr_from_port"])
from_db = int(os.environ["cgr_from_db"])
from_pass = os.environ["cgr_from_pass"]
to_host = os.environ["cgr_to_host"]
to_port = int(os.environ["cgr_to_port"])
to_db = int(os.environ["cgr_to_db"])
# to_pass = os.environ["cgr_to_pass"] # Not used
from_redis = redis.Redis(host=from_host, port=from_port, password=from_pass, db=from_db)
to_redis = redis.Redis(host=to_host, port=to_port, db=to_db)
to_keys = to_redis.keys(keymask)
from_keys = from_redis.keys(keymask)
print('Found %d keys on source.' % len(from_keys))
print('Found %d keys on destination.' % len(to_keys))
# keys found
if len(from_keys) > 0:
    # same server: MOVE each key between databases
    if from_host == to_host and from_port == to_port:
        print('Migrating on same server...')
        i = 0
        for key in from_keys:
            i += 1
            print('Moving key %s (%d of %d)...' % (key, i, len(from_keys)))
            from_redis.execute_command('MOVE', key, to_db)
    # different servers: MIGRATE each key over the network
    else:
        print('Migrating between different servers...')
        i = 0
        for key in from_keys:
            i += 1
            # trailing comma: Python 2 print statement, suppresses the newline
            # so the later 'Done.' completes this line
            print('Moving key %s (%d of %d)...' % (key, i, len(from_keys))),
            try:
                from_redis.execute_command('MIGRATE', to_host, to_port, key, to_db, timeout)
            except redis.exceptions.ResponseError, e:
                # key already exists on the destination: skip (no overwrite);
                # any other error is fatal
                if not 'ERR Target key name is busy' in str(e):
                    raise e
            print('Done.')
    # done: re-count keys on both sides and report the delta
    from_keys_after = from_redis.keys(keymask)
    to_keys_after = to_redis.keys(keymask)
    print('There are now %d keys on source.' % len(from_keys_after))
    print('There are now %d keys on destination.' % len(to_keys_after))
    print('%d keys were moved' % (len(to_keys_after) - len(to_keys)))
    print('Migration complete.')
# no keys found
else:
    print('No keys with keymask %s found in source database' % keymask)

View File

@@ -0,0 +1,58 @@
/*
This script migrates CDRs from the old CGRateS tables to the new cdrs table,
but it only migrates CDRs where the duration is > 0.
If you also need CDRs with duration = 0, make the appropriate change in the
line beginning WHERE cdrs_primary.usage below.
The script processes 10,000 CDRs per batch to save system resources,
especially on systems where there are millions of CDRs to be migrated.
You can increase or lower the value of step in the line after BEGIN below.
You have to use 'CALL cgrates.migration();' to execute the script. If the
database is named other than the default, use that database name instead.
*/
DELIMITER //
CREATE PROCEDURE `migration`()
BEGIN
  /* DECLARE variables */
  DECLARE max_cdrs bigint;  /* highest id in rated_cdrs: loop end condition */
  DECLARE start_id bigint;  /* inclusive lower bound of the current batch */
  DECLARE end_id bigint;    /* exclusive upper bound of the current batch */
  DECLARE step bigint;      /* batch size */
  /* Optimize table for performance during the bulk insert */
  ALTER TABLE cdrs DISABLE KEYS;
  SET autocommit=0;
  SET unique_checks=0;
  SET foreign_key_checks=0;
  /* You must change the step var to commit every step rows inserted */
  SET step := 10000;
  SET start_id := 0;
  SET end_id := start_id + step;
  SET max_cdrs = (select max(id) from rated_cdrs);
  /* Walk rated_cdrs in [start_id, end_id) id windows */
  WHILE (start_id <= max_cdrs) DO
  INSERT INTO
  cdrs(cgrid,run_id,origin_host,source,origin_id,tor,request_type,direction,tenant,category,account,subject,destination,setup_time,pdd,answer_time,`usage`,supplier,disconnect_cause,extra_fields,cost_source,cost,cost_details,extra_info, created_at, updated_at, deleted_at)
  SELECT cdrs_primary.cgrid,rated_cdrs.runid as run_id,cdrs_primary.cdrhost as origin_host,cdrs_primary.cdrsource as source,cdrs_primary.accid as origin_id, cdrs_primary.tor,rated_cdrs.reqtype as request_type,rated_cdrs.direction, rated_cdrs.tenant,rated_cdrs.category, rated_cdrs.account, rated_cdrs.subject, rated_cdrs.destination,rated_cdrs.setup_time,rated_cdrs.pdd,rated_cdrs.answer_time,rated_cdrs.`usage`,rated_cdrs.supplier,rated_cdrs.disconnect_cause,cdrs_extra.extra_fields,cost_details.cost_source,rated_cdrs.cost,cost_details.timespans as cost_details,rated_cdrs.extra_info,rated_cdrs.created_at,rated_cdrs.updated_at, rated_cdrs.deleted_at
  FROM rated_cdrs
  INNER JOIN cdrs_primary ON rated_cdrs.cgrid = cdrs_primary.cgrid
  INNER JOIN cdrs_extra ON rated_cdrs.cgrid = cdrs_extra.cgrid
  INNER JOIN cost_details ON rated_cdrs.cgrid = cost_details.cgrid
  /* skip zero-duration CDRs; note the comparison uses the string literal '0' */
  WHERE cdrs_primary.`usage` > '0'
  /* do not re-insert runs that were already migrated into cdrs */
  AND not exists (select 1 from cdrs where cdrs.cgrid = cdrs_primary.cgrid AND cdrs.run_id=rated_cdrs.runid)
  AND rated_cdrs.id >= start_id
  AND rated_cdrs.id < end_id
  GROUP BY cgrid, run_id, origin_id;
  SET start_id = start_id + step;
  SET end_id = end_id + step;
  END WHILE;
  /* SET Table for live usage */
  SET autocommit=1;
  SET unique_checks=1;
  SET foreign_key_checks=1;
  ALTER TABLE cdrs ENABLE KEYS;
  OPTIMIZE TABLE cdrs;
END //
DELIMITER ;
CALL cgrates.migration();

View File

@@ -0,0 +1,79 @@
use cgrates;
--
-- Table structure for table `cdrs`
--
-- New rc8 CDR table: one row per (cgrid, run_id, origin_id) rating run.
DROP TABLE IF EXISTS cdrs;
CREATE TABLE cdrs (
  id int(11) NOT NULL AUTO_INCREMENT,
  cgrid char(40) NOT NULL,
  run_id varchar(64) NOT NULL,
  origin_host varchar(64) NOT NULL,
  source varchar(64) NOT NULL,
  origin_id varchar(64) NOT NULL,
  tor varchar(16) NOT NULL,
  request_type varchar(24) NOT NULL,
  direction varchar(8) NOT NULL,
  tenant varchar(64) NOT NULL,
  category varchar(32) NOT NULL,
  account varchar(128) NOT NULL,
  subject varchar(128) NOT NULL,
  destination varchar(128) NOT NULL,
  setup_time datetime NOT NULL,
  pdd DECIMAL(12,9) NOT NULL,
  answer_time datetime NOT NULL,
  -- `usage` must stay backquoted: USAGE is a reserved word in MySQL
  `usage` DECIMAL(30,9) NOT NULL,
  supplier varchar(128) NOT NULL,
  disconnect_cause varchar(64) NOT NULL,
  extra_fields text NOT NULL,
  cost_source varchar(64) NOT NULL,
  cost DECIMAL(20,4) NOT NULL,
  cost_details text,
  account_summary text,
  extra_info text,
  created_at TIMESTAMP NULL,
  updated_at TIMESTAMP NULL,
  deleted_at TIMESTAMP NULL,
  PRIMARY KEY (id),
  UNIQUE KEY cdrrun (cgrid, run_id, origin_id)
);
-- Session manager cost cache, keyed by (cgrid, run_id).
DROP TABLE IF EXISTS sm_costs;
CREATE TABLE sm_costs (
  id int(11) NOT NULL AUTO_INCREMENT,
  cgrid char(40) NOT NULL,
  run_id varchar(64) NOT NULL,
  origin_host varchar(64) NOT NULL,
  origin_id varchar(64) NOT NULL,
  cost_source varchar(64) NOT NULL,
  `usage` DECIMAL(30,9) NOT NULL,
  cost_details text,
  created_at TIMESTAMP NULL,
  deleted_at TIMESTAMP NULL,
  PRIMARY KEY (`id`),
  UNIQUE KEY costid (cgrid, run_id),
  KEY origin_idx (origin_host, origin_id),
  KEY run_origin_idx (run_id, origin_id),
  KEY deleted_at_idx (deleted_at)
);
--
-- Table structure for table `versions`
--
-- Tracks one schema/data version number per item name.
DROP TABLE IF EXISTS versions;
CREATE TABLE versions (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `item` varchar(64) NOT NULL,
  `version` int(11) NOT NULL,
  PRIMARY KEY (`id`),
  UNIQUE KEY `item` (`item`)
);
-- Convert `usage` from DECIMAL(30,9) to an integer DECIMAL(30) by multiplying
-- by 1e9 (presumably seconds -> nanoseconds -- confirm against the loader):
-- rename the old column aside, add the new one, convert, then drop the old.
ALTER TABLE cdrs CHANGE COLUMN `usage` `usage_old` DECIMAL(30,9);
ALTER TABLE cdrs ADD `usage` DECIMAL(30);
UPDATE cdrs SET `usage` = `usage_old` * 1000000000 WHERE usage_old IS NOT NULL;
ALTER TABLE cdrs DROP COLUMN usage_old;

View File

@@ -0,0 +1,47 @@
/*
This script migrates CDRs from the old CGRateS tables to the new cdrs table,
but it only migrates CDRs where the duration is > 0.
If you also need CDRs with duration = 0, make the appropriate change in the
line beginning WHERE cdrs_primary.usage below.
The script processes 10,000 CDRs per batch to save system resources,
especially on systems where there are millions of CDRs to be migrated.
You can increase or lower the value of step in the line after BEGIN below.
*/
DO $$
DECLARE
  max_cdrs bigint;  -- highest id in rated_cdrs: loop end condition
  start_id bigint;  -- inclusive lower bound of the current batch
  end_id bigint;    -- exclusive upper bound of the current batch
  step bigint;      -- batch size
BEGIN
  /* You must change the step var to commit every step rows inserted */
  step := 10000;
  start_id := 0;
  end_id := start_id + step;
  select max(id) INTO max_cdrs from rated_cdrs;
  /* Walk rated_cdrs in [start_id, end_id) id windows */
  WHILE start_id <= max_cdrs
  LOOP
  --RAISE NOTICE '%', (to_char(start_id, '99999999') || '-' || to_char(end_id, '99999999'));
  INSERT INTO
  cdrs(cgrid,run_id,origin_host,source,origin_id,tor,request_type,direction,tenant,category,account,subject,destination,setup_time,pdd,answer_time,usage,supplier,disconnect_cause,extra_fields,cost_source,cost,cost_details,extra_info, created_at, updated_at, deleted_at)
  SELECT cdrs_primary.cgrid,rated_cdrs.runid as run_id,cdrs_primary.cdrhost as origin_host,cdrs_primary.cdrsource as source,cdrs_primary.accid as origin_id, cdrs_primary.tor,rated_cdrs.reqtype as request_type,rated_cdrs.direction, rated_cdrs.tenant,rated_cdrs.category, rated_cdrs.account, rated_cdrs.subject, rated_cdrs.destination,rated_cdrs.setup_time,rated_cdrs.pdd,rated_cdrs.answer_time,rated_cdrs.usage,rated_cdrs.supplier,rated_cdrs.disconnect_cause,cdrs_extra.extra_fields,cost_details.cost_source,rated_cdrs.cost,cost_details.timespans as cost_details,rated_cdrs.extra_info,rated_cdrs.created_at,rated_cdrs.updated_at, rated_cdrs.deleted_at
  FROM rated_cdrs
  INNER JOIN cdrs_primary ON rated_cdrs.cgrid = cdrs_primary.cgrid
  INNER JOIN cdrs_extra ON rated_cdrs.cgrid = cdrs_extra.cgrid
  INNER JOIN cost_details ON rated_cdrs.cgrid = cost_details.cgrid
  /* skip zero-duration CDRs; note the comparison uses the string literal '0' */
  WHERE cdrs_primary.usage > '0'
  /* do not re-insert cgrids that already exist in cdrs
     (NOTE(review): unlike the MySQL variant this does not also match run_id -- confirm) */
  AND not exists (select 1 from cdrs c where c.cgrid = cdrs_primary.cgrid)
  AND rated_cdrs.id >= start_id
  AND rated_cdrs.id < end_id
  ;
  start_id = start_id + step;
  end_id = end_id + step;
  END LOOP;
END
$$;
-- Convert usage from NUMERIC(30,9) to an integer NUMERIC(30) by multiplying
-- by 1e9 (presumably seconds -> nanoseconds -- confirm against the loader):
-- rename the old column aside, add the new one, convert, then drop the old.
ALTER TABLE cdrs RENAME COLUMN usage to usage_old;
ALTER TABLE cdrs ADD usage NUMERIC(30);
UPDATE cdrs SET usage = usage_old * 1000000000 WHERE usage_old IS NOT NULL;
ALTER TABLE cdrs DROP COLUMN usage_old;

View File

@@ -0,0 +1,79 @@
--
-- Table structure for table `cdrs`
--
DROP TABLE IF EXISTS cdrs;
CREATE TABLE cdrs (
id SERIAL PRIMARY KEY,
cgrid CHAR(40) NOT NULL,
run_id VARCHAR(64) NOT NULL,
origin_host VARCHAR(64) NOT NULL,
source VARCHAR(64) NOT NULL,
origin_id VARCHAR(64) NOT NULL,
tor VARCHAR(16) NOT NULL,
request_type VARCHAR(24) NOT NULL,
direction VARCHAR(8) NOT NULL,
tenant VARCHAR(64) NOT NULL,
category VARCHAR(32) NOT NULL,
account VARCHAR(128) NOT NULL,
subject VARCHAR(128) NOT NULL,
destination VARCHAR(128) NOT NULL,
setup_time TIMESTAMP WITH TIME ZONE NOT NULL,
pdd NUMERIC(12,9) NOT NULL,
answer_time TIMESTAMP WITH TIME ZONE NOT NULL,
usage NUMERIC(30,9) NOT NULL,
supplier VARCHAR(128) NOT NULL,
disconnect_cause VARCHAR(64) NOT NULL,
extra_fields jsonb NOT NULL,
cost_source VARCHAR(64) NOT NULL,
cost NUMERIC(20,4) DEFAULT NULL,
cost_details jsonb,
account_summary jsonb,
extra_info text,
created_at TIMESTAMP WITH TIME ZONE,
updated_at TIMESTAMP WITH TIME ZONE NULL,
deleted_at TIMESTAMP WITH TIME ZONE NULL,
UNIQUE (cgrid, run_id, origin_id)
);
;
DROP INDEX IF EXISTS deleted_at_cp_idx;
CREATE INDEX deleted_at_cp_idx ON cdrs (deleted_at);
DROP TABLE IF EXISTS sm_costs;
CREATE TABLE sm_costs (
id SERIAL PRIMARY KEY,
cgrid CHAR(40) NOT NULL,
run_id VARCHAR(64) NOT NULL,
origin_host VARCHAR(64) NOT NULL,
origin_id VARCHAR(64) NOT NULL,
cost_source VARCHAR(64) NOT NULL,
usage NUMERIC(30,9) NOT NULL,
cost_details jsonb,
created_at TIMESTAMP WITH TIME ZONE,
deleted_at TIMESTAMP WITH TIME ZONE NULL,
UNIQUE (cgrid, run_id)
);
DROP INDEX IF EXISTS cgrid_smcost_idx;
CREATE INDEX cgrid_smcost_idx ON sm_costs (cgrid, run_id);
DROP INDEX IF EXISTS origin_smcost_idx;
CREATE INDEX origin_smcost_idx ON sm_costs (origin_host, origin_id);
DROP INDEX IF EXISTS run_origin_smcost_idx;
CREATE INDEX run_origin_smcost_idx ON sm_costs (run_id, origin_id);
DROP INDEX IF EXISTS deleted_at_smcost_idx;
CREATE INDEX deleted_at_smcost_idx ON sm_costs (deleted_at);
--
-- Table structure for table `versions`
--
DROP TABLE IF EXISTS versions;
CREATE TABLE versions (
"id" SERIAL PRIMARY KEY,
"item" varchar(64) NOT NULL,
"version" INTEGER NOT NULL,
UNIQUE (item)
);

View File

@@ -0,0 +1,102 @@
#! /usr/bin/env sh
# rc7_to_rc8.sh
# Drives the rc7 -> rc8 CGRateS data migration:
#   1. merges the dataDB (redis or mongo) by running the dbsmerge_*.py helper
#      (connection settings are passed through the cgr_* environment exports),
#   2. updates the storDB schema and migrates CDRs with the SQL scripts found
#      next to this script.
# Adjust the "DBs Config" section before running.
echo ""
echo "rc7_to_rc8.sh"
#settings
#DBs Config
datadb="redis"   # dataDB backend: "redis" or "mongo"
stordb="mysql"   # storDB backend: "mysql" or "postgres"
echo "dataDB:"$datadb " storDB:"$stordb
echo ""
#dataDBs: export the connection settings consumed by the python helpers
case $datadb in
"redis")
	#Redis Config
	export cgr_from_host='127.0.0.1'
	export cgr_from_port=6379
	export cgr_from_db=11
	export cgr_from_pass=''
	export cgr_to_host='127.0.0.1'
	export cgr_to_port=6379
	export cgr_to_db=10
	export cgr_to_pass='' # Not used
	;;
"mongo")
	#Mongo Config
	export cgr_from_host='127.0.0.1'
	export cgr_from_port='27017'
	export cgr_from_db='11'
	export cgr_from_auth_db='cgrates' # Auth db on source server
	export cgr_from_user='cgrates'
	export cgr_from_pass=''
	export cgr_to_host='127.0.0.1'
	export cgr_to_port='27017'
	export cgr_to_db='10'
	export cgr_to_auth_db="cgrates" # Auth db on target server
	export cgr_to_user='cgrates'
	export cgr_to_pass=''
	;;
esac
# Used as the postgres password and, as before, for the mysql -p flag too.
export PGPASSWORD="CGRateS.org"
#StorDBs
case $stordb in
"mysql")
	#mysql Config
	user="cgrates"
	host="127.0.0.1"
	db="cgrates"
	;;
"postgres")
	#postgres Config
	user="cgrates"
	host="127.0.0.1"
	db="cgrates"
	;;
esac
# Directory containing this script (and its helper SQL/python files).
DIR="$(dirname "$(readlink -f "$0")")"
#DataDB switch
case $datadb in
"redis")
	echo "executing dbsmerge_redis.py"
	./dbsmerge_redis.py
	echo "done!"
	;;
"mongo")
	echo "executing dbsmerge_mongo.py"
	./dbsmerge_mongo.py
	echo "done!"
	;;
esac
# Default both step results to failure so that an unrecognized $stordb value
# (no case matches, nothing runs) is reported as a failed run instead of
# crashing the final test with unset $up/$mig.
up=1
mig=1
#StorDB switch
case $stordb in
"mysql")
	mysql -u$user -p$PGPASSWORD -h $host < "$DIR"/mysql_tables_update.sql
	up=$?
	mysql -u$user -p$PGPASSWORD -h $host -D cgrates < "$DIR"/mysql_cdr_migration.sql
	mig=$?
	;;
"postgres")
	# NOTE(review): "pq_tables_update.sql" vs "pg_cdr_migration.sql" naming is
	# inconsistent -- confirm the actual file names shipped with the script.
	psql -U $user -h $host -d cgrates -f "$DIR"/pq_tables_update.sql
	up=$?
	psql -U $user -h $host -d cgrates -f "$DIR"/pg_cdr_migration.sql
	mig=$?
	;;
esac
# Report the combined result; printf is used because "echo -e" is not
# portable under plain sh. Exit non-zero on failure so callers can detect it.
if [ "$up" = 0 ] && [ "$mig" = 0 ]; then
	printf '\n\t+++ The script ran successfully ! +++\n\n'
	exit 0
fi
printf '\n\t--- The script failed (schema update exit=%s, CDR migration exit=%s) ---\n\n' "$up" "$mig"
exit 1

View File

@@ -397,6 +397,7 @@ CREATE TABLE tp_aliases (
DROP TABLE IF EXISTS tp_resources;
CREATE TABLE tp_resources (
`tenant` varchar(64) NOT NULL,
`id` int(11) NOT NULL AUTO_INCREMENT,
`tpid` varchar(64) NOT NULL,
`tag` varchar(64) NOT NULL,
@@ -423,6 +424,7 @@ CREATE TABLE tp_resources (
DROP TABLE IF EXISTS tp_stats;
CREATE TABLE tp_stats (
`tenant` varchar(64) NOT NULL,
`id` int(11) NOT NULL AUTO_INCREMENT,
`tpid` varchar(64) NOT NULL,
`tag` varchar(64) NOT NULL,

View File

@@ -393,6 +393,7 @@ CREATE INDEX tpaliases_idx ON tp_aliases (tpid,direction,tenant,category,account
DROP TABLE IF EXISTS tp_resources;
CREATE TABLE tp_resources (
"tenant"varchar(64) NOT NULL,
"id" SERIAL PRIMARY KEY,
"tpid" varchar(64) NOT NULL,
"tag" varchar(64) NOT NULL,
@@ -419,6 +420,7 @@ CREATE INDEX tp_resources_unique ON tp_resources ("tpid", "tag", "filter_type",
DROP TABLE IF EXISTS tp_stats;
CREATE TABLE tp_stats (
"tenant"varchar(64) NOT NULL,
"id" SERIAL PRIMARY KEY,
"tpid" varchar(64) NOT NULL,
"tag" varchar(64) NOT NULL,

View File

@@ -1,6 +1,6 @@
#Id[0],FilterType[1],FilterFieldName[2],FilterFieldValues[3],ActivationInterval[4],TTL[5],Limit[6],AllocationMessage[7],Blocker[8],Stored[9],Weight[10],Thresholds[11]
ResGroup1,*string,Account,1001;1002,2014-07-29T15:00:00Z,1s,7,,true,true,20,
ResGroup1,*string_prefix,Destination,10;20,,,,,,,,
ResGroup1,*rsr_fields,,Subject(~^1.*1$);Destination(1002),,,,,,,,
ResGroup2,*destinations,Destination,DST_FS,2014-07-29T15:00:00Z,3600s,8,SPECIAL_1002,true,true,10,
ResGroup3,*cdr_stats,,CDRST1:*min_ASR:34;CDRST_1001:*min_ASR:20,,,,,,,,
#Tenant[0],Id[1],FilterType[2],FilterFieldName[3],FilterFieldValues[4],ActivationInterval[5],TTL[6],Limit[7],AllocationMessage[8],Blocker[9],Stored[10],Weight[11],Thresholds[12]
Tester,ResGroup1,*string,Account,1001;1002,2014-07-29T15:00:00Z,1s,7,,true,true,20,
Tester,ResGroup1,*string_prefix,Destination,10;20,,,,,,,,
Tester,ResGroup1,*rsr_fields,,Subject(~^1.*1$);Destination(1002),,,,,,,,
Tester,ResGroup2,*destinations,Destination,DST_FS,2014-07-29T15:00:00Z,3600s,8,SPECIAL_1002,true,true,10,
Tester,ResGroup3,*cdr_stats,,CDRST1:*min_ASR:34;CDRST_1001:*min_ASR:20,,,,,,,,
1 #Id[0] #Tenant[0] FilterType[1] Id[1] FilterFieldName[2] FilterType[2] FilterFieldValues[3] FilterFieldName[3] ActivationInterval[4] FilterFieldValues[4] TTL[5] ActivationInterval[5] Limit[6] TTL[6] AllocationMessage[7] Limit[7] Blocker[8] AllocationMessage[8] Stored[9] Blocker[9] Weight[10] Stored[10] Thresholds[11] Weight[11] Thresholds[12]
2 ResGroup1 Tester *string ResGroup1 Account *string 1001;1002 Account 2014-07-29T15:00:00Z 1001;1002 1s 2014-07-29T15:00:00Z 7 1s 7 true true true 20 true 20
3 ResGroup1 Tester *string_prefix ResGroup1 Destination *string_prefix 10;20 Destination 10;20
4 ResGroup1 Tester *rsr_fields ResGroup1 *rsr_fields Subject(~^1.*1$);Destination(1002) Subject(~^1.*1$);Destination(1002)
5 ResGroup2 Tester *destinations ResGroup2 Destination *destinations DST_FS Destination 2014-07-29T15:00:00Z DST_FS 3600s 2014-07-29T15:00:00Z 8 3600s SPECIAL_1002 8 true SPECIAL_1002 true true 10 true 10
6 ResGroup3 Tester *cdr_stats ResGroup3 *cdr_stats CDRST1:*min_ASR:34;CDRST_1001:*min_ASR:20 CDRST1:*min_ASR:34;CDRST_1001:*min_ASR:20

View File

@@ -1,2 +1,2 @@
#Id[0],FilterType[1],FilterFieldName[2],FilterFieldValues[3],ActivationInterval[4],QueueLength[5],TTL[6],Metrics[7],Blocker[8],Stored[9],Weight[10],Thresholds[11]
Stats1,*string,Account,1001;1002,2014-07-29T15:00:00Z,100,1s,*asr;*acd;*acc,true,true,20,THRESH1;THRESH2
#Tenant[0],Id[1],FilterType[2],FilterFieldName[3],FilterFieldValues[4],ActivationInterval[5],QueueLength[6],TTL[7],Metrics[8],Blocker[9],Stored[10],Weight[11],Thresholds[12]
Tester,Stats1,*string,Account,1001;1002,2014-07-29T15:00:00Z,100,1s,*asr;*acd;*acc,true,true,20,THRESH1;THRESH2
1 #Id[0] #Tenant[0] FilterType[1] Id[1] FilterFieldName[2] FilterType[2] FilterFieldValues[3] FilterFieldName[3] ActivationInterval[4] FilterFieldValues[4] QueueLength[5] ActivationInterval[5] TTL[6] QueueLength[6] Metrics[7] TTL[7] Blocker[8] Metrics[8] Stored[9] Blocker[9] Weight[10] Stored[10] Thresholds[11] Weight[11] Thresholds[12]
2 Stats1 Tester *string Stats1 Account *string 1001;1002 Account 2014-07-29T15:00:00Z 1001;1002 100 2014-07-29T15:00:00Z 1s 100 *asr;*acd;*acc 1s true *asr;*acd;*acc true true 20 true THRESH1;THRESH2 20 THRESH1;THRESH2

View File

@@ -1,8 +1,8 @@
#Id[0],FilterType[1],FilterFieldName[2],FilterFieldValues[3],ActivationInterval[4],TTL[5],Limit[6],AllocationMessage[7],Blocker[8],Stored[9],Weight[10],Thresholds[11]
ResGroup1,*string,Account,1001;1002,2014-07-29T15:00:00Z,1s,7,,true,true,20,
ResGroup1,*string_prefix,Destination,10;20,,,,,,,,
ResGroup1,*rsr_fields,,Subject(~^1.*1$);Destination(1002),,,,,,,,
ResGroup2,*destinations,Destination,DST_FS,2014-07-29T15:00:00Z,3600s,8,SPECIAL_1002,true,true,10,
ResGroup3,*string,Account,3001,2014-07-29T15:00:00Z,1s,3,,true,true,20,
#Tenant[0],Id[1],FilterType[2],FilterFieldName[3],FilterFieldValues[4],ActivationInterval[5],TTL[6],Limit[7],AllocationMessage[8],Blocker[9],Stored[10],Weight[11],Thresholds[12]
Tester,ResGroup1,*string,Account,1001;1002,2014-07-29T15:00:00Z,1s,7,,true,true,20,
Tester,ResGroup1,*string_prefix,Destination,10;20,,,,,,,,
Tester,ResGroup1,*rsr_fields,,Subject(~^1.*1$);Destination(1002),,,,,,,,
Tester,ResGroup2,*destinations,Destination,DST_FS,2014-07-29T15:00:00Z,3600s,8,SPECIAL_1002,true,true,10,
Tester,ResGroup3,*string,Account,3001,2014-07-29T15:00:00Z,1s,3,,true,true,20,
#ResGroup3,*timings,SetupTime,PEAK,,,,,,,,
#ResGroup3,*cdr_stats,,CDRST1:*min_ASR:34;CDRST_1001:*min_ASR:20,,,,,,,,
1 #Id[0] #Tenant[0],Id[1],FilterType[2],FilterFieldName[3],FilterFieldValues[4],ActivationInterval[5],TTL[6],Limit[7],AllocationMessage[8],Blocker[9],Stored[10],Weight[11],Thresholds[12] FilterType[1] FilterFieldName[2] FilterFieldValues[3] ActivationInterval[4] TTL[5] Limit[6] AllocationMessage[7] Blocker[8] Stored[9] Weight[10] Thresholds[11]
2 ResGroup1 Tester,ResGroup1,*string,Account,1001;1002,2014-07-29T15:00:00Z,1s,7,,true,true,20, *string Account 1001;1002 2014-07-29T15:00:00Z 1s 7 true true 20
3 ResGroup1 Tester,ResGroup1,*string_prefix,Destination,10;20,,,,,,,, *string_prefix Destination 10;20
4 ResGroup1 Tester,ResGroup1,*rsr_fields,,Subject(~^1.*1$);Destination(1002),,,,,,,, *rsr_fields Subject(~^1.*1$);Destination(1002)
5 ResGroup2 Tester,ResGroup2,*destinations,Destination,DST_FS,2014-07-29T15:00:00Z,3600s,8,SPECIAL_1002,true,true,10, *destinations Destination DST_FS 2014-07-29T15:00:00Z 3600s 8 SPECIAL_1002 true true 10
6 ResGroup3 Tester,ResGroup3,*string,Account,3001,2014-07-29T15:00:00Z,1s,3,,true,true,20, *string Account 3001 2014-07-29T15:00:00Z 1s 3 true true 20
7 #ResGroup3 #ResGroup3,*timings,SetupTime,PEAK,,,,,,,, *timings SetupTime PEAK
8 #ResGroup3 #ResGroup3,*cdr_stats,,CDRST1:*min_ASR:34;CDRST_1001:*min_ASR:20,,,,,,,, *cdr_stats CDRST1:*min_ASR:34;CDRST_1001:*min_ASR:20

View File

@@ -1,2 +1,2 @@
#Id[0],FilterType[1],FilterFieldName[2],FilterFieldValues[3],ActivationInterval[4],QueueLength[5],TTL[6],Metrics[7],Blocker[8],Stored[9],Weight[10],Thresholds[11]
Stats1,*string,Account,1001;1002,2014-07-29T15:00:00Z,100,1s,*asr;*acd;*acc,true,true,20,THRESH1;THRESH2
#Tenant[0],Id[1],FilterType[2],FilterFieldName[3],FilterFieldValues[4],ActivationInterval[5],QueueLength[6],TTL[7],Metrics[8],Blocker[9],Stored[10],Weight[11],Thresholds[12]
Tester,Stats1,*string,Account,1001;1002,2014-07-29T15:00:00Z,100,1s,*asr;*acd;*acc,true,true,20,THRESH1;THRESH2
1 #Id[0] #Tenant[0] FilterType[1] Id[1] FilterFieldName[2] FilterType[2] FilterFieldValues[3] FilterFieldName[3] ActivationInterval[4] FilterFieldValues[4] QueueLength[5] ActivationInterval[5] TTL[6] QueueLength[6] Metrics[7] TTL[7] Blocker[8] Metrics[8] Stored[9] Blocker[9] Weight[10] Stored[10] Thresholds[11] Weight[11] Thresholds[12]
2 Stats1 Tester *string Stats1 Account *string 1001;1002 Account 2014-07-29T15:00:00Z 1001;1002 100 2014-07-29T15:00:00Z 1s 100 *asr;*acd;*acc 1s true *asr;*acd;*acc true true 20 true THRESH1;THRESH2 20 THRESH1;THRESH2

View File

@@ -42,8 +42,8 @@ func InitDataDb(cfg *config.CGRConfig) error {
return err
}
dataDB.LoadDataDBCache(nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
// Write version before starting
if err := CheckVersions(dataDB); err != nil {
// Write version before starting
if err := CheckVersions(dataDB); err != nil {
return err
}

View File

@@ -266,19 +266,20 @@ cgrates.org,mas,true,another,value,10
*out,cgrates.org,call,remo,remo,*any,*rating,Account,remo,minu,10
`
resProfiles = `
#Id[0],FilterType[1],FilterFieldName[2],FilterFieldValues[3],ActivationInterval[4],TTL[5],Limit[6],AllocationMessage[7],Weight[8],Thresholds[9]
ResGroup21,*string,HdrAccount,1001;1002,2014-07-29T15:00:00Z,1s,2,call,true,true,10,
ResGroup21,*string_prefix,HdrDestination,10;20,,,,,,,,
ResGroup21,*rsr_fields,,HdrSubject(~^1.*1$);HdrDestination(1002),,,,,,,,
ResGroup22,*destinations,HdrDestination,DST_FS,2014-07-29T15:00:00Z,3600s,2,premium_call,true,true,10,
#Tenant[0],Id[1],FilterType[2],FilterFieldName[3],FilterFieldValues[4],ActivationInterval[5],TTL[6],Limit[7],AllocationMessage[8],Blocker[9],Stored[10],Weight[11],Thresholds[12]
Tester,ResGroup21,*string,HdrAccount,1001;1002,2014-07-29T15:00:00Z,1s,2,call,true,true,10,
Tester,ResGroup21,*string_prefix,HdrDestination,10;20,,,,,,,,
Tester,ResGroup21,*rsr_fields,,HdrSubject(~^1.*1$);HdrDestination(1002),,,,,,,,
Tester,ResGroup22,*destinations,HdrDestination,DST_FS,2014-07-29T15:00:00Z,3600s,2,premium_call,true,true,10,
`
stats = `
#Id[0],FilterType[1],FilterFieldName[2],FilterFieldValues[3],ActivationInterval[4],QueueLength[5],TTL[6],Metrics[7],Blocker[8],Stored[9],Weight[10],Thresholds[11]
Stats1,*string,Account,1001;1002,2014-07-29T15:00:00Z,100,1s,*asr;*acd;*acc,true,true,20,THRESH1;THRESH2
#Tenant[0],Id[1],FilterType[2],FilterFieldName[3],FilterFieldValues[4],ActivationInterval[5],QueueLength[6],TTL[7],Metrics[8],Blocker[9],Stored[10],Weight[11],Thresholds[12]
Tester,Stats1,*string,Account,1001;1002,2014-07-29T15:00:00Z,100,1s,*asr;*acd;*acc,true,true,20,THRESH1;THRESH2
`
thresholds = `
#Id[0],FilterType[1],FilterFieldName[2],FilterFieldValues[3],ActivationInterval[4],ThresholdType[5],ThresholdValue[6],MinItems[7],Recurrent[8],MinSleep[9],Blocker[10],Stored[11],Weight[12],ActionIDs[13]
Threshold1,*string,Account,1001;1002,2014-07-29T15:00:00Z,,1.2,10,true,1s,true,true,10,
`
)
@@ -1391,8 +1392,9 @@ func TestLoadReverseAliases(t *testing.T) {
func TestLoadResourceProfiles(t *testing.T) {
eResProfiles := map[string]*utils.TPResource{
"ResGroup21": &utils.TPResource{
TPid: testTPID,
ID: "ResGroup21",
TPid: testTPID,
Tenant: "Tester",
ID: "ResGroup21",
Filters: []*utils.TPRequestFilter{
&utils.TPRequestFilter{Type: MetaString, FieldName: "HdrAccount", Values: []string{"1001", "1002"}},
&utils.TPRequestFilter{Type: MetaStringPrefix, FieldName: "HdrDestination", Values: []string{"10", "20"}},
@@ -1407,8 +1409,9 @@ func TestLoadResourceProfiles(t *testing.T) {
Limit: "2",
},
"ResGroup22": &utils.TPResource{
TPid: testTPID,
ID: "ResGroup22",
TPid: testTPID,
Tenant: "Tester",
ID: "ResGroup22",
Filters: []*utils.TPRequestFilter{
&utils.TPRequestFilter{Type: MetaDestinations, FieldName: "HdrDestination", Values: []string{"DST_FS"}},
},
@@ -1435,8 +1438,9 @@ func TestLoadResourceProfiles(t *testing.T) {
func TestLoadStats(t *testing.T) {
eStats := map[string]*utils.TPStats{
"Stats1": &utils.TPStats{
TPid: testTPID,
ID: "Stats1",
Tenant: "Tester",
TPid: testTPID,
ID: "Stats1",
Filters: []*utils.TPRequestFilter{
&utils.TPRequestFilter{Type: MetaString, FieldName: "Account", Values: []string{"1001", "1002"}},
},

View File

@@ -1821,6 +1821,7 @@ func (tps TpResources) AsTPResources() (result []*utils.TPResource) {
if !found {
rl = &utils.TPResource{
TPid: tp.Tpid,
Tenant: tp.Tenant,
ID: tp.Tag,
Blocker: tp.Blocker,
Stored: tp.Stored,
@@ -1879,8 +1880,9 @@ func APItoModelResource(rl *utils.TPResource) (mdls TpResources) {
}
for i, fltr := range rl.Filters {
mdl := &TpResource{
Tpid: rl.TPid,
Tag: rl.ID,
Tpid: rl.TPid,
Tenant: rl.Tenant,
Tag: rl.ID,
}
if i == 0 {
mdl.UsageTTL = rl.UsageTTL
@@ -1921,6 +1923,7 @@ func APItoModelResource(rl *utils.TPResource) (mdls TpResources) {
func APItoResource(tpRL *utils.TPResource, timezone string) (rp *ResourceProfile, err error) {
rp = &ResourceProfile{
Tenant: tpRL.Tenant,
ID: tpRL.ID,
Weight: tpRL.Weight,
Blocker: tpRL.Blocker,
@@ -1960,6 +1963,7 @@ func (tps TpStatsS) AsTPStats() (result []*utils.TPStats) {
st, found := mst[tp.Tag]
if !found {
st = &utils.TPStats{
Tenant: tp.Tenant,
TPid: tp.Tpid,
ID: tp.Tag,
Blocker: tp.Blocker,
@@ -2027,8 +2031,9 @@ func APItoModelStats(st *utils.TPStats) (mdls TpStatsS) {
}
for i, fltr := range st.Filters {
mdl := &TpStats{
Tpid: st.TPid,
Tag: st.ID,
Tenant: st.Tenant,
Tpid: st.TPid,
Tag: st.ID,
}
if i == 0 {
mdl.TTL = st.TTL
@@ -2072,6 +2077,7 @@ func APItoModelStats(st *utils.TPStats) (mdls TpStatsS) {
func APItoStats(tpST *utils.TPStats, timezone string) (st *StatQueueProfile, err error) {
st = &StatQueueProfile{
Tenant: tpST.Tenant,
ID: tpST.ID,
QueueLength: tpST.QueueLength,
Weight: tpST.Weight,

View File

@@ -713,6 +713,7 @@ func TestTpResourcesAsTpResources(t *testing.T) {
tps := []*TpResource{
&TpResource{
Tpid: "TEST_TPID",
Tenant: "Tester",
Tag: "ResGroup1",
FilterType: MetaStringPrefix,
FilterFieldName: "Destination",
@@ -726,12 +727,14 @@ func TestTpResourcesAsTpResources(t *testing.T) {
&TpResource{
Tpid: "TEST_TPID",
Tag: "ResGroup1",
Tenant: "Tester",
FilterType: MetaStringPrefix,
FilterFieldName: "Category",
FilterFieldValues: "call;inbound_call",
Thresholds: "WARN3"},
&TpResource{
Tpid: "TEST_TPID",
Tenant: "Tester",
Tag: "ResGroup2",
FilterType: MetaStringPrefix,
FilterFieldName: "Destination",
@@ -744,8 +747,9 @@ func TestTpResourcesAsTpResources(t *testing.T) {
}
eTPs := []*utils.TPResource{
&utils.TPResource{
TPid: tps[0].Tpid,
ID: tps[0].Tag,
TPid: tps[0].Tpid,
Tenant: tps[0].Tenant,
ID: tps[0].Tag,
Filters: []*utils.TPRequestFilter{
&utils.TPRequestFilter{
Type: tps[0].FilterType,
@@ -768,8 +772,9 @@ func TestTpResourcesAsTpResources(t *testing.T) {
Thresholds: []string{"WARN_RES1", "WARN_RES2", "WARN3"},
},
&utils.TPResource{
TPid: tps[2].Tpid,
ID: tps[2].Tag,
TPid: tps[2].Tpid,
Tenant: tps[2].Tenant,
ID: tps[2].Tag,
Filters: []*utils.TPRequestFilter{
&utils.TPRequestFilter{
Type: tps[2].FilterType,

View File

@@ -454,18 +454,19 @@ func (t TBLSMCosts) TableName() string {
type TpResource struct {
ID int64
Tpid string
Tag string `index:"0" re:""`
FilterType string `index:"1" re:"^\*[A-Za-z].*"`
FilterFieldName string `index:"2" re:""`
FilterFieldValues string `index:"3" re:""`
ActivationInterval string `index:"4" re:""`
UsageTTL string `index:"5" re:""`
Limit string `index:"6" re:""`
AllocationMessage string `index:"7" re:""`
Blocker bool `index:"8" re:""`
Stored bool `index:"9" re:""`
Weight float64 `index:"10" re:"\d+\.?\d*"`
Thresholds string `index:"11" re:""`
Tenant string `index:"0" re:""`
Tag string `index:"1" re:""`
FilterType string `index:"2" re:"^\*[A-Za-z].*"`
FilterFieldName string `index:"3" re:""`
FilterFieldValues string `index:"4" re:""`
ActivationInterval string `index:"5" re:""`
UsageTTL string `index:"6" re:""`
Limit string `index:"7" re:""`
AllocationMessage string `index:"8" re:""`
Blocker bool `index:"9" re:""`
Stored bool `index:"10" re:""`
Weight float64 `index:"11" re:"\d+\.?\d*"`
Thresholds string `index:"12" re:""`
CreatedAt time.Time
}
@@ -482,18 +483,19 @@ func (t TBLVersion) TableName() string {
type TpStats struct {
ID int64
Tpid string
Tag string `index:"0" re:""`
FilterType string `index:"1" re:"^\*[A-Za-z].*"`
FilterFieldName string `index:"2" re:""`
FilterFieldValues string `index:"3" re:""`
ActivationInterval string `index:"4" re:""`
QueueLength int `index:"5" re:""`
TTL string `index:"6" re:""`
Metrics string `index:"7" re:""`
Blocker bool `index:"8" re:""`
Stored bool `index:"9" re:""`
Weight float64 `index:"10" re:"\d+\.?\d*"`
Thresholds string `index:"11" re:""`
Tenant string `index:"0" re:""`
Tag string `index:"1" re:""`
FilterType string `index:"2" re:"^\*[A-Za-z].*"`
FilterFieldName string `index:"3" re:""`
FilterFieldValues string `index:"4" re:""`
ActivationInterval string `index:"5" re:""`
QueueLength int `index:"6" re:""`
TTL string `index:"7" re:""`
Metrics string `index:"8" re:""`
Blocker bool `index:"9" re:""`
Stored bool `index:"10" re:""`
Weight float64 `index:"11" re:"\d+\.?\d*"`
Thresholds string `index:"12" re:""`
CreatedAt time.Time
}

View File

@@ -39,6 +39,7 @@ func init() {
// ResourceProfile represents the user configuration for the resource
type ResourceProfile struct {
Tenant string
ID string // identifier of this resource
Filters []*RequestFilter // filters for the request
ActivationInterval *utils.ActivationInterval // time when this resource becomes active and expires
@@ -53,6 +54,7 @@ type ResourceProfile struct {
// ResourceUsage represents an usage counted
type ResourceUsage struct {
Tenant string
ID string // Unique identifier of this ResourceUsage, Eg: FreeSWITCH UUID
ExpiryTime time.Time
Units float64 // Number of units used
@@ -66,6 +68,7 @@ func (ru *ResourceUsage) isActive(atTime time.Time) bool {
// Resource represents a resource in the system
// not thread safe, needs locking at process level
type Resource struct {
Tenant string
ID string
Usages map[string]*ResourceUsage
TTLIdx []string // holds ordered list of ResourceIDs based on their TTL, empty if feature is disabled

View File

@@ -1540,8 +1540,9 @@ func testStorDBitCRUDTpStats(t *testing.T) {
//WRITE
eTPs := []*utils.TPStats{
&utils.TPStats{
TPid: "TEST_TPID",
ID: "Stats1",
TPid: "TEST_TPID",
Tenant: "Test",
ID: "Stats1",
Filters: []*utils.TPRequestFilter{
&utils.TPRequestFilter{
Type: "filtertype",

View File

@@ -1270,6 +1270,7 @@ type AttrSetBalance struct {
type TPResource struct {
TPid string
Tenant string
ID string // Identifier of this limit
Filters []*TPRequestFilter // Filters for the request
ActivationInterval *TPActivationInterval // Time when this limit becomes active/expires
@@ -1335,6 +1336,7 @@ type AttrDisconnectSession struct {
// TPStats is used in APIs to manage remotely offline Stats config
type TPStats struct {
TPid string
Tenant string
ID string
Filters []*TPRequestFilter
ActivationInterval *TPActivationInterval