From 3216d7b3cfe045e55213fcd25fa7eb82b83f3d4e Mon Sep 17 00:00:00 2001 From: edwardro22 Date: Mon, 11 Sep 2017 09:42:30 +0000 Subject: [PATCH 01/10] Refactored dbsmerge_redis script --- data/scripts/migrator/dbsmerge_redis.py | 48 ++++++++++--------------- data/scripts/migrator/usage_mysql.py | 2 +- 2 files changed, 20 insertions(+), 30 deletions(-) diff --git a/data/scripts/migrator/dbsmerge_redis.py b/data/scripts/migrator/dbsmerge_redis.py index 907f89483..91a5d1ece 100755 --- a/data/scripts/migrator/dbsmerge_redis.py +++ b/data/scripts/migrator/dbsmerge_redis.py @@ -8,24 +8,28 @@ # behaviour: # ^ the script will not overwrite keys on the destination server/database -from_host = '127.0.0.1' -from_port = 6379 from_db = 11 -from_pass = '' - -to_host = '127.0.0.1' -to_port = 6379 to_db = 10 -to_pass = '' # Not used keymask = '*' timeout = 2000 import time import redis +import argparse +parser = argparse.ArgumentParser() +parser.add_argument("-host", "--host",default="127.0.0.1", help='default: "127.0.0.1"') +parser.add_argument("-port", "--port", type=int ,default=6379, help='default: 6379') +parser.add_argument("-pass", "--password", default="", help='default: ""') + +args = parser.parse_args() + +from_host = args.host +from_port = args.port +from_pass = args.password from_redis = redis.Redis(host = from_host, port = from_port, password=from_pass, db = from_db) -to_redis = redis.Redis(host = to_host, port = to_port, db = to_db) +to_redis = redis.Redis(host = from_host, port = from_port, db = to_db) to_keys = to_redis.keys(keymask) from_keys = from_redis.keys(keymask) @@ -34,28 +38,14 @@ print('Found %d keys on destination.' % len(to_keys)) # keys found if len(from_keys) > 0: - # same server - if from_host == to_host and from_port == to_port: - print('Migrating on same server...') - i = 0 - for key in from_keys: - i += 1 - print('Moving key %s (%d of %d)...' 
% (key, i, len(from_keys))) - from_redis.execute_command('MOVE', key, to_db) + print('Migrating on same server...') + i = 0 + for key in from_keys: + i += 1 + print('Moving key %s (%d of %d)...' % (key, i, len(from_keys))) + from_redis.execute_command('MOVE', key, to_db) - # different servers - else: - print('Migrating between different servers...') - i = 0 - for key in from_keys: - i += 1 - print('Moving key %s (%d of %d)...' % (key, i, len(from_keys))), - try: - from_redis.execute_command('MIGRATE', to_host, to_port, key, to_db, timeout) - except redis.exceptions.ResponseError, e: - if not 'ERR Target key name is busy' in str(e): - raise e - print('Done.') + print('Done.') # done from_keys_after = from_redis.keys(keymask) to_keys_after = to_redis.keys(keymask) diff --git a/data/scripts/migrator/usage_mysql.py b/data/scripts/migrator/usage_mysql.py index 1401f1897..2d062c87c 100755 --- a/data/scripts/migrator/usage_mysql.py +++ b/data/scripts/migrator/usage_mysql.py @@ -3,7 +3,7 @@ # depends: # ^ mysql (debian: python-mysql.connector) -host = '127.0.0.1' +host = '192.168.100.40' port = 3306 database = 'cgrates' user = 'root' From 5a37c9fa94f9e27b19d65513a6243efa771087fa Mon Sep 17 00:00:00 2001 From: edwardro22 Date: Mon, 11 Sep 2017 11:21:42 +0000 Subject: [PATCH 02/10] Reverted changes --- data/scripts/migrator/dbsmerge_mongo.py | 143 +++++++++--------------- 1 file changed, 55 insertions(+), 88 deletions(-) diff --git a/data/scripts/migrator/dbsmerge_mongo.py b/data/scripts/migrator/dbsmerge_mongo.py index ddc176fe8..ec3ed3b2a 100755 --- a/data/scripts/migrator/dbsmerge_mongo.py +++ b/data/scripts/migrator/dbsmerge_mongo.py @@ -1,101 +1,68 @@ -#!/usr/bin/python +#!/#!/usr/bin/python # depends: -# ^ pymongo # install via: easy_install pymongo +# ^ redis # install via easy_install redis +# asserts: +# ^ destination redis is not password protected when connected from source redis server +# (https://github.com/antirez/redis/pull/2507) # behaviour: -# ^ the script 
will "move" the collections if source and target server are the same -# but will "copy" (dump/restore) if source and target servers are different +# ^ the script will not overwrite keys on the destination server/database -from_host = '127.0.0.1' -from_port = '27017' -from_db = '11' -from_auth_db = 'cgrates' # Auth db on source server -from_user = 'cgrates' -from_pass = '' +from_host = '127.0.0.1' +from_port = 6379 +from_db = 11 +from_pass = '' -to_host = '127.0.0.1' -to_port = '27017' -to_db = '10' -to_auth_db = "cgrates" # Auth db on target server -to_user = 'cgrates' -to_pass = '' +to_host = '127.0.0.1' +to_port = 6379 +to_db = 10 +to_pass = '' # Not used -ignore_empty_cols = True -# Do not migrate collections with 0 document count. -# Works only if from/to is on same host. +keymask = '*' +timeout = 2000 -# Overwrite target collections flag. -# Works only if from/to is on same host. -# If from/to hosts are different we use mongorestore which overwrites by default. -drop_target = False +import time +import redis -dump_folder = 'dump' +from_redis = redis.Redis(host = from_host, port = from_port, password=from_pass, db = from_db) +to_redis = redis.Redis(host = to_host, port = to_port, db = to_db) -import sys -from pymongo import MongoClient -from urllib import quote_plus -from collections import OrderedDict +to_keys = to_redis.keys(keymask) +from_keys = from_redis.keys(keymask) +print('Found %d keys on source.' % len(from_keys)) +print('Found %d keys on destination.' 
% len(to_keys)) -# same server -if from_host == to_host and from_port == to_port: +# keys found +if len(from_keys) > 0: + # same server + if from_host == to_host and from_port == to_port: print('Migrating on same server...') - mongo_from_url = 'mongodb://' + from_user + ':' + quote_plus(from_pass) + '@'+ from_host + ':' + from_port + '/' + from_auth_db - if from_pass == '': # disabled auth - mongo_from_url = 'mongodb://' + from_host + ':' + from_port + '/' + from_db - client = MongoClient(mongo_from_url) + i = 0 + for key in from_keys: + i += 1 + print('Moving key %s (%d of %d)...' % (key, i, len(from_keys))) + from_redis.execute_command('MOVE', key, to_db) - db = client[from_db] - cols = db.collection_names() - - # collections found - if len(cols) > 0: - print('Found %d collections on source. Moving...' % len(cols)) - i = 0 - for col in cols: - i += 1 - if not ignore_empty_cols or (ignore_empty_cols and db[col].count() > 0): - print('Moving collection %s (%d of %d)...' % (col, i, len(cols))) - try: - client.admin.command(OrderedDict([('renameCollection', from_db + '.' + col), ('to', to_db + '.' + col), ('dropTarget', drop_target)])) - except: - e = sys.exc_info()[0] - print(e) - else: - print('Skipping empty collection %s (%d of %d)...' % (col, i, len(cols))) - # no collections found - else: - print('No collections in source database.') - -# different servers + # different servers + else: + print('Migrating between different servers...') + i = 0 + for key in from_keys: + i += 1 + print('Moving key %s (%d of %d)...' % (key, i, len(from_keys))), + try: + from_redis.execute_command('MIGRATE', to_host, to_port, key, to_db, timeout) + except redis.exceptions.ResponseError, e: + if not 'ERR Target key name is busy' in str(e): + raise e + print('Done.') + # done + from_keys_after = from_redis.keys(keymask) + to_keys_after = to_redis.keys(keymask) + print('There are now %d keys on source.' % len(from_keys_after)) + print('There are now %d keys on destination.' 
% len(to_keys_after)) + print('%d keys were moved' % (len(to_keys_after) - len(to_keys))) + print('Migration complete.') +# no keys found else: - import subprocess - import os - import shutil - - print('Migrating between different servers...') - print('Dumping...') - out = subprocess.check_output([ - 'mongodump', - '--host', '%s' % from_host, - '-u', '%s' % from_user, - '-p', '%s' % from_pass, - '--authenticationDatabase', '%s' % from_auth_db, - '--db', '%s' % from_db, - '--port', '%s' % from_port, - '-o', '%s' % dump_folder, - ], stderr= subprocess.STDOUT) - print('Dump complete.') - - print('Restoring...') - out = subprocess.check_output([ - 'mongorestore', - '--host', '%s' % to_host, - '-u', '%s' % to_user, - '-p', '%s' % to_pass, - '--authenticationDatabase', '%s' % to_auth_db, - '--db', '%s' % to_db, - '--port', '%s' % to_port, - '--drop', '%s/%s' % (dump_folder, from_db), - ], stderr= subprocess.STDOUT) - print('Restore complete.') -print('Migration complete.') + print('No keys with keymask %s found in source database' % keymask) \ No newline at end of file From df31455316a420c0b09200646eca8c5b2aac46b6 Mon Sep 17 00:00:00 2001 From: edwardro22 Date: Mon, 11 Sep 2017 12:17:14 +0000 Subject: [PATCH 03/10] Added script to migrate --- data/scripts/migrator/dbsmerge_mongo.py | 143 +++++++++++------- data/scripts/migrator/dbsmerge_redis.py | 50 +++--- data/storage/migrator/dbsmerge_mongo.py | 101 +++++++++++++ data/storage/migrator/dbsmerge_redis.py | 68 +++++++++ data/storage/migrator/mysql_cdr_migration.sql | 58 +++++++ data/storage/migrator/mysql_tables_update.sql | 74 +++++++++ data/storage/migrator/pg_cdr_migration.sql | 43 ++++++ data/storage/migrator/pg_tables_update.sql | 79 ++++++++++ data/storage/migrator/rc7_to_rc8.sh | 56 +++++++ data/storage/migrator/usage_mysql.py | 40 +++++ data/storage/migrator/usage_postgres.py | 40 +++++ 11 files changed, 677 insertions(+), 75 deletions(-) create mode 100755 data/storage/migrator/dbsmerge_mongo.py create mode 
100755 data/storage/migrator/dbsmerge_redis.py create mode 100755 data/storage/migrator/mysql_cdr_migration.sql create mode 100755 data/storage/migrator/mysql_tables_update.sql create mode 100755 data/storage/migrator/pg_cdr_migration.sql create mode 100755 data/storage/migrator/pg_tables_update.sql create mode 100755 data/storage/migrator/rc7_to_rc8.sh create mode 100755 data/storage/migrator/usage_mysql.py create mode 100755 data/storage/migrator/usage_postgres.py diff --git a/data/scripts/migrator/dbsmerge_mongo.py b/data/scripts/migrator/dbsmerge_mongo.py index ec3ed3b2a..ddc176fe8 100755 --- a/data/scripts/migrator/dbsmerge_mongo.py +++ b/data/scripts/migrator/dbsmerge_mongo.py @@ -1,68 +1,101 @@ -#!/#!/usr/bin/python +#!/usr/bin/python # depends: -# ^ redis # install via easy_install redis -# asserts: -# ^ destination redis is not password protected when connected from source redis server -# (https://github.com/antirez/redis/pull/2507) +# ^ pymongo # install via: easy_install pymongo # behaviour: -# ^ the script will not overwrite keys on the destination server/database +# ^ the script will "move" the collections if source and target server are the same +# but will "copy" (dump/restore) if source and target servers are different -from_host = '127.0.0.1' -from_port = 6379 -from_db = 11 -from_pass = '' +from_host = '127.0.0.1' +from_port = '27017' +from_db = '11' +from_auth_db = 'cgrates' # Auth db on source server +from_user = 'cgrates' +from_pass = '' -to_host = '127.0.0.1' -to_port = 6379 -to_db = 10 -to_pass = '' # Not used +to_host = '127.0.0.1' +to_port = '27017' +to_db = '10' +to_auth_db = "cgrates" # Auth db on target server +to_user = 'cgrates' +to_pass = '' -keymask = '*' -timeout = 2000 +ignore_empty_cols = True +# Do not migrate collections with 0 document count. +# Works only if from/to is on same host. -import time -import redis +# Overwrite target collections flag. +# Works only if from/to is on same host. 
+# If from/to hosts are different we use mongorestore which overwrites by default. +drop_target = False -from_redis = redis.Redis(host = from_host, port = from_port, password=from_pass, db = from_db) -to_redis = redis.Redis(host = to_host, port = to_port, db = to_db) +dump_folder = 'dump' -to_keys = to_redis.keys(keymask) -from_keys = from_redis.keys(keymask) -print('Found %d keys on source.' % len(from_keys)) -print('Found %d keys on destination.' % len(to_keys)) +import sys +from pymongo import MongoClient +from urllib import quote_plus +from collections import OrderedDict -# keys found -if len(from_keys) > 0: - # same server - if from_host == to_host and from_port == to_port: +# same server +if from_host == to_host and from_port == to_port: print('Migrating on same server...') - i = 0 - for key in from_keys: - i += 1 - print('Moving key %s (%d of %d)...' % (key, i, len(from_keys))) - from_redis.execute_command('MOVE', key, to_db) + mongo_from_url = 'mongodb://' + from_user + ':' + quote_plus(from_pass) + '@'+ from_host + ':' + from_port + '/' + from_auth_db + if from_pass == '': # disabled auth + mongo_from_url = 'mongodb://' + from_host + ':' + from_port + '/' + from_db + client = MongoClient(mongo_from_url) - # different servers - else: - print('Migrating between different servers...') - i = 0 - for key in from_keys: - i += 1 - print('Moving key %s (%d of %d)...' % (key, i, len(from_keys))), - try: - from_redis.execute_command('MIGRATE', to_host, to_port, key, to_db, timeout) - except redis.exceptions.ResponseError, e: - if not 'ERR Target key name is busy' in str(e): - raise e - print('Done.') - # done - from_keys_after = from_redis.keys(keymask) - to_keys_after = to_redis.keys(keymask) - print('There are now %d keys on source.' % len(from_keys_after)) - print('There are now %d keys on destination.' 
% len(to_keys_after)) - print('%d keys were moved' % (len(to_keys_after) - len(to_keys))) - print('Migration complete.') -# no keys found + db = client[from_db] + cols = db.collection_names() + + # collections found + if len(cols) > 0: + print('Found %d collections on source. Moving...' % len(cols)) + i = 0 + for col in cols: + i += 1 + if not ignore_empty_cols or (ignore_empty_cols and db[col].count() > 0): + print('Moving collection %s (%d of %d)...' % (col, i, len(cols))) + try: + client.admin.command(OrderedDict([('renameCollection', from_db + '.' + col), ('to', to_db + '.' + col), ('dropTarget', drop_target)])) + except: + e = sys.exc_info()[0] + print(e) + else: + print('Skipping empty collection %s (%d of %d)...' % (col, i, len(cols))) + # no collections found + else: + print('No collections in source database.') + +# different servers else: - print('No keys with keymask %s found in source database' % keymask) \ No newline at end of file + import subprocess + import os + import shutil + + print('Migrating between different servers...') + print('Dumping...') + out = subprocess.check_output([ + 'mongodump', + '--host', '%s' % from_host, + '-u', '%s' % from_user, + '-p', '%s' % from_pass, + '--authenticationDatabase', '%s' % from_auth_db, + '--db', '%s' % from_db, + '--port', '%s' % from_port, + '-o', '%s' % dump_folder, + ], stderr= subprocess.STDOUT) + print('Dump complete.') + + print('Restoring...') + out = subprocess.check_output([ + 'mongorestore', + '--host', '%s' % to_host, + '-u', '%s' % to_user, + '-p', '%s' % to_pass, + '--authenticationDatabase', '%s' % to_auth_db, + '--db', '%s' % to_db, + '--port', '%s' % to_port, + '--drop', '%s/%s' % (dump_folder, from_db), + ], stderr= subprocess.STDOUT) + print('Restore complete.') +print('Migration complete.') diff --git a/data/scripts/migrator/dbsmerge_redis.py b/data/scripts/migrator/dbsmerge_redis.py index 91a5d1ece..454ef6b7c 100755 --- a/data/scripts/migrator/dbsmerge_redis.py +++ 
b/data/scripts/migrator/dbsmerge_redis.py @@ -8,28 +8,24 @@ # behaviour: # ^ the script will not overwrite keys on the destination server/database +from_host = '127.0.0.1' +from_port = 6379 from_db = 11 +from_pass = '' + +to_host = '127.0.0.1' +to_port = 6379 to_db = 10 +to_pass = '' # Not used keymask = '*' timeout = 2000 import time import redis -import argparse -parser = argparse.ArgumentParser() -parser.add_argument("-host", "--host",default="127.0.0.1", help='default: "127.0.0.1"') -parser.add_argument("-port", "--port", type=int ,default=6379, help='default: 6379') -parser.add_argument("-pass", "--password", default="", help='default: ""') - -args = parser.parse_args() - -from_host = args.host -from_port = args.port -from_pass = args.password from_redis = redis.Redis(host = from_host, port = from_port, password=from_pass, db = from_db) -to_redis = redis.Redis(host = from_host, port = from_port, db = to_db) +to_redis = redis.Redis(host = to_host, port = to_port, db = to_db) to_keys = to_redis.keys(keymask) from_keys = from_redis.keys(keymask) @@ -38,14 +34,28 @@ print('Found %d keys on destination.' % len(to_keys)) # keys found if len(from_keys) > 0: - print('Migrating on same server...') - i = 0 - for key in from_keys: - i += 1 - print('Moving key %s (%d of %d)...' % (key, i, len(from_keys))) - from_redis.execute_command('MOVE', key, to_db) + # same server + if from_host == to_host and from_port == to_port: + print('Migrating on same server...') + i = 0 + for key in from_keys: + i += 1 + print('Moving key %s (%d of %d)...' % (key, i, len(from_keys))) + from_redis.execute_command('MOVE', key, to_db) - print('Done.') + # different servers + else: + print('Migrating between different servers...') + i = 0 + for key in from_keys: + i += 1 + print('Moving key %s (%d of %d)...' 
% (key, i, len(from_keys))), + try: + from_redis.execute_command('MIGRATE', to_host, to_port, key, to_db, timeout) + except redis.exceptions.ResponseError, e: + if not 'ERR Target key name is busy' in str(e): + raise e + print('Done.') # done from_keys_after = from_redis.keys(keymask) to_keys_after = to_redis.keys(keymask) @@ -55,4 +65,4 @@ if len(from_keys) > 0: print('Migration complete.') # no keys found else: - print('No keys with keymask %s found in source database' % keymask) + print('No keys with keymask %s found in source database' % keymask) \ No newline at end of file diff --git a/data/storage/migrator/dbsmerge_mongo.py b/data/storage/migrator/dbsmerge_mongo.py new file mode 100755 index 000000000..ddc176fe8 --- /dev/null +++ b/data/storage/migrator/dbsmerge_mongo.py @@ -0,0 +1,101 @@ +#!/usr/bin/python + +# depends: +# ^ pymongo # install via: easy_install pymongo +# behaviour: +# ^ the script will "move" the collections if source and target server are the same +# but will "copy" (dump/restore) if source and target servers are different + +from_host = '127.0.0.1' +from_port = '27017' +from_db = '11' +from_auth_db = 'cgrates' # Auth db on source server +from_user = 'cgrates' +from_pass = '' + +to_host = '127.0.0.1' +to_port = '27017' +to_db = '10' +to_auth_db = "cgrates" # Auth db on target server +to_user = 'cgrates' +to_pass = '' + +ignore_empty_cols = True +# Do not migrate collections with 0 document count. +# Works only if from/to is on same host. + +# Overwrite target collections flag. +# Works only if from/to is on same host. +# If from/to hosts are different we use mongorestore which overwrites by default. 
+drop_target = False + +dump_folder = 'dump' + +import sys +from pymongo import MongoClient +from urllib import quote_plus +from collections import OrderedDict + +# same server +if from_host == to_host and from_port == to_port: + print('Migrating on same server...') + mongo_from_url = 'mongodb://' + from_user + ':' + quote_plus(from_pass) + '@'+ from_host + ':' + from_port + '/' + from_auth_db + if from_pass == '': # disabled auth + mongo_from_url = 'mongodb://' + from_host + ':' + from_port + '/' + from_db + client = MongoClient(mongo_from_url) + + db = client[from_db] + cols = db.collection_names() + + # collections found + if len(cols) > 0: + print('Found %d collections on source. Moving...' % len(cols)) + i = 0 + for col in cols: + i += 1 + if not ignore_empty_cols or (ignore_empty_cols and db[col].count() > 0): + print('Moving collection %s (%d of %d)...' % (col, i, len(cols))) + try: + client.admin.command(OrderedDict([('renameCollection', from_db + '.' + col), ('to', to_db + '.' + col), ('dropTarget', drop_target)])) + except: + e = sys.exc_info()[0] + print(e) + else: + print('Skipping empty collection %s (%d of %d)...' 
% (col, i, len(cols))) + # no collections found + else: + print('No collections in source database.') + +# different servers +else: + import subprocess + import os + import shutil + + print('Migrating between different servers...') + print('Dumping...') + out = subprocess.check_output([ + 'mongodump', + '--host', '%s' % from_host, + '-u', '%s' % from_user, + '-p', '%s' % from_pass, + '--authenticationDatabase', '%s' % from_auth_db, + '--db', '%s' % from_db, + '--port', '%s' % from_port, + '-o', '%s' % dump_folder, + ], stderr= subprocess.STDOUT) + print('Dump complete.') + + print('Restoring...') + out = subprocess.check_output([ + 'mongorestore', + '--host', '%s' % to_host, + '-u', '%s' % to_user, + '-p', '%s' % to_pass, + '--authenticationDatabase', '%s' % to_auth_db, + '--db', '%s' % to_db, + '--port', '%s' % to_port, + '--drop', '%s/%s' % (dump_folder, from_db), + ], stderr= subprocess.STDOUT) + print('Restore complete.') +print('Migration complete.') diff --git a/data/storage/migrator/dbsmerge_redis.py b/data/storage/migrator/dbsmerge_redis.py new file mode 100755 index 000000000..b3a0e2daa --- /dev/null +++ b/data/storage/migrator/dbsmerge_redis.py @@ -0,0 +1,68 @@ +#!/usr/bin/python + +# depends: +# ^ redis # install via easy_install redis +# asserts: +# ^ destination redis is not password protected when connected from source redis server +# (https://github.com/antirez/redis/pull/2507) +# behaviour: +# ^ the script will not overwrite keys on the destination server/database + +from_host = '192.168.100.40' +from_port = 6379 +from_db = 11 +from_pass = '' + +to_host = '192.168.100.40' +to_port = 6379 +to_db = 10 +to_pass = '' # Not used + +keymask = '*' +timeout = 2000 + +import time +import redis + +from_redis = redis.Redis(host = from_host, port = from_port, password=from_pass, db = from_db) +to_redis = redis.Redis(host = to_host, port = to_port, db = to_db) + +to_keys = to_redis.keys(keymask) +from_keys = from_redis.keys(keymask) +print('Found %d keys on 
source.' % len(from_keys)) +print('Found %d keys on destination.' % len(to_keys)) + +# keys found +if len(from_keys) > 0: + # same server + if from_host == to_host and from_port == to_port: + print('Migrating on same server...') + i = 0 + for key in from_keys: + i += 1 + print('Moving key %s (%d of %d)...' % (key, i, len(from_keys))) + from_redis.execute_command('MOVE', key, to_db) + + # different servers + else: + print('Migrating between different servers...') + i = 0 + for key in from_keys: + i += 1 + print('Moving key %s (%d of %d)...' % (key, i, len(from_keys))), + try: + from_redis.execute_command('MIGRATE', to_host, to_port, key, to_db, timeout) + except redis.exceptions.ResponseError, e: + if not 'ERR Target key name is busy' in str(e): + raise e + print('Done.') + # done + from_keys_after = from_redis.keys(keymask) + to_keys_after = to_redis.keys(keymask) + print('There are now %d keys on source.' % len(from_keys_after)) + print('There are now %d keys on destination.' % len(to_keys_after)) + print('%d keys were moved' % (len(to_keys_after) - len(to_keys))) + print('Migration complete.') +# no keys found +else: + print('No keys with keymask %s found in source database' % keymask) \ No newline at end of file diff --git a/data/storage/migrator/mysql_cdr_migration.sql b/data/storage/migrator/mysql_cdr_migration.sql new file mode 100755 index 000000000..d1cb6bfd3 --- /dev/null +++ b/data/storage/migrator/mysql_cdr_migration.sql @@ -0,0 +1,58 @@ +/* +This script will migrate CDRs from the old CGRateS tables to the new cdrs table +but it only migrate CDRs where the duration is > 0. +If you need CDRs also with duration is = 0 you can make the appropriate change in the line beginning WHERE cdrs_primary.usage +Also the script will process 10,000 CDRs before committing to save system resources +especially in systems where they are millions of CDRs to be migrated +You can increase or lower the value of step in the line after BEGIN below. 
+ +You have to use 'CALL cgrates.migration();' to execute the script. If named other then default use that database name. +*/ + + +DELIMITER // + +CREATE PROCEDURE `migration`() +BEGIN + /* DECLARE variables */ + DECLARE max_cdrs bigint; + DECLARE start_id bigint; + DECLARE end_id bigint; + DECLARE step bigint; + /* Optimize table for performance */ + ALTER TABLE cdrs DISABLE KEYS; + SET autocommit=0; + SET unique_checks=0; + SET foreign_key_checks=0; + /* You must change the step var to commit every step rows inserted */ + SET step := 10000; + SET start_id := 0; + SET end_id := start_id + step; + SET max_cdrs = (select max(id) from rated_cdrs); + WHILE (start_id <= max_cdrs) DO + INSERT INTO + cdrs(cgrid,run_id,origin_host,source,origin_id,tor,request_type,direction,tenant,category,account,subject,destination,setup_time,pdd,answer_time,`usage`,supplier,disconnect_cause,extra_fields,cost_source,cost,cost_details,extra_info, created_at, updated_at, deleted_at) + SELECT cdrs_primary.cgrid,rated_cdrs.runid as run_id,cdrs_primary.cdrhost as origin_host,cdrs_primary.cdrsource as source,cdrs_primary.accid as origin_id, cdrs_primary.tor,rated_cdrs.reqtype as request_type,rated_cdrs.direction, rated_cdrs.tenant,rated_cdrs.category, rated_cdrs.account, rated_cdrs.subject, rated_cdrs.destination,rated_cdrs.setup_time,rated_cdrs.pdd,rated_cdrs.answer_time,rated_cdrs.`usage`,rated_cdrs.supplier,rated_cdrs.disconnect_cause,cdrs_extra.extra_fields,cost_details.cost_source,rated_cdrs.cost,cost_details.timespans as cost_details,rated_cdrs.extra_info,rated_cdrs.created_at,rated_cdrs.updated_at, rated_cdrs.deleted_at + FROM rated_cdrs + INNER JOIN cdrs_primary ON rated_cdrs.cgrid = cdrs_primary.cgrid + INNER JOIN cdrs_extra ON rated_cdrs.cgrid = cdrs_extra.cgrid + INNER JOIN cost_details ON rated_cdrs.cgrid = cost_details.cgrid + WHERE cdrs_primary.`usage` > '0' + AND not exists (select 1 from cdrs where cdrs.cgrid = cdrs_primary.cgrid AND cdrs.run_id=rated_cdrs.runid) + AND 
rated_cdrs.id >= start_id + AND rated_cdrs.id < end_id + GROUP BY cgrid, run_id, origin_id; + SET start_id = start_id + step; + SET end_id = end_id + step; + END WHILE; + /* SET Table for live usage */ + SET autocommit=1; + SET unique_checks=1; + SET foreign_key_checks=1; + ALTER TABLE cdrs ENABLE KEYS; + OPTIMIZE TABLE cdrs; +END // + +DELIMITER ; + +CALL cgrates.migration(); \ No newline at end of file diff --git a/data/storage/migrator/mysql_tables_update.sql b/data/storage/migrator/mysql_tables_update.sql new file mode 100755 index 000000000..7cbeaf335 --- /dev/null +++ b/data/storage/migrator/mysql_tables_update.sql @@ -0,0 +1,74 @@ +use cgrates; +-- +-- Table structure for table `cdrs` +-- + +DROP TABLE IF EXISTS cdrs; +CREATE TABLE cdrs ( + id int(11) NOT NULL AUTO_INCREMENT, + cgrid char(40) NOT NULL, + run_id varchar(64) NOT NULL, + origin_host varchar(64) NOT NULL, + source varchar(64) NOT NULL, + origin_id varchar(64) NOT NULL, + tor varchar(16) NOT NULL, + request_type varchar(24) NOT NULL, + direction varchar(8) NOT NULL, + tenant varchar(64) NOT NULL, + category varchar(32) NOT NULL, + account varchar(128) NOT NULL, + subject varchar(128) NOT NULL, + destination varchar(128) NOT NULL, + setup_time datetime NOT NULL, + pdd DECIMAL(12,9) NOT NULL, + answer_time datetime NOT NULL, + `usage` DECIMAL(30,9) NOT NULL, + supplier varchar(128) NOT NULL, + disconnect_cause varchar(64) NOT NULL, + extra_fields text NOT NULL, + cost_source varchar(64) NOT NULL, + cost DECIMAL(20,4) NOT NULL, + cost_details text, + account_summary text, + extra_info text, + created_at TIMESTAMP NULL, + updated_at TIMESTAMP NULL, + deleted_at TIMESTAMP NULL, + PRIMARY KEY (id), + UNIQUE KEY cdrrun (cgrid, run_id, origin_id) +); + +DROP TABLE IF EXISTS sm_costs; +CREATE TABLE sm_costs ( + id int(11) NOT NULL AUTO_INCREMENT, + cgrid char(40) NOT NULL, + run_id varchar(64) NOT NULL, + origin_host varchar(64) NOT NULL, + origin_id varchar(64) NOT NULL, + cost_source varchar(64) NOT 
NULL, + `usage` DECIMAL(30,9) NOT NULL, + cost_details text, + created_at TIMESTAMP NULL, + deleted_at TIMESTAMP NULL, + PRIMARY KEY (`id`), + UNIQUE KEY costid (cgrid, run_id), + KEY origin_idx (origin_host, origin_id), + KEY run_origin_idx (run_id, origin_id), + KEY deleted_at_idx (deleted_at) +); + +-- +-- Table structure for table `versions` +-- + +DROP TABLE IF EXISTS versions; +CREATE TABLE versions ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `item` varchar(64) NOT NULL, + `version` int(11) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `item` (`item`) +); + + + diff --git a/data/storage/migrator/pg_cdr_migration.sql b/data/storage/migrator/pg_cdr_migration.sql new file mode 100755 index 000000000..942bfc033 --- /dev/null +++ b/data/storage/migrator/pg_cdr_migration.sql @@ -0,0 +1,43 @@ +/* +This script will migrate CDRs from the old CGRateS tables to the new cdrs table +but it only migrate CDRs where the duration is > 0. +If you need CDRs also with duration is = 0 you can make the appropriate change in the line beginning WHERE cdrs_primary.usage + +Also the script will process 10,000 CDRs before committing to save system resources +especially in systems where they are millions of CDRs to be migrated +You can increase or lower the value of step in the line after BEGIN below. 
+*/ + + +DO $$ +DECLARE + max_cdrs bigint; + start_id bigint; + end_id bigint; + step bigint; +BEGIN + /* You must change the step var to commit every step rows inserted */ + step := 10000; + start_id := 0; + end_id := start_id + step; + select max(id) INTO max_cdrs from rated_cdrs; + WHILE start_id <= max_cdrs + LOOP + --RAISE NOTICE '%', (to_char(start_id, '99999999') || '-' || to_char(end_id, '99999999')); + INSERT INTO + cdrs(cgrid,run_id,origin_host,source,origin_id,tor,request_type,direction,tenant,category,account,subject,destination,setup_time,pdd,answer_time,usage,supplier,disconnect_cause,extra_fields,cost_source,cost,cost_details,extra_info, created_at, updated_at, deleted_at) + SELECT cdrs_primary.cgrid,rated_cdrs.runid as run_id,cdrs_primary.cdrhost as origin_host,cdrs_primary.cdrsource as source,cdrs_primary.accid as origin_id, cdrs_primary.tor,rated_cdrs.reqtype as request_type,rated_cdrs.direction, rated_cdrs.tenant,rated_cdrs.category, rated_cdrs.account, rated_cdrs.subject, rated_cdrs.destination,rated_cdrs.setup_time,rated_cdrs.pdd,rated_cdrs.answer_time,rated_cdrs.usage,rated_cdrs.supplier,rated_cdrs.disconnect_cause,cdrs_extra.extra_fields,cost_details.cost_source,rated_cdrs.cost,cost_details.timespans as cost_details,rated_cdrs.extra_info,rated_cdrs.created_at,rated_cdrs.updated_at, rated_cdrs.deleted_at + FROM rated_cdrs + INNER JOIN cdrs_primary ON rated_cdrs.cgrid = cdrs_primary.cgrid + INNER JOIN cdrs_extra ON rated_cdrs.cgrid = cdrs_extra.cgrid + INNER JOIN cost_details ON rated_cdrs.cgrid = cost_details.cgrid + WHERE cdrs_primary.usage > '0' + AND not exists (select 1 from cdrs c where c.cgrid = cdrs_primary.cgrid) + AND rated_cdrs.id >= start_id + AND rated_cdrs.id < end_id + ; + start_id = start_id + step; + end_id = end_id + step; + END LOOP; +END +$$; diff --git a/data/storage/migrator/pg_tables_update.sql b/data/storage/migrator/pg_tables_update.sql new file mode 100755 index 000000000..05437783e --- /dev/null +++ 
b/data/storage/migrator/pg_tables_update.sql @@ -0,0 +1,79 @@ +-- +-- Table structure for table `cdrs` +-- + +DROP TABLE IF EXISTS cdrs; +CREATE TABLE cdrs ( + id SERIAL PRIMARY KEY, + cgrid CHAR(40) NOT NULL, + run_id VARCHAR(64) NOT NULL, + origin_host VARCHAR(64) NOT NULL, + source VARCHAR(64) NOT NULL, + origin_id VARCHAR(64) NOT NULL, + tor VARCHAR(16) NOT NULL, + request_type VARCHAR(24) NOT NULL, + direction VARCHAR(8) NOT NULL, + tenant VARCHAR(64) NOT NULL, + category VARCHAR(32) NOT NULL, + account VARCHAR(128) NOT NULL, + subject VARCHAR(128) NOT NULL, + destination VARCHAR(128) NOT NULL, + setup_time TIMESTAMP WITH TIME ZONE NOT NULL, + pdd NUMERIC(12,9) NOT NULL, + answer_time TIMESTAMP WITH TIME ZONE NOT NULL, + usage NUMERIC(30,9) NOT NULL, + supplier VARCHAR(128) NOT NULL, + disconnect_cause VARCHAR(64) NOT NULL, + extra_fields jsonb NOT NULL, + cost_source VARCHAR(64) NOT NULL, + cost NUMERIC(20,4) DEFAULT NULL, + cost_details jsonb, + account_summary jsonb, + extra_info text, + created_at TIMESTAMP WITH TIME ZONE, + updated_at TIMESTAMP WITH TIME ZONE NULL, + deleted_at TIMESTAMP WITH TIME ZONE NULL, + UNIQUE (cgrid, run_id, origin_id) +); +; +DROP INDEX IF EXISTS deleted_at_cp_idx; +CREATE INDEX deleted_at_cp_idx ON cdrs (deleted_at); + + +DROP TABLE IF EXISTS sm_costs; +CREATE TABLE sm_costs ( + id SERIAL PRIMARY KEY, + cgrid CHAR(40) NOT NULL, + run_id VARCHAR(64) NOT NULL, + origin_host VARCHAR(64) NOT NULL, + origin_id VARCHAR(64) NOT NULL, + cost_source VARCHAR(64) NOT NULL, + usage NUMERIC(30,9) NOT NULL, + cost_details jsonb, + created_at TIMESTAMP WITH TIME ZONE, + deleted_at TIMESTAMP WITH TIME ZONE NULL, + UNIQUE (cgrid, run_id) +); +DROP INDEX IF EXISTS cgrid_smcost_idx; +CREATE INDEX cgrid_smcost_idx ON sm_costs (cgrid, run_id); +DROP INDEX IF EXISTS origin_smcost_idx; +CREATE INDEX origin_smcost_idx ON sm_costs (origin_host, origin_id); +DROP INDEX IF EXISTS run_origin_smcost_idx; +CREATE INDEX run_origin_smcost_idx ON sm_costs 
(run_id, origin_id); +DROP INDEX IF EXISTS deleted_at_smcost_idx; +CREATE INDEX deleted_at_smcost_idx ON sm_costs (deleted_at); + +-- +-- Table structure for table `versions` +-- + +DROP TABLE IF EXISTS versions; +CREATE TABLE versions ( + "id" SERIAL PRIMARY KEY, + "item" varchar(64) NOT NULL, + "version" INTEGER NOT NULL, + UNIQUE (item) +); + + + diff --git a/data/storage/migrator/rc7_to_rc8.sh b/data/storage/migrator/rc7_to_rc8.sh new file mode 100755 index 000000000..f5e3c24ee --- /dev/null +++ b/data/storage/migrator/rc7_to_rc8.sh @@ -0,0 +1,56 @@ +#! /usr/bin/env sh + +if test $# -lt 2; then + echo "" + echo "setup_cgr_db.sh " + echo "" + exit 0 +fi + + +user=$3 +if [ -z "$3" ]; then + user="cgrates" +fi + +host=$4 +if [ -z "$4" ]; then + host="localhost" +fi +export PGPASSWORD="CGRateS.org" + +DIR="$(dirname "$(readlink -f "$0")")" + + +case $1 in +"redis") +./dbsmerge_redis.py +;; +"mongo") +./dbsmerge_mongo.py +;; +esac + +case $2 in + "mysql") +mysql -u$user -p$PGPASSWORD -h $host < "$DIR"/mysql_tables_update.sql +up=$? +mysql -u$user -p$PGPASSWORD -h $host -D cgrates < "$DIR"/mysql_cdr_migration.sql +mig=$? +#./usage_mysql.py What's the point of those changes? +;; +"postgres") +psql -U $user -h $host -d cgrates -f "$DIR"/pq_tables_update.sql +up=$? +psql -U $user -h $host -d cgrates -f "$DIR"/pg_cdr_migration.sql +mig=$? +#./usage_postgres.py What's the point of those changes? +;; +esac + +if [ $up = 0 ] && [ $mig = 0 ]; then + echo -e "\n\t+++ CGR-DB successfully set-up! 
+++\n" + exit 0 +fi + + diff --git a/data/storage/migrator/usage_mysql.py b/data/storage/migrator/usage_mysql.py new file mode 100755 index 000000000..2d062c87c --- /dev/null +++ b/data/storage/migrator/usage_mysql.py @@ -0,0 +1,40 @@ +#!/usr/bin/python + +# depends: +# ^ mysql (debian: python-mysql.connector) + +host = '192.168.100.40' +port = 3306 +database = 'cgrates' +user = 'root' +password = 'CGRateS.org' + +import mysql.connector + +config = { + 'user': user, + 'password': password, + 'host': host, + 'port': port, + 'database': database, +} + +print('Connecting to MySQL...') +cnx = mysql.connector.connect(**config) +cursor = cnx.cursor() + +print('Renaming old column...') +cursor.execute('ALTER TABLE cdrs CHANGE COLUMN `usage` `usage_old` DECIMAL(30,9)') + +print('Adding new column...') +cursor.execute('ALTER TABLE cdrs ADD `usage` DECIMAL(30)') + +print('Setting new values...') +cursor.execute('UPDATE cdrs SET `usage` = `usage_old` * 1000000000 WHERE usage_old IS NOT NULL') + +print('Deleting old column...') +cursor.execute('ALTER TABLEX cdrs DROP COLUMN usage_old') + +print('Closing MySQL connection...') +cnx.close() + diff --git a/data/storage/migrator/usage_postgres.py b/data/storage/migrator/usage_postgres.py new file mode 100755 index 000000000..621321f8a --- /dev/null +++ b/data/storage/migrator/usage_postgres.py @@ -0,0 +1,40 @@ +#!/usr/bin/python + +# depends: +# ^ psycopg2 (debian: python-psycopg2) + +host = '127.0.0.1' +port = 5432 +database = 'cgrates' +user = 'cgrates' +password = 'CGRateS.org' + +import psycopg2 + +print('Connecting to PostgreSQL...') +cnx = psycopg2.connect( + host=host, + port=port, + dbname=database, + user=user, + password=password + ) +cursor = cnx.cursor() + +print('Renaming old column...') +cursor.execute('ALTER TABLE cdrs RENAME COLUMN usage to usage_old') + +print('Adding new column...') +cursor.execute('ALTER TABLE cdrs ADD usage NUMERIC(30)') + +print('Setting new values...') +cursor.execute('UPDATE cdrs SET usage = 
usage_old * 1000000000 WHERE usage_old IS NOT NULL') + +print('Deleting old column...') +cursor.execute('ALTER TABLE cdrs DROP COLUMN usage_old') + +print('Commiting...') +cnx.commit() + +print('Closing PostgreSQL connection...') +cnx.close() From 40cd82dc263fdaffe49a1c2a963f183dbcd82883 Mon Sep 17 00:00:00 2001 From: edwardro22 Date: Tue, 12 Sep 2017 08:45:53 +0000 Subject: [PATCH 04/10] Added Tenant field to TPstats --- apier/v1/tpstats.go | 2 + apier/v1/tpstats_it_test.go | 1 + data/storage/migrator/dbsmerge_mongo.py | 25 +++--- data/storage/migrator/dbsmerge_redis.py | 13 +++ data/storage/migrator/rc7_to_rc8.sh | 84 +++++++++++++++---- .../mysql/create_tariffplan_tables.sql | 1 + .../postgres/create_tariffplan_tables.sql | 1 + data/tariffplans/testtp/Stats.csv | 4 +- data/tariffplans/tutorial/Stats.csv | 4 +- engine/loader_csv_test.go | 6 +- engine/model_helpers.go | 3 + engine/models.go | 25 +++--- engine/storage_sql.go | 2 +- engine/stordb_it_test.go | 1 + utils/apitpdata.go | 1 + 15 files changed, 125 insertions(+), 48 deletions(-) diff --git a/apier/v1/tpstats.go b/apier/v1/tpstats.go index 393fb13dd..b8042f63f 100644 --- a/apier/v1/tpstats.go +++ b/apier/v1/tpstats.go @@ -36,6 +36,7 @@ func (self *ApierV1) SetTPStat(attr utils.TPStats, reply *string) error { type AttrGetTPStat struct { TPid string // Tariff plan id + Tenant string ID string } @@ -57,6 +58,7 @@ func (self *ApierV1) GetTPStat(attr AttrGetTPStat, reply *utils.TPStats) error { type AttrGetTPStatIds struct { TPid string // Tariff plan id + Tenant string utils.Paginator } diff --git a/apier/v1/tpstats_it_test.go b/apier/v1/tpstats_it_test.go index 12fefc3a3..39c1539bc 100644 --- a/apier/v1/tpstats_it_test.go +++ b/apier/v1/tpstats_it_test.go @@ -126,6 +126,7 @@ func testTPStatsGetTPStatBeforeSet(t *testing.T) { func testTPStatsSetTPStat(t *testing.T) { tpStat = &utils.TPStats{ + Tenant: "Tester", TPid: "TPS1", ID: "Stat1", Filters: []*utils.TPRequestFilter{ diff --git 
a/data/storage/migrator/dbsmerge_mongo.py b/data/storage/migrator/dbsmerge_mongo.py index ddc176fe8..181dd90e6 100755 --- a/data/storage/migrator/dbsmerge_mongo.py +++ b/data/storage/migrator/dbsmerge_mongo.py @@ -6,19 +6,20 @@ # ^ the script will "move" the collections if source and target server are the same # but will "copy" (dump/restore) if source and target servers are different -from_host = '127.0.0.1' -from_port = '27017' -from_db = '11' -from_auth_db = 'cgrates' # Auth db on source server -from_user = 'cgrates' -from_pass = '' -to_host = '127.0.0.1' -to_port = '27017' -to_db = '10' -to_auth_db = "cgrates" # Auth db on target server -to_user = 'cgrates' -to_pass = '' +from_host =os.environ["from_host"] +from_port =os.environ["from_port"] +from_auth_db =os.environ["from_auth_db"] +from_user =os.environ["from_user"] +from_pass =os.environ["from_pass"] + +to_host =os.environ["to_host"] +to_port =os.environ["to_port"] +to_auth_db =os.environ["to_auth_db"] +to_user =os.environ["to_user"] +to_pass =os.environ["to_pass"] + + ignore_empty_cols = True # Do not migrate collections with 0 document count. 
diff --git a/data/storage/migrator/dbsmerge_redis.py b/data/storage/migrator/dbsmerge_redis.py index b3a0e2daa..c72a1dcb6 100755 --- a/data/storage/migrator/dbsmerge_redis.py +++ b/data/storage/migrator/dbsmerge_redis.py @@ -23,6 +23,19 @@ timeout = 2000 import time import redis +import os + +from_host =os.environ["from_host"] +from_port =os.environ["from_port"] +from_db =os.environ["from_db"] +from_pass =os.environ["from_pass"] + +to_host =os.environ["to_host"] +to_port =os.environ["to_port"] +to_db =os.environ["to_db"] +to_pass =os.environ["to_pass"] + + from_redis = redis.Redis(host = from_host, port = from_port, password=from_pass, db = from_db) to_redis = redis.Redis(host = to_host, port = to_port, db = to_db) diff --git a/data/storage/migrator/rc7_to_rc8.sh b/data/storage/migrator/rc7_to_rc8.sh index f5e3c24ee..581ff54fb 100755 --- a/data/storage/migrator/rc7_to_rc8.sh +++ b/data/storage/migrator/rc7_to_rc8.sh @@ -1,28 +1,80 @@ #! /usr/bin/env sh -if test $# -lt 2; then - echo "" - echo "setup_cgr_db.sh " - echo "" - exit 0 +#settings + + echo "" + echo "rc7_to_rc8.sh" + echo "" + +$datadb="redis" + +if [$datadb="redis"];then +#Redis Config +export from_host = '192.168.100.40' +export from_port = 6379 +export from_db = 11 +export from_pass = '' + +export to_host = '192.168.100.40' +export to_port = 6379 +export to_db = 10 +export to_pass = '' # Not used + +else if [$datadb="mongo"];then +#Mongo Config +export from_host = '127.0.0.1' +export from_port = '27017' +export from_db = '11' +export from_auth_db = 'cgrates' # Auth db on source server +export from_user = 'cgrates' +export from_pass = '' + +export to_host = '127.0.0.1' +export to_port = '27017' +export to_db = '10' +export to_auth_db = "cgrates" # Auth db on target server +export to_user = 'cgrates' +export to_pass = '' fi -user=$3 -if [ -z "$3" ]; then - user="cgrates" + + +if [$stordb="redis"];then +#Redis Config +export from_host = '192.168.100.40' +export from_port = 6379 +export from_db = 11 
+export from_pass = '' + +export to_host = '192.168.100.40' +export to_port = 6379 +export to_db = 10 +export to_pass = '' # Not used + +else if [$stordb="mysql"];then +#Mongo Config +export from_host = '127.0.0.1' +export from_port = '27017' +export from_db = '11' +export from_auth_db = 'cgrates' # Auth db on source server +export from_user = 'cgrates' +export from_pass = '' + +export to_host = '127.0.0.1' +export to_port = '27017' +export to_db = '10' +export to_auth_db = "cgrates" # Auth db on target server +export to_user = 'cgrates' +export to_pass = '' fi -host=$4 -if [ -z "$4" ]; then - host="localhost" -fi -export PGPASSWORD="CGRateS.org" + DIR="$(dirname "$(readlink -f "$0")")" -case $1 in +case $datadb in "redis") ./dbsmerge_redis.py ;; @@ -31,20 +83,18 @@ case $1 in ;; esac -case $2 in +case $stordb in "mysql") mysql -u$user -p$PGPASSWORD -h $host < "$DIR"/mysql_tables_update.sql up=$? mysql -u$user -p$PGPASSWORD -h $host -D cgrates < "$DIR"/mysql_cdr_migration.sql mig=$? -#./usage_mysql.py What's the point of those changes? ;; "postgres") psql -U $user -h $host -d cgrates -f "$DIR"/pq_tables_update.sql up=$? psql -U $user -h $host -d cgrates -f "$DIR"/pg_cdr_migration.sql mig=$? -#./usage_postgres.py What's the point of those changes? 
;; esac diff --git a/data/storage/mysql/create_tariffplan_tables.sql b/data/storage/mysql/create_tariffplan_tables.sql index 2682c82de..5f74cd89e 100644 --- a/data/storage/mysql/create_tariffplan_tables.sql +++ b/data/storage/mysql/create_tariffplan_tables.sql @@ -423,6 +423,7 @@ CREATE TABLE tp_resources ( DROP TABLE IF EXISTS tp_stats; CREATE TABLE tp_stats ( + `tenant` varchar(64) NOT NULL, `id` int(11) NOT NULL AUTO_INCREMENT, `tpid` varchar(64) NOT NULL, `tag` varchar(64) NOT NULL, diff --git a/data/storage/postgres/create_tariffplan_tables.sql b/data/storage/postgres/create_tariffplan_tables.sql index 28098a123..5bb88688c 100644 --- a/data/storage/postgres/create_tariffplan_tables.sql +++ b/data/storage/postgres/create_tariffplan_tables.sql @@ -419,6 +419,7 @@ CREATE INDEX tp_resources_unique ON tp_resources ("tpid", "tag", "filter_type", DROP TABLE IF EXISTS tp_stats; CREATE TABLE tp_stats ( + "tenant"varchar(64) NOT NULL, "id" SERIAL PRIMARY KEY, "tpid" varchar(64) NOT NULL, "tag" varchar(64) NOT NULL, diff --git a/data/tariffplans/testtp/Stats.csv b/data/tariffplans/testtp/Stats.csv index e3c87b60a..93311a0e6 100755 --- a/data/tariffplans/testtp/Stats.csv +++ b/data/tariffplans/testtp/Stats.csv @@ -1,2 +1,2 @@ -#Id[0],FilterType[1],FilterFieldName[2],FilterFieldValues[3],ActivationInterval[4],QueueLength[5],TTL[6],Metrics[7],Blocker[8],Stored[9],Weight[10],Thresholds[11] -Stats1,*string,Account,1001;1002,2014-07-29T15:00:00Z,100,1s,*asr;*acd;*acc,true,true,20,THRESH1;THRESH2 +#Tenant[0],Id[1],FilterType[2],FilterFieldName[3],FilterFieldValues[4],ActivationInterval[5],QueueLength[6],TTL[7],Metrics[8],Blocker[9],Stored[10],Weight[11],Thresholds[12] +Tester,Stats1,*string,Account,1001;1002,2014-07-29T15:00:00Z,100,1s,*asr;*acd;*acc,true,true,20,THRESH1;THRESH2 \ No newline at end of file diff --git a/data/tariffplans/tutorial/Stats.csv b/data/tariffplans/tutorial/Stats.csv index e3c87b60a..93311a0e6 100755 --- a/data/tariffplans/tutorial/Stats.csv +++ 
b/data/tariffplans/tutorial/Stats.csv @@ -1,2 +1,2 @@ -#Id[0],FilterType[1],FilterFieldName[2],FilterFieldValues[3],ActivationInterval[4],QueueLength[5],TTL[6],Metrics[7],Blocker[8],Stored[9],Weight[10],Thresholds[11] -Stats1,*string,Account,1001;1002,2014-07-29T15:00:00Z,100,1s,*asr;*acd;*acc,true,true,20,THRESH1;THRESH2 +#Tenant[0],Id[1],FilterType[2],FilterFieldName[3],FilterFieldValues[4],ActivationInterval[5],QueueLength[6],TTL[7],Metrics[8],Blocker[9],Stored[10],Weight[11],Thresholds[12] +Tester,Stats1,*string,Account,1001;1002,2014-07-29T15:00:00Z,100,1s,*asr;*acd;*acc,true,true,20,THRESH1;THRESH2 \ No newline at end of file diff --git a/engine/loader_csv_test.go b/engine/loader_csv_test.go index 6a79b3b9e..d472eac1d 100755 --- a/engine/loader_csv_test.go +++ b/engine/loader_csv_test.go @@ -273,12 +273,13 @@ ResGroup21,*rsr_fields,,HdrSubject(~^1.*1$);HdrDestination(1002),,,,,,,, ResGroup22,*destinations,HdrDestination,DST_FS,2014-07-29T15:00:00Z,3600s,2,premium_call,true,true,10, ` stats = ` -#Id[0],FilterType[1],FilterFieldName[2],FilterFieldValues[3],ActivationInterval[4],QueueLength[5],TTL[6],Metrics[7],Blocker[8],Stored[9],Weight[10],Thresholds[11] -Stats1,*string,Account,1001;1002,2014-07-29T15:00:00Z,100,1s,*asr;*acd;*acc,true,true,20,THRESH1;THRESH2 +#Tenant[0],Id[1],FilterType[2],FilterFieldName[3],FilterFieldValues[4],ActivationInterval[5],QueueLength[6],TTL[7],Metrics[8],Blocker[9],Stored[10],Weight[11],Thresholds[12] +Tester,Stats1,*string,Account,1001;1002,2014-07-29T15:00:00Z,100,1s,*asr;*acd;*acc,true,true,20,THRESH1;THRESH2 ` thresholds = ` #Id[0],FilterType[1],FilterFieldName[2],FilterFieldValues[3],ActivationInterval[4],ThresholdType[5],ThresholdValue[6],MinItems[7],Recurrent[8],MinSleep[9],Blocker[10],Stored[11],Weight[12],ActionIDs[13] Threshold1,*string,Account,1001;1002,2014-07-29T15:00:00Z,,1.2,10,true,1s,true,true,10, + ` ) @@ -1435,6 +1436,7 @@ func TestLoadResourceProfiles(t *testing.T) { func TestLoadStats(t *testing.T) { eStats := 
map[string]*utils.TPStats{ "Stats1": &utils.TPStats{ + Tenant:"Tester", TPid: testTPID, ID: "Stats1", Filters: []*utils.TPRequestFilter{ diff --git a/engine/model_helpers.go b/engine/model_helpers.go index b53bda988..11b6d8500 100755 --- a/engine/model_helpers.go +++ b/engine/model_helpers.go @@ -1960,6 +1960,7 @@ func (tps TpStatsS) AsTPStats() (result []*utils.TPStats) { st, found := mst[tp.Tag] if !found { st = &utils.TPStats{ + Tenant: tp.Tenant, TPid: tp.Tpid, ID: tp.Tag, Blocker: tp.Blocker, @@ -2027,6 +2028,7 @@ func APItoModelStats(st *utils.TPStats) (mdls TpStatsS) { } for i, fltr := range st.Filters { mdl := &TpStats{ + Tenant: st.Tenant, Tpid: st.TPid, Tag: st.ID, } @@ -2072,6 +2074,7 @@ func APItoModelStats(st *utils.TPStats) (mdls TpStatsS) { func APItoStats(tpST *utils.TPStats, timezone string) (st *StatQueueProfile, err error) { st = &StatQueueProfile{ + Tenant: tpST.Tenant, ID: tpST.ID, QueueLength: tpST.QueueLength, Weight: tpST.Weight, diff --git a/engine/models.go b/engine/models.go index c0caf7364..bc03cd5b6 100755 --- a/engine/models.go +++ b/engine/models.go @@ -482,18 +482,19 @@ func (t TBLVersion) TableName() string { type TpStats struct { ID int64 Tpid string - Tag string `index:"0" re:""` - FilterType string `index:"1" re:"^\*[A-Za-z].*"` - FilterFieldName string `index:"2" re:""` - FilterFieldValues string `index:"3" re:""` - ActivationInterval string `index:"4" re:""` - QueueLength int `index:"5" re:""` - TTL string `index:"6" re:""` - Metrics string `index:"7" re:""` - Blocker bool `index:"8" re:""` - Stored bool `index:"9" re:""` - Weight float64 `index:"10" re:"\d+\.?\d*"` - Thresholds string `index:"11" re:""` + Tenant string `index:"0" re:""` + Tag string `index:"1" re:""` + FilterType string `index:"2" re:"^\*[A-Za-z].*"` + FilterFieldName string `index:"3" re:""` + FilterFieldValues string `index:"4" re:""` + ActivationInterval string `index:"5" re:""` + QueueLength int `index:"6" re:""` + TTL string `index:"7" re:""` + Metrics 
string `index:"8" re:""` + Blocker bool `index:"9" re:""` + Stored bool `index:"10" re:""` + Weight float64 `index:"11" re:"\d+\.?\d*"` + Thresholds string `index:"12" re:""` CreatedAt time.Time } diff --git a/engine/storage_sql.go b/engine/storage_sql.go index a7263cab0..7cd83c7aa 100755 --- a/engine/storage_sql.go +++ b/engine/storage_sql.go @@ -588,7 +588,7 @@ func (self *SQLStorage) SetTPStats(sts []*utils.TPStats) error { tx := self.db.Begin() for _, stq := range sts { // Remove previous - if err := tx.Where(&TpStats{Tpid: stq.TPid, Tag: stq.ID}).Delete(TpStats{}).Error; err != nil { + if err := tx.Where(&TpStats{Tpid: stq.TPid,Tenant: stq.Tenant,Tag: stq.ID}).Delete(TpStats{}).Error; err != nil { tx.Rollback() return err } diff --git a/engine/stordb_it_test.go b/engine/stordb_it_test.go index cb7fb6840..3d4f5ab12 100755 --- a/engine/stordb_it_test.go +++ b/engine/stordb_it_test.go @@ -1541,6 +1541,7 @@ func testStorDBitCRUDTpStats(t *testing.T) { eTPs := []*utils.TPStats{ &utils.TPStats{ TPid: "TEST_TPID", + Tenant:"Test", ID: "Stats1", Filters: []*utils.TPRequestFilter{ &utils.TPRequestFilter{ diff --git a/utils/apitpdata.go b/utils/apitpdata.go index e894171e7..754088e49 100755 --- a/utils/apitpdata.go +++ b/utils/apitpdata.go @@ -1335,6 +1335,7 @@ type AttrDisconnectSession struct { // TPStats is used in APIs to manage remotely offline Stats config type TPStats struct { TPid string + Tenant string ID string Filters []*TPRequestFilter ActivationInterval *TPActivationInterval From 2abe6b69c402c7f1b740d5343e269bbdb1171557 Mon Sep 17 00:00:00 2001 From: edwardro22 Date: Tue, 12 Sep 2017 15:24:27 +0000 Subject: [PATCH 05/10] Fixed the script --- data/storage/migrator/dbsmerge_mongo.py | 31 +++-- data/storage/migrator/dbsmerge_redis.py | 29 ++-- data/storage/migrator/mysql_cdr_migration.sql | 7 +- data/storage/migrator/pg_cdr_migration.sql | 4 + data/storage/migrator/rc7_to_rc8.sh | 124 ++++++++---------- data/storage/migrator/usage_mysql.py | 40 ------ 
data/storage/migrator/usage_postgres.py | 40 ------ 7 files changed, 91 insertions(+), 184 deletions(-) delete mode 100755 data/storage/migrator/usage_mysql.py delete mode 100755 data/storage/migrator/usage_postgres.py diff --git a/data/storage/migrator/dbsmerge_mongo.py b/data/storage/migrator/dbsmerge_mongo.py index 181dd90e6..fb8d3f6d7 100755 --- a/data/storage/migrator/dbsmerge_mongo.py +++ b/data/storage/migrator/dbsmerge_mongo.py @@ -6,21 +6,6 @@ # ^ the script will "move" the collections if source and target server are the same # but will "copy" (dump/restore) if source and target servers are different - -from_host =os.environ["from_host"] -from_port =os.environ["from_port"] -from_auth_db =os.environ["from_auth_db"] -from_user =os.environ["from_user"] -from_pass =os.environ["from_pass"] - -to_host =os.environ["to_host"] -to_port =os.environ["to_port"] -to_auth_db =os.environ["to_auth_db"] -to_user =os.environ["to_user"] -to_pass =os.environ["to_pass"] - - - ignore_empty_cols = True # Do not migrate collections with 0 document count. # Works only if from/to is on same host. 
@@ -31,12 +16,26 @@ ignore_empty_cols = True drop_target = False dump_folder = 'dump' - +import os import sys from pymongo import MongoClient from urllib import quote_plus from collections import OrderedDict +from_host =os.environ["cgr_from_host"] +from_port =os.environ["cgr_from_port"] +from_db =os.environ["cgr_from_db"] +from_auth_db =os.environ["cgr_from_auth_db"] +from_user =os.environ["cgr_from_user"] +from_pass =os.environ["cgr_from_pass"] + +to_host =os.environ["cgr_to_host"] +to_port =os.environ["cgr_to_port"] +to_db =os.environ["cgr_to_db"] +to_auth_db =os.environ["cgr_to_auth_db"] +to_user =os.environ["cgr_to_user"] +to_pass =os.environ["cgr_to_pass"] + # same server if from_host == to_host and from_port == to_port: print('Migrating on same server...') diff --git a/data/storage/migrator/dbsmerge_redis.py b/data/storage/migrator/dbsmerge_redis.py index c72a1dcb6..ae7564585 100755 --- a/data/storage/migrator/dbsmerge_redis.py +++ b/data/storage/migrator/dbsmerge_redis.py @@ -7,17 +7,6 @@ # (https://github.com/antirez/redis/pull/2507) # behaviour: # ^ the script will not overwrite keys on the destination server/database - -from_host = '192.168.100.40' -from_port = 6379 -from_db = 11 -from_pass = '' - -to_host = '192.168.100.40' -to_port = 6379 -to_db = 10 -to_pass = '' # Not used - keymask = '*' timeout = 2000 @@ -25,17 +14,15 @@ import time import redis import os -from_host =os.environ["from_host"] -from_port =os.environ["from_port"] -from_db =os.environ["from_db"] -from_pass =os.environ["from_pass"] - -to_host =os.environ["to_host"] -to_port =os.environ["to_port"] -to_db =os.environ["to_db"] -to_pass =os.environ["to_pass"] - +from_host =str(os.environ["cgr_from_host"]) +from_port = int(os.environ["cgr_from_port"]) +from_db =int(os.environ["cgr_from_db"]) +from_pass =os.environ["cgr_from_pass"] +to_host =os.environ["cgr_to_host"] +to_port =int(os.environ["cgr_to_port"]) +to_db =int(os.environ["cgr_to_db"]) +# to_pass =os.environ["cgr_to_pass"] # Not used 
 from_redis = redis.Redis(host = from_host, port = from_port, password=from_pass, db = from_db)
 to_redis = redis.Redis(host = to_host, port = to_port, db = to_db)
diff --git a/data/storage/migrator/mysql_cdr_migration.sql b/data/storage/migrator/mysql_cdr_migration.sql
index d1cb6bfd3..f2a9d32c2 100755
--- a/data/storage/migrator/mysql_cdr_migration.sql
+++ b/data/storage/migrator/mysql_cdr_migration.sql
@@ -55,4 +55,9 @@ END //
 DELIMITER ;
-CALL cgrates.migration();
\ No newline at end of file
+CALL cgrates.migration();
+
+ALTER TABLE cdrs CHANGE COLUMN `usage` `usage_old` DECIMAL(30,9);
+ALTER TABLE cdrs ADD `usage` DECIMAL(30);
+UPDATE cdrs SET `usage` = `usage_old` * 1000000000 WHERE usage_old IS NOT NULL;
+ALTER TABLE cdrs DROP COLUMN usage_old;
\ No newline at end of file
diff --git a/data/storage/migrator/pg_cdr_migration.sql b/data/storage/migrator/pg_cdr_migration.sql
index 942bfc033..bb562217d 100755
--- a/data/storage/migrator/pg_cdr_migration.sql
+++ b/data/storage/migrator/pg_cdr_migration.sql
@@ -41,3 +41,7 @@ BEGIN
 END LOOP;
 END
 $$;
+ALTER TABLE cdrs RENAME COLUMN usage to usage_old;
+ALTER TABLE cdrs ADD usage NUMERIC(30);
+UPDATE cdrs SET usage = usage_old * 1000000000 WHERE usage_old IS NOT NULL;
+ALTER TABLE cdrs DROP COLUMN usage_old;
\ No newline at end of file
diff --git a/data/storage/migrator/rc7_to_rc8.sh b/data/storage/migrator/rc7_to_rc8.sh
index 581ff54fb..3f4b3b809 100755
--- a/data/storage/migrator/rc7_to_rc8.sh
+++ b/data/storage/migrator/rc7_to_rc8.sh
@@ -1,95 +1,89 @@
 #! 
/usr/bin/env sh - -#settings - echo "" echo "rc7_to_rc8.sh" +#settings + +#DBs Config +datadb="redis" +stordb="mysql" + echo "dataDB:"$datadb " storDB:"$stordb echo "" - -$datadb="redis" - -if [$datadb="redis"];then +#dataDBs +case $datadb in +"redis") #Redis Config -export from_host = '192.168.100.40' -export from_port = 6379 -export from_db = 11 -export from_pass = '' +export cgr_from_host='127.0.0.1' +export cgr_from_port=6379 +export cgr_from_db=11 +export cgr_from_pass='' -export to_host = '192.168.100.40' -export to_port = 6379 -export to_db = 10 -export to_pass = '' # Not used - -else if [$datadb="mongo"];then +export cgr_to_host='127.0.0.1' +export cgr_to_port=6379 +export cgr_to_db=10 +export cgr_to_pass='' # Not used +;; +"mongo") #Mongo Config -export from_host = '127.0.0.1' -export from_port = '27017' -export from_db = '11' -export from_auth_db = 'cgrates' # Auth db on source server -export from_user = 'cgrates' -export from_pass = '' +export cgr_from_host='127.0.0.1' +export cgr_from_port='27017' +export cgr_from_db='11' +export cgr_from_auth_db='cgrates' # Auth db on source server +export cgr_from_user='cgrates' +export cgr_from_pass='' -export to_host = '127.0.0.1' -export to_port = '27017' -export to_db = '10' -export to_auth_db = "cgrates" # Auth db on target server -export to_user = 'cgrates' -export to_pass = '' -fi +export cgr_to_host='127.0.0.1' +export cgr_to_port='27017' +export cgr_to_db='10' +export cgr_to_auth_db="cgrates" # Auth db on target server +export cgr_to_user='cgrates' +export cgr_to_pass='' +;; +esac +export PGPASSWORD="CGRateS.org" +#StorDBs +case $stordb in +"mysql") +#mysql Config +user="cgrates" +host="127.0.0.1" +db="cgrates" +;; -if [$stordb="redis"];then -#Redis Config -export from_host = '192.168.100.40' -export from_port = 6379 -export from_db = 11 -export from_pass = '' - -export to_host = '192.168.100.40' -export to_port = 6379 -export to_db = 10 -export to_pass = '' # Not used - -else if [$stordb="mysql"];then 
-#Mongo Config -export from_host = '127.0.0.1' -export from_port = '27017' -export from_db = '11' -export from_auth_db = 'cgrates' # Auth db on source server -export from_user = 'cgrates' -export from_pass = '' - -export to_host = '127.0.0.1' -export to_port = '27017' -export to_db = '10' -export to_auth_db = "cgrates" # Auth db on target server -export to_user = 'cgrates' -export to_pass = '' -fi - - +"postgres") +#postgres Config +user="cgrates" +host="127.0.0.1" +db="cgrates" +;; +esac DIR="$(dirname "$(readlink -f "$0")")" - +#DataDB switch case $datadb in + "redis") ./dbsmerge_redis.py ;; + "mongo") ./dbsmerge_mongo.py ;; esac +#StorDB switch case $stordb in - "mysql") + +"mysql") mysql -u$user -p$PGPASSWORD -h $host < "$DIR"/mysql_tables_update.sql up=$? mysql -u$user -p$PGPASSWORD -h $host -D cgrates < "$DIR"/mysql_cdr_migration.sql mig=$? ;; + "postgres") psql -U $user -h $host -d cgrates -f "$DIR"/pq_tables_update.sql up=$? @@ -99,8 +93,6 @@ mig=$? esac if [ $up = 0 ] && [ $mig = 0 ]; then - echo -e "\n\t+++ CGR-DB successfully set-up! +++\n" + echo -e "\n\t+++ The script ran successfully ! 
+++\n" exit 0 -fi - - +fi \ No newline at end of file diff --git a/data/storage/migrator/usage_mysql.py b/data/storage/migrator/usage_mysql.py deleted file mode 100755 index 2d062c87c..000000000 --- a/data/storage/migrator/usage_mysql.py +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/python - -# depends: -# ^ mysql (debian: python-mysql.connector) - -host = '192.168.100.40' -port = 3306 -database = 'cgrates' -user = 'root' -password = 'CGRateS.org' - -import mysql.connector - -config = { - 'user': user, - 'password': password, - 'host': host, - 'port': port, - 'database': database, -} - -print('Connecting to MySQL...') -cnx = mysql.connector.connect(**config) -cursor = cnx.cursor() - -print('Renaming old column...') -cursor.execute('ALTER TABLE cdrs CHANGE COLUMN `usage` `usage_old` DECIMAL(30,9)') - -print('Adding new column...') -cursor.execute('ALTER TABLE cdrs ADD `usage` DECIMAL(30)') - -print('Setting new values...') -cursor.execute('UPDATE cdrs SET `usage` = `usage_old` * 1000000000 WHERE usage_old IS NOT NULL') - -print('Deleting old column...') -cursor.execute('ALTER TABLEX cdrs DROP COLUMN usage_old') - -print('Closing MySQL connection...') -cnx.close() - diff --git a/data/storage/migrator/usage_postgres.py b/data/storage/migrator/usage_postgres.py deleted file mode 100755 index 621321f8a..000000000 --- a/data/storage/migrator/usage_postgres.py +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/python - -# depends: -# ^ psycopg2 (debian: python-psycopg2) - -host = '127.0.0.1' -port = 5432 -database = 'cgrates' -user = 'cgrates' -password = 'CGRateS.org' - -import psycopg2 - -print('Connecting to PostgreSQL...') -cnx = psycopg2.connect( - host=host, - port=port, - dbname=database, - user=user, - password=password - ) -cursor = cnx.cursor() - -print('Renaming old column...') -cursor.execute('ALTER TABLE cdrs RENAME COLUMN usage to usage_old') - -print('Adding new column...') -cursor.execute('ALTER TABLE cdrs ADD usage NUMERIC(30)') - -print('Setting new values...') 
-cursor.execute('UPDATE cdrs SET usage = usage_old * 1000000000 WHERE usage_old IS NOT NULL') - -print('Deleting old column...') -cursor.execute('ALTER TABLE cdrs DROP COLUMN usage_old') - -print('Commiting...') -cnx.commit() - -print('Closing PostgreSQL connection...') -cnx.close() From c5a1464d6bfe7586dbc25686bf3a236110243d99 Mon Sep 17 00:00:00 2001 From: edwardro22 Date: Tue, 12 Sep 2017 15:26:22 +0000 Subject: [PATCH 06/10] small fix --- data/scripts/migrator/usage_mysql.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/scripts/migrator/usage_mysql.py b/data/scripts/migrator/usage_mysql.py index 2d062c87c..1401f1897 100755 --- a/data/scripts/migrator/usage_mysql.py +++ b/data/scripts/migrator/usage_mysql.py @@ -3,7 +3,7 @@ # depends: # ^ mysql (debian: python-mysql.connector) -host = '192.168.100.40' +host = '127.0.0.1' port = 3306 database = 'cgrates' user = 'root' From d01f8b58dc1bc3e4de00a62a0b5c6683a34c9d86 Mon Sep 17 00:00:00 2001 From: edwardro22 Date: Wed, 13 Sep 2017 09:08:09 +0000 Subject: [PATCH 07/10] Added Tenant to resources --- data/storage/migrator/rc7_to_rc8.sh | 6 ++++- .../mysql/create_tariffplan_tables.sql | 1 + .../postgres/create_tariffplan_tables.sql | 1 + data/tariffplans/testtp/Resources.csv | 12 ++++----- data/tariffplans/tutorial/Resources.csv | 12 ++++----- engine/loader_csv_test.go | 12 +++++---- engine/model_helpers.go | 3 +++ engine/model_helpers_test.go | 5 ++++ engine/models.go | 25 ++++++++++--------- engine/resources.go | 3 +++ engine/storage_sql.go | 5 +--- utils/apitpdata.go | 1 + 12 files changed, 52 insertions(+), 34 deletions(-) diff --git a/data/storage/migrator/rc7_to_rc8.sh b/data/storage/migrator/rc7_to_rc8.sh index 3f4b3b809..5f6d7143e 100755 --- a/data/storage/migrator/rc7_to_rc8.sh +++ b/data/storage/migrator/rc7_to_rc8.sh @@ -66,11 +66,15 @@ DIR="$(dirname "$(readlink -f "$0")")" case $datadb in "redis") -./dbsmerge_redis.py +echo "executing dbsmerge_redis.py" +./dbsmerge_redis.py +echo 
"done!" ;; "mongo") +echo "executing dbsmerge_mongo.py" ./dbsmerge_mongo.py +echo "done!" ;; esac diff --git a/data/storage/mysql/create_tariffplan_tables.sql b/data/storage/mysql/create_tariffplan_tables.sql index 5f74cd89e..e62bd1146 100644 --- a/data/storage/mysql/create_tariffplan_tables.sql +++ b/data/storage/mysql/create_tariffplan_tables.sql @@ -397,6 +397,7 @@ CREATE TABLE tp_aliases ( DROP TABLE IF EXISTS tp_resources; CREATE TABLE tp_resources ( + `tenant` varchar(64) NOT NULL, `id` int(11) NOT NULL AUTO_INCREMENT, `tpid` varchar(64) NOT NULL, `tag` varchar(64) NOT NULL, diff --git a/data/storage/postgres/create_tariffplan_tables.sql b/data/storage/postgres/create_tariffplan_tables.sql index 5bb88688c..c0772103c 100644 --- a/data/storage/postgres/create_tariffplan_tables.sql +++ b/data/storage/postgres/create_tariffplan_tables.sql @@ -393,6 +393,7 @@ CREATE INDEX tpaliases_idx ON tp_aliases (tpid,direction,tenant,category,account DROP TABLE IF EXISTS tp_resources; CREATE TABLE tp_resources ( + "tenant"varchar(64) NOT NULL, "id" SERIAL PRIMARY KEY, "tpid" varchar(64) NOT NULL, "tag" varchar(64) NOT NULL, diff --git a/data/tariffplans/testtp/Resources.csv b/data/tariffplans/testtp/Resources.csv index 06204d7c7..6597b961d 100755 --- a/data/tariffplans/testtp/Resources.csv +++ b/data/tariffplans/testtp/Resources.csv @@ -1,6 +1,6 @@ -#Id[0],FilterType[1],FilterFieldName[2],FilterFieldValues[3],ActivationInterval[4],TTL[5],Limit[6],AllocationMessage[7],Blocker[8],Stored[9],Weight[10],Thresholds[11] -ResGroup1,*string,Account,1001;1002,2014-07-29T15:00:00Z,1s,7,,true,true,20, -ResGroup1,*string_prefix,Destination,10;20,,,,,,,, -ResGroup1,*rsr_fields,,Subject(~^1.*1$);Destination(1002),,,,,,,, -ResGroup2,*destinations,Destination,DST_FS,2014-07-29T15:00:00Z,3600s,8,SPECIAL_1002,true,true,10, -ResGroup3,*cdr_stats,,CDRST1:*min_ASR:34;CDRST_1001:*min_ASR:20,,,,,,,, 
+#Tenant[0],Id[1],FilterType[2],FilterFieldName[3],FilterFieldValues[4],ActivationInterval[5],TTL[6],Limit[7],AllocationMessage[8],Blocker[9],Stored[10],Weight[11],Thresholds[12] +Tester,ResGroup1,*string,Account,1001;1002,2014-07-29T15:00:00Z,1s,7,,true,true,20, +Tester,ResGroup1,*string_prefix,Destination,10;20,,,,,,,, +Tester,ResGroup1,*rsr_fields,,Subject(~^1.*1$);Destination(1002),,,,,,,, +Tester,ResGroup2,*destinations,Destination,DST_FS,2014-07-29T15:00:00Z,3600s,8,SPECIAL_1002,true,true,10, +Tester,ResGroup3,*cdr_stats,,CDRST1:*min_ASR:34;CDRST_1001:*min_ASR:20,,,,,,,, diff --git a/data/tariffplans/tutorial/Resources.csv b/data/tariffplans/tutorial/Resources.csv index 980438b6a..30f655dcc 100755 --- a/data/tariffplans/tutorial/Resources.csv +++ b/data/tariffplans/tutorial/Resources.csv @@ -1,8 +1,8 @@ -#Id[0],FilterType[1],FilterFieldName[2],FilterFieldValues[3],ActivationInterval[4],TTL[5],Limit[6],AllocationMessage[7],Blocker[8],Stored[9],Weight[10],Thresholds[11] -ResGroup1,*string,Account,1001;1002,2014-07-29T15:00:00Z,1s,7,,true,true,20, -ResGroup1,*string_prefix,Destination,10;20,,,,,,,, -ResGroup1,*rsr_fields,,Subject(~^1.*1$);Destination(1002),,,,,,,, -ResGroup2,*destinations,Destination,DST_FS,2014-07-29T15:00:00Z,3600s,8,SPECIAL_1002,true,true,10, -ResGroup3,*string,Account,3001,2014-07-29T15:00:00Z,1s,3,,true,true,20, +#Tenant[0],Id[1],FilterType[2],FilterFieldName[3],FilterFieldValues[4],ActivationInterval[5],TTL[6],Limit[7],AllocationMessage[8],Blocker[9],Stored[10],Weight[11],Thresholds[12] +Tester,ResGroup1,*string,Account,1001;1002,2014-07-29T15:00:00Z,1s,7,,true,true,20, +Tester,ResGroup1,*string_prefix,Destination,10;20,,,,,,,, +Tester,ResGroup1,*rsr_fields,,Subject(~^1.*1$);Destination(1002),,,,,,,, +Tester,ResGroup2,*destinations,Destination,DST_FS,2014-07-29T15:00:00Z,3600s,8,SPECIAL_1002,true,true,10, +Tester,ResGroup3,*string,Account,3001,2014-07-29T15:00:00Z,1s,3,,true,true,20, #ResGroup3,*timings,SetupTime,PEAK,,,,,,,, 
#ResGroup3,*cdr_stats,,CDRST1:*min_ASR:34;CDRST_1001:*min_ASR:20,,,,,,,, diff --git a/engine/loader_csv_test.go b/engine/loader_csv_test.go index d472eac1d..798aa0a4a 100755 --- a/engine/loader_csv_test.go +++ b/engine/loader_csv_test.go @@ -266,11 +266,11 @@ cgrates.org,mas,true,another,value,10 *out,cgrates.org,call,remo,remo,*any,*rating,Account,remo,minu,10 ` resProfiles = ` -#Id[0],FilterType[1],FilterFieldName[2],FilterFieldValues[3],ActivationInterval[4],TTL[5],Limit[6],AllocationMessage[7],Weight[8],Thresholds[9] -ResGroup21,*string,HdrAccount,1001;1002,2014-07-29T15:00:00Z,1s,2,call,true,true,10, -ResGroup21,*string_prefix,HdrDestination,10;20,,,,,,,, -ResGroup21,*rsr_fields,,HdrSubject(~^1.*1$);HdrDestination(1002),,,,,,,, -ResGroup22,*destinations,HdrDestination,DST_FS,2014-07-29T15:00:00Z,3600s,2,premium_call,true,true,10, +#Tenant[0],Id[1],FilterType[2],FilterFieldName[3],FilterFieldValues[4],ActivationInterval[5],TTL[6],Limit[7],AllocationMessage[8],Blocker[9],Stored[10],Weight[11],Thresholds[12] +Tester,ResGroup21,*string,HdrAccount,1001;1002,2014-07-29T15:00:00Z,1s,2,call,true,true,10, +Tester,ResGroup21,*string_prefix,HdrDestination,10;20,,,,,,,, +Tester,ResGroup21,*rsr_fields,,HdrSubject(~^1.*1$);HdrDestination(1002),,,,,,,, +Tester,ResGroup22,*destinations,HdrDestination,DST_FS,2014-07-29T15:00:00Z,3600s,2,premium_call,true,true,10, ` stats = ` #Tenant[0],Id[1],FilterType[2],FilterFieldName[3],FilterFieldValues[4],ActivationInterval[5],QueueLength[6],TTL[7],Metrics[8],Blocker[9],Stored[10],Weight[11],Thresholds[12] @@ -1393,6 +1393,7 @@ func TestLoadResourceProfiles(t *testing.T) { eResProfiles := map[string]*utils.TPResource{ "ResGroup21": &utils.TPResource{ TPid: testTPID, + Tenant:"Tester", ID: "ResGroup21", Filters: []*utils.TPRequestFilter{ &utils.TPRequestFilter{Type: MetaString, FieldName: "HdrAccount", Values: []string{"1001", "1002"}}, @@ -1409,6 +1410,7 @@ func TestLoadResourceProfiles(t *testing.T) { }, "ResGroup22": &utils.TPResource{ 
TPid: testTPID, + Tenant:"Tester", ID: "ResGroup22", Filters: []*utils.TPRequestFilter{ &utils.TPRequestFilter{Type: MetaDestinations, FieldName: "HdrDestination", Values: []string{"DST_FS"}}, diff --git a/engine/model_helpers.go b/engine/model_helpers.go index 11b6d8500..b06b5cce8 100755 --- a/engine/model_helpers.go +++ b/engine/model_helpers.go @@ -1821,6 +1821,7 @@ func (tps TpResources) AsTPResources() (result []*utils.TPResource) { if !found { rl = &utils.TPResource{ TPid: tp.Tpid, + Tenant: tp.Tenant, ID: tp.Tag, Blocker: tp.Blocker, Stored: tp.Stored, @@ -1880,6 +1881,7 @@ func APItoModelResource(rl *utils.TPResource) (mdls TpResources) { for i, fltr := range rl.Filters { mdl := &TpResource{ Tpid: rl.TPid, + Tenant: rl.Tenant, Tag: rl.ID, } if i == 0 { @@ -1921,6 +1923,7 @@ func APItoModelResource(rl *utils.TPResource) (mdls TpResources) { func APItoResource(tpRL *utils.TPResource, timezone string) (rp *ResourceProfile, err error) { rp = &ResourceProfile{ + Tenant: tpRL.Tenant, ID: tpRL.ID, Weight: tpRL.Weight, Blocker: tpRL.Blocker, diff --git a/engine/model_helpers_test.go b/engine/model_helpers_test.go index ee9fba139..bc57d7df3 100755 --- a/engine/model_helpers_test.go +++ b/engine/model_helpers_test.go @@ -713,6 +713,7 @@ func TestTpResourcesAsTpResources(t *testing.T) { tps := []*TpResource{ &TpResource{ Tpid: "TEST_TPID", + Tenant: "Tester", Tag: "ResGroup1", FilterType: MetaStringPrefix, FilterFieldName: "Destination", @@ -726,12 +727,14 @@ func TestTpResourcesAsTpResources(t *testing.T) { &TpResource{ Tpid: "TEST_TPID", Tag: "ResGroup1", + Tenant: "Tester", FilterType: MetaStringPrefix, FilterFieldName: "Category", FilterFieldValues: "call;inbound_call", Thresholds: "WARN3"}, &TpResource{ Tpid: "TEST_TPID", + Tenant: "Tester", Tag: "ResGroup2", FilterType: MetaStringPrefix, FilterFieldName: "Destination", @@ -745,6 +748,7 @@ func TestTpResourcesAsTpResources(t *testing.T) { eTPs := []*utils.TPResource{ &utils.TPResource{ TPid: tps[0].Tpid, + 
Tenant: tps[0].Tenant, ID: tps[0].Tag, Filters: []*utils.TPRequestFilter{ &utils.TPRequestFilter{ @@ -769,6 +773,7 @@ func TestTpResourcesAsTpResources(t *testing.T) { }, &utils.TPResource{ TPid: tps[2].Tpid, + Tenant: tps[2].Tenant, ID: tps[2].Tag, Filters: []*utils.TPRequestFilter{ &utils.TPRequestFilter{ diff --git a/engine/models.go b/engine/models.go index bc03cd5b6..8b7fd037a 100755 --- a/engine/models.go +++ b/engine/models.go @@ -454,18 +454,19 @@ func (t TBLSMCosts) TableName() string { type TpResource struct { ID int64 Tpid string - Tag string `index:"0" re:""` - FilterType string `index:"1" re:"^\*[A-Za-z].*"` - FilterFieldName string `index:"2" re:""` - FilterFieldValues string `index:"3" re:""` - ActivationInterval string `index:"4" re:""` - UsageTTL string `index:"5" re:""` - Limit string `index:"6" re:""` - AllocationMessage string `index:"7" re:""` - Blocker bool `index:"8" re:""` - Stored bool `index:"9" re:""` - Weight float64 `index:"10" re:"\d+\.?\d*"` - Thresholds string `index:"11" re:""` + Tenant string `index:"0" re:""` + Tag string `index:"1" re:""` + FilterType string `index:"2" re:"^\*[A-Za-z].*"` + FilterFieldName string `index:"3" re:""` + FilterFieldValues string `index:"4" re:""` + ActivationInterval string `index:"5" re:""` + UsageTTL string `index:"6" re:""` + Limit string `index:"7" re:""` + AllocationMessage string `index:"8" re:""` + Blocker bool `index:"9" re:""` + Stored bool `index:"10" re:""` + Weight float64 `index:"11" re:"\d+\.?\d*"` + Thresholds string `index:"12" re:""` CreatedAt time.Time } diff --git a/engine/resources.go b/engine/resources.go index 78f7d2dc2..fceea912e 100755 --- a/engine/resources.go +++ b/engine/resources.go @@ -39,6 +39,7 @@ func init() { // ResourceProfile represents the user configuration for the resource type ResourceProfile struct { + Tenant string ID string // identifier of this resource Filters []*RequestFilter // filters for the request ActivationInterval *utils.ActivationInterval // time 
when this resource becomes active and expires @@ -53,6 +54,7 @@ type ResourceProfile struct { // ResourceUsage represents an usage counted type ResourceUsage struct { + Tenant string ID string // Unique identifier of this ResourceUsage, Eg: FreeSWITCH UUID ExpiryTime time.Time Units float64 // Number of units used @@ -66,6 +68,7 @@ func (ru *ResourceUsage) isActive(atTime time.Time) bool { // Resource represents a resource in the system // not thread safe, needs locking at process level type Resource struct { + Tenant string ID string Usages map[string]*ResourceUsage TTLIdx []string // holds ordered list of ResourceIDs based on their TTL, empty if feature is disabled diff --git a/engine/storage_sql.go b/engine/storage_sql.go index 7cd83c7aa..f5fd41d6e 100755 --- a/engine/storage_sql.go +++ b/engine/storage_sql.go @@ -588,7 +588,7 @@ func (self *SQLStorage) SetTPStats(sts []*utils.TPStats) error { tx := self.db.Begin() for _, stq := range sts { // Remove previous - if err := tx.Where(&TpStats{Tpid: stq.TPid,Tenant: stq.Tenant,Tag: stq.ID}).Delete(TpStats{}).Error; err != nil { + if err := tx.Where(&TpStats{Tpid: stq.TPid,Tag: stq.ID}).Delete(TpStats{}).Error; err != nil { tx.Rollback() return err } @@ -1171,8 +1171,6 @@ func (self *SQLStorage) GetTPDestinationRates(tpid, id string, pagination *utils func (self *SQLStorage) GetTPTimings(tpid, id string) ([]*utils.ApierTPTiming, error) { var tpTimings TpTimings q := self.db.Where("tpid = ?", tpid) - utils.Logger.Debug(fmt.Sprintf("#1 Id care trimite %s", id)) - utils.Logger.Debug(fmt.Sprintf("#1 TPId care trimite %s", tpid)) if len(id) != 0 { q = q.Where("tag = ?", id) } @@ -1180,7 +1178,6 @@ func (self *SQLStorage) GetTPTimings(tpid, id string) ([]*utils.ApierTPTiming, e return nil, err } ts := tpTimings.AsTPTimings() - utils.Logger.Debug(fmt.Sprintf("#2 ce gaseste : %s", ts)) if len(ts) == 0 { return ts, utils.ErrNotFound } diff --git a/utils/apitpdata.go b/utils/apitpdata.go index 754088e49..db55e86b7 100755 --- 
a/utils/apitpdata.go +++ b/utils/apitpdata.go @@ -1270,6 +1270,7 @@ type AttrSetBalance struct { type TPResource struct { TPid string + Tenant string ID string // Identifier of this limit Filters []*TPRequestFilter // Filters for the request ActivationInterval *TPActivationInterval // Time when this limit becomes active/expires From fd01dd409dd74025bad552cee5da2f8597ec9d12 Mon Sep 17 00:00:00 2001 From: edwardro22 Date: Wed, 13 Sep 2017 09:16:01 +0000 Subject: [PATCH 08/10] small fix --- engine/storage_sql.go | 1 - 1 file changed, 1 deletion(-) diff --git a/engine/storage_sql.go b/engine/storage_sql.go index f5fd41d6e..7d6bb583a 100755 --- a/engine/storage_sql.go +++ b/engine/storage_sql.go @@ -221,7 +221,6 @@ func (self *SQLStorage) SetTPTimings(timings []*utils.ApierTPTiming) error { tx := self.db.Begin() for _, timing := range timings { - utils.Logger.Debug(fmt.Sprintf("#1(set) Id care trimite %s", timing.ID)) if err := tx.Where(&TpTiming{Tpid: timing.TPid, Tag: timing.ID}).Delete(TpTiming{}).Error; err != nil { tx.Rollback() return err From d9b4bbb60454bb0cb13b90e4c3bae0cb377eee4d Mon Sep 17 00:00:00 2001 From: edwardro22 Date: Wed, 13 Sep 2017 14:27:42 +0000 Subject: [PATCH 09/10] Small fixes --- data/storage/migrator/mysql_cdr_migration.sql | 7 +------ data/storage/migrator/mysql_tables_update.sql | 5 +++++ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/data/storage/migrator/mysql_cdr_migration.sql b/data/storage/migrator/mysql_cdr_migration.sql index f2a9d32c2..d1cb6bfd3 100755 --- a/data/storage/migrator/mysql_cdr_migration.sql +++ b/data/storage/migrator/mysql_cdr_migration.sql @@ -55,9 +55,4 @@ END // DELIMITER ; -CALL cgrates.migration(); - -ALTER TABLE cdrs CHANGE COLUMN `usage` `usage_old` DECIMAL(30,9); -ALTER TABLE cdrs ADD `usage` DECIMAL(30); -UPDATE cdrs SET `usage` = `usage_old` * 1000000000 WHERE usage_old IS NOT NULL; -ALTER TABLEX cdrs DROP COLUMN usage_old; \ No newline at end of file +CALL cgrates.migration(); \ No newline at 
end of file diff --git a/data/storage/migrator/mysql_tables_update.sql b/data/storage/migrator/mysql_tables_update.sql index 7cbeaf335..da020828f 100755 --- a/data/storage/migrator/mysql_tables_update.sql +++ b/data/storage/migrator/mysql_tables_update.sql @@ -72,3 +72,8 @@ CREATE TABLE versions ( + +ALTER TABLE cdrs CHANGE COLUMN `usage` `usage_old` DECIMAL(30,9); +ALTER TABLE cdrs ADD `usage` DECIMAL(30); +UPDATE cdrs SET `usage` = `usage_old` * 1000000000 WHERE usage_old IS NOT NULL; +ALTER TABLE cdrs DROP COLUMN usage_old; \ No newline at end of file From 54133db99b818a1409b0b6e27f8ec8d45b9948f2 Mon Sep 17 00:00:00 2001 From: edwardro22 Date: Wed, 13 Sep 2017 15:08:24 +0000 Subject: [PATCH 10/10] Formated code --- apier/v1/tpresources_it_test.go | 1 + engine/libtest.go | 4 ++-- engine/loader_csv_test.go | 18 +++++++++--------- engine/model_helpers.go | 16 ++++++++-------- engine/model_helpers_test.go | 18 +++++++++--------- engine/models.go | 4 ++-- engine/resources.go | 6 +++--- engine/storage_sql.go | 2 +- engine/stordb_it_test.go | 6 +++--- 9 files changed, 38 insertions(+), 37 deletions(-) diff --git a/apier/v1/tpresources_it_test.go b/apier/v1/tpresources_it_test.go index 37f7c62a8..a49521dce 100644 --- a/apier/v1/tpresources_it_test.go +++ b/apier/v1/tpresources_it_test.go @@ -127,6 +127,7 @@ func testTPResGetTPResourceBeforeSet(t *testing.T) { func testTPResSetTPResource(t *testing.T) { tpRes = &utils.TPResource{ + Tenant:"Tester", TPid: "TPR1", ID: "Res", Filters: []*utils.TPRequestFilter{ diff --git a/engine/libtest.go b/engine/libtest.go index b203f7424..705dfcdd9 100644 --- a/engine/libtest.go +++ b/engine/libtest.go @@ -42,8 +42,8 @@ func InitDataDb(cfg *config.CGRConfig) error { return err } dataDB.LoadDataDBCache(nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) - // Write version before starting - if err := CheckVersions(dataDB); err != nil { + // Write version before starting + if err := CheckVersions(dataDB); err != nil 
{ return err } diff --git a/engine/loader_csv_test.go b/engine/loader_csv_test.go index 798aa0a4a..7c13357f8 100755 --- a/engine/loader_csv_test.go +++ b/engine/loader_csv_test.go @@ -1392,9 +1392,9 @@ func TestLoadReverseAliases(t *testing.T) { func TestLoadResourceProfiles(t *testing.T) { eResProfiles := map[string]*utils.TPResource{ "ResGroup21": &utils.TPResource{ - TPid: testTPID, - Tenant:"Tester", - ID: "ResGroup21", + TPid: testTPID, + Tenant: "Tester", + ID: "ResGroup21", Filters: []*utils.TPRequestFilter{ &utils.TPRequestFilter{Type: MetaString, FieldName: "HdrAccount", Values: []string{"1001", "1002"}}, &utils.TPRequestFilter{Type: MetaStringPrefix, FieldName: "HdrDestination", Values: []string{"10", "20"}}, @@ -1409,9 +1409,9 @@ func TestLoadResourceProfiles(t *testing.T) { Limit: "2", }, "ResGroup22": &utils.TPResource{ - TPid: testTPID, - Tenant:"Tester", - ID: "ResGroup22", + TPid: testTPID, + Tenant: "Tester", + ID: "ResGroup22", Filters: []*utils.TPRequestFilter{ &utils.TPRequestFilter{Type: MetaDestinations, FieldName: "HdrDestination", Values: []string{"DST_FS"}}, }, @@ -1438,9 +1438,9 @@ func TestLoadResourceProfiles(t *testing.T) { func TestLoadStats(t *testing.T) { eStats := map[string]*utils.TPStats{ "Stats1": &utils.TPStats{ - Tenant:"Tester", - TPid: testTPID, - ID: "Stats1", + Tenant: "Tester", + TPid: testTPID, + ID: "Stats1", Filters: []*utils.TPRequestFilter{ &utils.TPRequestFilter{Type: MetaString, FieldName: "Account", Values: []string{"1001", "1002"}}, }, diff --git a/engine/model_helpers.go b/engine/model_helpers.go index b06b5cce8..c34ea2175 100755 --- a/engine/model_helpers.go +++ b/engine/model_helpers.go @@ -1880,9 +1880,9 @@ func APItoModelResource(rl *utils.TPResource) (mdls TpResources) { } for i, fltr := range rl.Filters { mdl := &TpResource{ - Tpid: rl.TPid, - Tenant: rl.Tenant, - Tag: rl.ID, + Tpid: rl.TPid, + Tenant: rl.Tenant, + Tag: rl.ID, } if i == 0 { mdl.UsageTTL = rl.UsageTTL @@ -1963,7 +1963,7 @@ func (tps 
TpStatsS) AsTPStats() (result []*utils.TPStats) { st, found := mst[tp.Tag] if !found { st = &utils.TPStats{ - Tenant: tp.Tenant, + Tenant: tp.Tenant, TPid: tp.Tpid, ID: tp.Tag, Blocker: tp.Blocker, @@ -2031,9 +2031,9 @@ func APItoModelStats(st *utils.TPStats) (mdls TpStatsS) { } for i, fltr := range st.Filters { mdl := &TpStats{ - Tenant: st.Tenant, - Tpid: st.TPid, - Tag: st.ID, + Tenant: st.Tenant, + Tpid: st.TPid, + Tag: st.ID, } if i == 0 { mdl.TTL = st.TTL @@ -2077,7 +2077,7 @@ func APItoModelStats(st *utils.TPStats) (mdls TpStatsS) { func APItoStats(tpST *utils.TPStats, timezone string) (st *StatQueueProfile, err error) { st = &StatQueueProfile{ - Tenant: tpST.Tenant, + Tenant: tpST.Tenant, ID: tpST.ID, QueueLength: tpST.QueueLength, Weight: tpST.Weight, diff --git a/engine/model_helpers_test.go b/engine/model_helpers_test.go index bc57d7df3..bad16444a 100755 --- a/engine/model_helpers_test.go +++ b/engine/model_helpers_test.go @@ -713,7 +713,7 @@ func TestTpResourcesAsTpResources(t *testing.T) { tps := []*TpResource{ &TpResource{ Tpid: "TEST_TPID", - Tenant: "Tester", + Tenant: "Tester", Tag: "ResGroup1", FilterType: MetaStringPrefix, FilterFieldName: "Destination", @@ -727,14 +727,14 @@ func TestTpResourcesAsTpResources(t *testing.T) { &TpResource{ Tpid: "TEST_TPID", Tag: "ResGroup1", - Tenant: "Tester", + Tenant: "Tester", FilterType: MetaStringPrefix, FilterFieldName: "Category", FilterFieldValues: "call;inbound_call", Thresholds: "WARN3"}, &TpResource{ Tpid: "TEST_TPID", - Tenant: "Tester", + Tenant: "Tester", Tag: "ResGroup2", FilterType: MetaStringPrefix, FilterFieldName: "Destination", @@ -747,9 +747,9 @@ func TestTpResourcesAsTpResources(t *testing.T) { } eTPs := []*utils.TPResource{ &utils.TPResource{ - TPid: tps[0].Tpid, - Tenant: tps[0].Tenant, - ID: tps[0].Tag, + TPid: tps[0].Tpid, + Tenant: tps[0].Tenant, + ID: tps[0].Tag, Filters: []*utils.TPRequestFilter{ &utils.TPRequestFilter{ Type: tps[0].FilterType, @@ -772,9 +772,9 @@ func 
TestTpResourcesAsTpResources(t *testing.T) { Thresholds: []string{"WARN_RES1", "WARN_RES2", "WARN3"}, }, &utils.TPResource{ - TPid: tps[2].Tpid, - Tenant: tps[2].Tenant, - ID: tps[2].Tag, + TPid: tps[2].Tpid, + Tenant: tps[2].Tenant, + ID: tps[2].Tag, Filters: []*utils.TPRequestFilter{ &utils.TPRequestFilter{ Type: tps[2].FilterType, diff --git a/engine/models.go b/engine/models.go index 8b7fd037a..e769f00a9 100755 --- a/engine/models.go +++ b/engine/models.go @@ -454,7 +454,7 @@ func (t TBLSMCosts) TableName() string { type TpResource struct { ID int64 Tpid string - Tenant string `index:"0" re:""` + Tenant string `index:"0" re:""` Tag string `index:"1" re:""` FilterType string `index:"2" re:"^\*[A-Za-z].*"` FilterFieldName string `index:"3" re:""` @@ -483,7 +483,7 @@ func (t TBLVersion) TableName() string { type TpStats struct { ID int64 Tpid string - Tenant string `index:"0" re:""` + Tenant string `index:"0" re:""` Tag string `index:"1" re:""` FilterType string `index:"2" re:"^\*[A-Za-z].*"` FilterFieldName string `index:"3" re:""` diff --git a/engine/resources.go b/engine/resources.go index 357f1b60b..40a2c5ba7 100755 --- a/engine/resources.go +++ b/engine/resources.go @@ -39,7 +39,7 @@ func init() { // ResourceProfile represents the user configuration for the resource type ResourceProfile struct { - Tenant string + Tenant string ID string // identifier of this resource Filters []*RequestFilter // filters for the request ActivationInterval *utils.ActivationInterval // time when this resource becomes active and expires @@ -54,7 +54,7 @@ type ResourceProfile struct { // ResourceUsage represents an usage counted type ResourceUsage struct { - Tenant string + Tenant string ID string // Unique identifier of this ResourceUsage, Eg: FreeSWITCH UUID ExpiryTime time.Time Units float64 // Number of units used @@ -68,7 +68,7 @@ func (ru *ResourceUsage) isActive(atTime time.Time) bool { // Resource represents a resource in the system // not thread safe, needs locking at 
process level type Resource struct { - Tenant string + Tenant string ID string Usages map[string]*ResourceUsage TTLIdx []string // holds ordered list of ResourceIDs based on their TTL, empty if feature is disabled diff --git a/engine/storage_sql.go b/engine/storage_sql.go index 7d6bb583a..fdf8c4de5 100755 --- a/engine/storage_sql.go +++ b/engine/storage_sql.go @@ -587,7 +587,7 @@ func (self *SQLStorage) SetTPStats(sts []*utils.TPStats) error { tx := self.db.Begin() for _, stq := range sts { // Remove previous - if err := tx.Where(&TpStats{Tpid: stq.TPid,Tag: stq.ID}).Delete(TpStats{}).Error; err != nil { + if err := tx.Where(&TpStats{Tpid: stq.TPid, Tag: stq.ID}).Delete(TpStats{}).Error; err != nil { tx.Rollback() return err } diff --git a/engine/stordb_it_test.go b/engine/stordb_it_test.go index 3d4f5ab12..b2aae4f4b 100755 --- a/engine/stordb_it_test.go +++ b/engine/stordb_it_test.go @@ -1540,9 +1540,9 @@ func testStorDBitCRUDTpStats(t *testing.T) { //WRITE eTPs := []*utils.TPStats{ &utils.TPStats{ - TPid: "TEST_TPID", - Tenant:"Test", - ID: "Stats1", + TPid: "TEST_TPID", + Tenant: "Test", + ID: "Stats1", Filters: []*utils.TPRequestFilter{ &utils.TPRequestFilter{ Type: "filtertype",